
[Nbd] [PATCH 1/1] kthread: introduce kthread_get_run() to fix __nbd_ioctl()



It is not safe to use the task_struct returned by kthread_run(threadfn)
if threadfn() can exit before the "owner" calls kthread_stop(): nothing
protects this task_struct from being freed once the thread exits.

So __nbd_ioctl() looks buggy: a killed nbd_thread_send() can exit, its
task_struct can be freed, and then kthread_stop() can use the freed/reused
memory.
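
For reference, the racy sequence with the current code looks roughly like
this (simplified; the comments are mine):

	thread = kthread_run(nbd_thread_send, nbd, "%s", nbd_name(nbd));
	...
	/*
	 * nbd_thread_send() gets a fatal signal and exits; __nbd_ioctl()
	 * holds no reference, so the task_struct can be freed and reused
	 * at any point after that.
	 */
	...
	kthread_stop(thread);	/* may touch freed/reused memory */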

Add the new trivial helper, kthread_get_run(), which pins the task_struct
before waking the new thread. Hopefully it will have more users; this patch
changes __nbd_ioctl() as an example.
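
For illustration, a minimal caller of the new helper would look like the
sketch below (my_threadfn/my_data are made-up names, not part of the patch):

	struct task_struct *worker;

	worker = kthread_get_run(my_threadfn, my_data, "my-worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	/* the thread may exit on its own at any time from here on */

	kthread_stop(worker);		/* safe: we still hold a reference */
	put_task_struct(worker);	/* drop the ref taken by kthread_get_run() */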

Signed-off-by: Oleg Nesterov <oleg@...696...>
---
 drivers/block/nbd.c     |    5 +++--
 include/linux/kthread.h |   12 ++++++++++++
 2 files changed, 15 insertions(+), 2 deletions(-)

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 93b3f99..b85e7a0 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -754,8 +754,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		else
 			blk_queue_flush(nbd->disk->queue, 0);
 
-		thread = kthread_run(nbd_thread_send, nbd, "%s",
-				     nbd_name(nbd));
+		thread = kthread_get_run(nbd_thread_send, nbd, "%s",
+					 nbd_name(nbd));
 		if (IS_ERR(thread)) {
 			mutex_lock(&nbd->tx_lock);
 			return PTR_ERR(thread);
@@ -765,6 +765,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
 		error = nbd_thread_recv(nbd);
 		nbd_dev_dbg_close(nbd);
 		kthread_stop(thread);
+		put_task_struct(thread);
 
 		mutex_lock(&nbd->tx_lock);
 
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 13d5520..b0465cc 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -37,6 +37,18 @@ struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
 	__k;								   \
 })
 
+/* Same as kthread_run() but also pin the task_struct */
+#define kthread_get_run(threadfn, data, namefmt, ...)			   \
+({									   \
+	struct task_struct *__k						   \
+		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
+	if (!IS_ERR(__k)) {						   \
+		get_task_struct(__k);					   \
+		wake_up_process(__k);					   \
+	}								   \
+	__k;								   \
+})
+
 void kthread_bind(struct task_struct *k, unsigned int cpu);
 void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
 int kthread_stop(struct task_struct *k);
-- 
1.5.5.1




