1// SPDX-License-Identifier: GPL-2.0-only
2/* Kernel thread helper functions.
3 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
4 * Copyright (C) 2009 Red Hat, Inc.
5 *
6 * Creation is done via kthreadd, so that we get a clean environment
7 * even if we're invoked from userspace (think modprobe, hotplug cpu,
8 * etc.).
9 */
10#include <uapi/linux/sched/types.h>
11#include <linux/mm.h>
12#include <linux/mmu_context.h>
13#include <linux/sched.h>
14#include <linux/sched/mm.h>
15#include <linux/sched/task.h>
16#include <linux/kthread.h>
17#include <linux/completion.h>
18#include <linux/err.h>
19#include <linux/cgroup.h>
20#include <linux/cpuset.h>
21#include <linux/unistd.h>
22#include <linux/file.h>
23#include <linux/export.h>
24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/freezer.h>
27#include <linux/ptrace.h>
28#include <linux/uaccess.h>
29#include <linux/numa.h>
30#include <linux/sched/isolation.h>
31#include <trace/events/sched.h>
32
33
34static DEFINE_SPINLOCK(kthread_create_lock);
35static LIST_HEAD(kthread_create_list);
36struct task_struct *kthreadd_task;
37
38static LIST_HEAD(kthreads_hotplug);
39static DEFINE_MUTEX(kthreads_hotplug_lock);
40
41struct kthread_create_info
42{
43 /* Information passed to kthread() from kthreadd. */
44 char *full_name;
45 int (*threadfn)(void *data);
46 void *data;
47 int node;
48
49 /* Result passed back to kthread_create() from kthreadd. */
50 struct task_struct *result;
51 struct completion *done;
52
53 struct list_head list;
54};
55
56struct kthread {
57 unsigned long flags;
58 unsigned int cpu;
59 unsigned int node;
60 int started;
61 int result;
62 int (*threadfn)(void *);
63 void *data;
64 struct completion parked;
65 struct completion exited;
66#ifdef CONFIG_BLK_CGROUP
67 struct cgroup_subsys_state *blkcg_css;
68#endif
69 /* To store the full name if task comm is truncated. */
70 char *full_name;
71 struct task_struct *task;
72 struct list_head hotplug_node;
73 struct cpumask *preferred_affinity;
74};
75
76enum KTHREAD_BITS {
77 KTHREAD_IS_PER_CPU = 0,
78 KTHREAD_SHOULD_STOP,
79 KTHREAD_SHOULD_PARK,
80};
81
82static inline struct kthread *to_kthread(struct task_struct *k)
83{
84 WARN_ON(!(k->flags & PF_KTHREAD));
85 return k->worker_private;
86}
87
88/*
89 * Variant of to_kthread() that doesn't assume @p is a kthread.
90 *
91 * When "(p->flags & PF_KTHREAD)" is set the task is a kthread and will
92 * always remain a kthread. For kthreads p->worker_private always
93 * points to a struct kthread. For tasks that are not kthreads
94 * p->worker_private is used to point to other things.
95 *
96 * Return NULL for any task that is not a kthread.
97 */
98static inline struct kthread *__to_kthread(struct task_struct *p)
99{
100 void *kthread = p->worker_private;
101 if (kthread && !(p->flags & PF_KTHREAD))
102 kthread = NULL;
103 return kthread;
104}
105
106void get_kthread_comm(char *buf, size_t buf_size, struct task_struct *tsk)
107{
	struct kthread *kthread = to_kthread(tsk);
109
110 if (!kthread || !kthread->full_name) {
111 strscpy(buf, tsk->comm, buf_size);
112 return;
113 }
114
115 strscpy_pad(buf, kthread->full_name, buf_size);
116}
117
118bool set_kthread_struct(struct task_struct *p)
119{
120 struct kthread *kthread;
121
122 if (WARN_ON_ONCE(to_kthread(p)))
123 return false;
124
125 kthread = kzalloc(sizeof(*kthread), GFP_KERNEL);
126 if (!kthread)
127 return false;
128
	init_completion(&kthread->exited);
	init_completion(&kthread->parked);
	INIT_LIST_HEAD(&kthread->hotplug_node);
132 p->vfork_done = &kthread->exited;
133
134 kthread->task = p;
135 kthread->node = tsk_fork_get_node(current);
136 p->worker_private = kthread;
137 return true;
138}
139
140void free_kthread_struct(struct task_struct *k)
141{
142 struct kthread *kthread;
143
144 /*
145 * Can be NULL if kmalloc() in set_kthread_struct() failed.
146 */
147 kthread = to_kthread(k);
148 if (!kthread)
149 return;
150
151#ifdef CONFIG_BLK_CGROUP
152 WARN_ON_ONCE(kthread->blkcg_css);
153#endif
154 k->worker_private = NULL;
	kfree(kthread->full_name);
	kfree(kthread);
157}
158
159/**
160 * kthread_should_stop - should this kthread return now?
161 *
162 * When someone calls kthread_stop() on your kthread, it will be woken
163 * and this will return true. You should then return, and your return
164 * value will be passed through to kthread_stop().
165 */
166bool kthread_should_stop(void)
167{
168 return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
169}
170EXPORT_SYMBOL(kthread_should_stop);
171
172static bool __kthread_should_park(struct task_struct *k)
173{
174 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
175}
176
177/**
178 * kthread_should_park - should this kthread park now?
179 *
180 * When someone calls kthread_park() on your kthread, it will be woken
181 * and this will return true. You should then do the necessary
182 * cleanup and call kthread_parkme()
183 *
184 * Similar to kthread_should_stop(), but this keeps the thread alive
185 * and in a park position. kthread_unpark() "restarts" the thread and
186 * calls the thread function again.
187 */
188bool kthread_should_park(void)
189{
190 return __kthread_should_park(current);
191}
192EXPORT_SYMBOL_GPL(kthread_should_park);
193
194bool kthread_should_stop_or_park(void)
195{
196 struct kthread *kthread = __to_kthread(current);
197
198 if (!kthread)
199 return false;
200
201 return kthread->flags & (BIT(KTHREAD_SHOULD_STOP) | BIT(KTHREAD_SHOULD_PARK));
202}
203
204/**
205 * kthread_freezable_should_stop - should this freezable kthread return now?
206 * @was_frozen: optional out parameter, indicates whether %current was frozen
207 *
208 * kthread_should_stop() for freezable kthreads, which will enter
209 * refrigerator if necessary. This function is safe from kthread_stop() /
210 * freezer deadlock and freezable kthreads should use this function instead
211 * of calling try_to_freeze() directly.
212 */
213bool kthread_freezable_should_stop(bool *was_frozen)
214{
215 bool frozen = false;
216
217 might_sleep();
218
219 if (unlikely(freezing(current)))
		frozen = __refrigerator(true);
221
222 if (was_frozen)
223 *was_frozen = frozen;
224
225 return kthread_should_stop();
226}
227EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
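
/*
 * Illustrative sketch: a typical main loop for a freezable kthread built on
 * kthread_freezable_should_stop(). The names my_freezable_thread() and
 * my_do_work() are hypothetical.
 *
 *	static int my_freezable_thread(void *data)
 *	{
 *		bool was_frozen;
 *
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(&was_frozen)) {
 *			my_do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */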
228
229/**
230 * kthread_func - return the function specified on kthread creation
231 * @task: kthread task in question
232 *
233 * Returns NULL if the task is not a kthread.
234 */
235void *kthread_func(struct task_struct *task)
236{
	struct kthread *kthread = __to_kthread(task);
238 if (kthread)
239 return kthread->threadfn;
240 return NULL;
241}
242EXPORT_SYMBOL_GPL(kthread_func);
243
244/**
245 * kthread_data - return data value specified on kthread creation
246 * @task: kthread task in question
247 *
248 * Return the data value specified when kthread @task was created.
249 * The caller is responsible for ensuring the validity of @task when
250 * calling this function.
251 */
252void *kthread_data(struct task_struct *task)
253{
	return to_kthread(task)->data;
255}
256EXPORT_SYMBOL_GPL(kthread_data);
257
258/**
259 * kthread_probe_data - speculative version of kthread_data()
260 * @task: possible kthread task in question
261 *
262 * @task could be a kthread task. Return the data value specified when it
263 * was created if accessible. If @task isn't a kthread task or its data is
264 * inaccessible for any reason, %NULL is returned. This function requires
265 * that @task itself is safe to dereference.
266 */
267void *kthread_probe_data(struct task_struct *task)
268{
	struct kthread *kthread = __to_kthread(task);
	void *data = NULL;

	if (kthread)
		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
274 return data;
275}
276
277static void __kthread_parkme(struct kthread *self)
278{
279 for (;;) {
280 /*
281 * TASK_PARKED is a special state; we must serialize against
282 * possible pending wakeups to avoid store-store collisions on
283 * task->state.
284 *
285 * Such a collision might possibly result in the task state
		 * changing from TASK_PARKED and us failing the
287 * wait_task_inactive() in kthread_park().
288 */
289 set_special_state(TASK_PARKED);
290 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
291 break;
292
293 /*
294 * Thread is going to call schedule(), do not preempt it,
295 * or the caller of kthread_park() may spend more time in
296 * wait_task_inactive().
297 */
298 preempt_disable();
299 complete(&self->parked);
300 schedule_preempt_disabled();
301 preempt_enable();
302 }
303 __set_current_state(TASK_RUNNING);
304}
305
306void kthread_parkme(void)
307{
	__kthread_parkme(to_kthread(current));
309}
310EXPORT_SYMBOL_GPL(kthread_parkme);
311
312/**
 * kthread_exit - Cause the current kthread to return @result to kthread_stop().
314 * @result: The integer value to return to kthread_stop().
315 *
316 * While kthread_exit can be called directly, it exists so that
317 * functions which do some additional work in non-modular code such as
318 * module_put_and_kthread_exit can be implemented.
319 *
320 * Does not return.
321 */
322void __noreturn kthread_exit(long result)
323{
324 struct kthread *kthread = to_kthread(current);
325 kthread->result = result;
	if (!list_empty(&kthread->hotplug_node)) {
		mutex_lock(&kthreads_hotplug_lock);
		list_del(&kthread->hotplug_node);
		mutex_unlock(&kthreads_hotplug_lock);

		if (kthread->preferred_affinity) {
			kfree(kthread->preferred_affinity);
			kthread->preferred_affinity = NULL;
		}
	}
	do_exit(0);
337}
338EXPORT_SYMBOL(kthread_exit);
339
340/**
341 * kthread_complete_and_exit - Exit the current kthread.
342 * @comp: Completion to complete
343 * @code: The integer value to return to kthread_stop().
344 *
345 * If present, complete @comp and then return code to kthread_stop().
346 *
347 * A kernel thread whose module may be removed after the completion of
348 * @comp can use this function to exit safely.
349 *
350 * Does not return.
351 */
352void __noreturn kthread_complete_and_exit(struct completion *comp, long code)
353{
354 if (comp)
355 complete(comp);
356
357 kthread_exit(code);
358}
359EXPORT_SYMBOL(kthread_complete_and_exit);
360
361static void kthread_fetch_affinity(struct kthread *kthread, struct cpumask *cpumask)
362{
363 const struct cpumask *pref;
364
365 if (kthread->preferred_affinity) {
366 pref = kthread->preferred_affinity;
367 } else {
368 if (WARN_ON_ONCE(kthread->node == NUMA_NO_NODE))
369 return;
		pref = cpumask_of_node(kthread->node);
	}

	cpumask_and(cpumask, pref, housekeeping_cpumask(HK_TYPE_KTHREAD));
	if (cpumask_empty(cpumask))
		cpumask_copy(cpumask, housekeeping_cpumask(HK_TYPE_KTHREAD));
376}
377
378static void kthread_affine_node(void)
379{
380 struct kthread *kthread = to_kthread(current);
381 cpumask_var_t affinity;
382
383 WARN_ON_ONCE(kthread_is_per_cpu(current));
384
385 if (kthread->node == NUMA_NO_NODE) {
		housekeeping_affine(current, HK_TYPE_KTHREAD);
	} else {
		if (!zalloc_cpumask_var(&affinity, GFP_KERNEL)) {
			WARN_ON_ONCE(1);
			return;
		}

		mutex_lock(&kthreads_hotplug_lock);
		WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
		list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
		/*
		 * The node cpumask is racy when read from kthread() but:
		 * - a racing CPU going down will either fail on the subsequent
		 *   call to set_cpus_allowed_ptr() or be migrated to housekeepers
		 *   afterwards by the scheduler.
		 * - a racing CPU going up will be handled by kthreads_online_cpu()
		 */
		kthread_fetch_affinity(kthread, affinity);
		set_cpus_allowed_ptr(current, affinity);
		mutex_unlock(&kthreads_hotplug_lock);

		free_cpumask_var(affinity);
408 }
409}
410
411static int kthread(void *_create)
412{
413 static const struct sched_param param = { .sched_priority = 0 };
414 /* Copy data: it's on kthread's stack */
415 struct kthread_create_info *create = _create;
416 int (*threadfn)(void *data) = create->threadfn;
417 void *data = create->data;
418 struct completion *done;
419 struct kthread *self;
420 int ret;
421
422 self = to_kthread(current);
423
424 /* Release the structure when caller killed by a fatal signal. */
425 done = xchg(&create->done, NULL);
426 if (!done) {
		kfree(create->full_name);
		kfree(create);
		kthread_exit(-EINTR);
430 }
431
432 self->full_name = create->full_name;
433 self->threadfn = threadfn;
434 self->data = data;
435
436 /*
437 * The new thread inherited kthreadd's priority and CPU mask. Reset
438 * back to default in case they have been changed.
439 */
440 sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
441
442 /* OK, tell user we're spawned, wait for stop or wakeup */
443 __set_current_state(TASK_UNINTERRUPTIBLE);
444 create->result = current;
445 /*
446 * Thread is going to call schedule(), do not preempt it,
447 * or the creator may spend more time in wait_task_inactive().
448 */
449 preempt_disable();
450 complete(done);
451 schedule_preempt_disabled();
452 preempt_enable();
453
454 self->started = 1;
455
456 if (!(current->flags & PF_NO_SETAFFINITY) && !self->preferred_affinity)
457 kthread_affine_node();
458
459 ret = -EINTR;
460 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
461 cgroup_kthread_ready();
462 __kthread_parkme(self);
463 ret = threadfn(data);
464 }
465 kthread_exit(ret);
466}
467
468/* called from kernel_clone() to get node information for about to be created task */
469int tsk_fork_get_node(struct task_struct *tsk)
470{
471#ifdef CONFIG_NUMA
472 if (tsk == kthreadd_task)
473 return tsk->pref_node_fork;
474#endif
475 return NUMA_NO_NODE;
476}
477
478static void create_kthread(struct kthread_create_info *create)
479{
480 int pid;
481
482#ifdef CONFIG_NUMA
483 current->pref_node_fork = create->node;
484#endif
485 /* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, create->full_name,
			    CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* Release the structure when caller killed by a fatal signal. */
		struct completion *done = xchg(&create->done, NULL);

		kfree(create->full_name);
		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
498 complete(done);
499 }
500}
501
502static __printf(4, 0)
503struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
504 void *data, int node,
505 const char namefmt[],
506 va_list args)
507{
508 DECLARE_COMPLETION_ONSTACK(done);
509 struct task_struct *task;
510 struct kthread_create_info *create = kmalloc(sizeof(*create),
511 GFP_KERNEL);
512
513 if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;
	create->full_name = kvasprintf(GFP_KERNEL, namefmt, args);
	if (!create->full_name) {
		task = ERR_PTR(-ENOMEM);
522 goto free_create;
523 }
524
	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
530 /*
531 * Wait for completion in killable state, for I might be chosen by
532 * the OOM killer while kthreadd is trying to allocate memory for
533 * new kernel thread.
534 */
535 if (unlikely(wait_for_completion_killable(&done))) {
536 /*
537 * If I was killed by a fatal signal before kthreadd (or new
538 * kernel thread) calls complete(), leave the cleanup of this
539 * structure to that thread.
540 */
541 if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
543 /*
544 * kthreadd (or new kernel thread) will call complete()
545 * shortly.
546 */
547 wait_for_completion(&done);
548 }
549 task = create->result;
550free_create:
	kfree(create);
552 return task;
553}
554
555/**
556 * kthread_create_on_node - create a kthread.
557 * @threadfn: the function to run until signal_pending(current).
558 * @data: data ptr for @threadfn.
559 * @node: task and thread structures for the thread are allocated on this node
560 * @namefmt: printf-style name for the thread.
561 *
562 * Description: This helper function creates and names a kernel
563 * thread. The thread will be stopped: use wake_up_process() to start
564 * it. See also kthread_run(). The new thread has SCHED_NORMAL policy and
565 * is affine to all CPUs.
566 *
567 * If thread is going to be bound on a particular cpu, give its node
568 * in @node, to get NUMA affinity for kthread stack, or else give NUMA_NO_NODE.
569 * When woken, the thread will run @threadfn() with @data as its
570 * argument. @threadfn() can either return directly if it is a
571 * standalone thread for which no one will call kthread_stop(), or
572 * return when 'kthread_should_stop()' is true (which means
573 * kthread_stop() has been called). The return value should be zero
574 * or a negative error number; it will be passed to kthread_stop().
575 *
576 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
577 */
578struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
579 void *data, int node,
580 const char namefmt[],
581 ...)
582{
583 struct task_struct *task;
584 va_list args;
585
586 va_start(args, namefmt);
587 task = __kthread_create_on_node(threadfn, data, node, namefmt, args);
588 va_end(args);
589
590 return task;
591}
592EXPORT_SYMBOL(kthread_create_on_node);
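
/*
 * Illustrative sketch of the basic create/start/stop pattern. The names
 * my_thread_fn(), do_work() and my_data are hypothetical.
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			do_work(data);
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 *
 *	tsk = kthread_create_on_node(my_thread_fn, my_data, NUMA_NO_NODE, "my_thread");
 *	if (IS_ERR(tsk))
 *		return PTR_ERR(tsk);
 *	wake_up_process(tsk);
 *	...
 *	ret = kthread_stop(tsk);
 */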
593
594static void __kthread_bind_mask(struct task_struct *p, const struct cpumask *mask, unsigned int state)
595{
	if (!wait_task_inactive(p, state)) {
597 WARN_ON(1);
598 return;
599 }
600
601 scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
		set_cpus_allowed_force(p, mask);
603
604 /* It's safe because the task is inactive. */
605 p->flags |= PF_NO_SETAFFINITY;
606}
607
608static void __kthread_bind(struct task_struct *p, unsigned int cpu, unsigned int state)
609{
610 __kthread_bind_mask(p, cpumask_of(cpu), state);
611}
612
613void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
614{
	struct kthread *kthread = to_kthread(p);
616 __kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
617 WARN_ON_ONCE(kthread->started);
618}
619
620/**
621 * kthread_bind - bind a just-created kthread to a cpu.
622 * @p: thread created by kthread_create().
623 * @cpu: cpu (might not be online, must be possible) for @k to run on.
624 *
625 * Description: This function is equivalent to set_cpus_allowed(),
626 * except that @cpu doesn't need to be online, and the thread must be
627 * stopped (i.e., just returned from kthread_create()).
628 */
629void kthread_bind(struct task_struct *p, unsigned int cpu)
630{
	struct kthread *kthread = to_kthread(p);
632 __kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
633 WARN_ON_ONCE(kthread->started);
634}
635EXPORT_SYMBOL(kthread_bind);
636
637/**
638 * kthread_create_on_cpu - Create a cpu bound kthread
639 * @threadfn: the function to run until signal_pending(current).
640 * @data: data ptr for @threadfn.
641 * @cpu: The cpu on which the thread should be bound,
642 * @namefmt: printf-style name for the thread. Format is restricted
643 * to "name.*%u". Code fills in cpu number.
644 *
645 * Description: This helper function creates and names a kernel thread
646 */
647struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
648 void *data, unsigned int cpu,
649 const char *namefmt)
650{
651 struct task_struct *p;
652
653 p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
654 cpu);
	if (IS_ERR(p))
		return p;
	kthread_bind(p, cpu);
	/* CPU hotplug needs to bind once again when unparking the thread. */
	to_kthread(p)->cpu = cpu;
660 return p;
661}
662EXPORT_SYMBOL(kthread_create_on_cpu);
663
664void kthread_set_per_cpu(struct task_struct *k, int cpu)
665{
666 struct kthread *kthread = to_kthread(k);
667 if (!kthread)
668 return;
669
670 WARN_ON_ONCE(!(k->flags & PF_NO_SETAFFINITY));
671
672 if (cpu < 0) {
		clear_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
		return;
	}

	kthread->cpu = cpu;
	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
679}
680
681bool kthread_is_per_cpu(struct task_struct *p)
682{
683 struct kthread *kthread = __to_kthread(p);
684 if (!kthread)
685 return false;
686
687 return test_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
688}
689
690/**
691 * kthread_unpark - unpark a thread created by kthread_create().
692 * @k: thread created by kthread_create().
693 *
694 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return. If the thread is marked percpu then it is
 * bound to the cpu again.
697 */
698void kthread_unpark(struct task_struct *k)
699{
700 struct kthread *kthread = to_kthread(k);
701
702 if (!test_bit(KTHREAD_SHOULD_PARK, &kthread->flags))
703 return;
704 /*
705 * Newly created kthread was parked when the CPU was offline.
706 * The binding was lost and we need to set it again.
707 */
708 if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
		__kthread_bind(k, kthread->cpu, TASK_PARKED);

	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
	 */
	wake_up_state(k, TASK_PARKED);
716}
717EXPORT_SYMBOL_GPL(kthread_unpark);
718
719/**
720 * kthread_park - park a thread created by kthread_create().
721 * @k: thread created by kthread_create().
722 *
723 * Sets kthread_should_park() for @k to return true, wakes it, and
724 * waits for it to return. This can also be called after kthread_create()
725 * instead of calling wake_up_process(): the thread will park without
726 * calling threadfn().
727 *
728 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
729 * If called by the kthread itself just the park bit is set.
730 */
731int kthread_park(struct task_struct *k)
732{
733 struct kthread *kthread = to_kthread(k);
734
735 if (WARN_ON(k->flags & PF_EXITING))
736 return -ENOSYS;
737
738 if (WARN_ON_ONCE(test_bit(KTHREAD_SHOULD_PARK, &kthread->flags)))
739 return -EBUSY;
740
	set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	if (k != current) {
		wake_up_process(k);
744 /*
745 * Wait for __kthread_parkme() to complete(), this means we
746 * _will_ have TASK_PARKED and are about to call schedule().
747 */
748 wait_for_completion(&kthread->parked);
749 /*
750 * Now wait for that schedule() to complete and the task to
751 * get scheduled out.
752 */
753 WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
754 }
755
756 return 0;
757}
758EXPORT_SYMBOL_GPL(kthread_park);
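
/*
 * Illustrative sketch of the park handshake from the thread's side. The
 * names my_percpu_thread() and do_one_unit_of_work() are hypothetical.
 *
 *	static int my_percpu_thread(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park())
 *				kthread_parkme();
 *			else
 *				do_one_unit_of_work(data);
 *		}
 *		return 0;
 *	}
 *
 * The controlling side calls kthread_park(tsk) before taking the backing
 * CPU down and kthread_unpark(tsk) once it is back online.
 */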
759
760/**
761 * kthread_stop - stop a thread created by kthread_create().
762 * @k: thread created by kthread_create().
763 *
764 * Sets kthread_should_stop() for @k to return true, wakes it, and
765 * waits for it to exit. This can also be called after kthread_create()
766 * instead of calling wake_up_process(): the thread will exit without
767 * calling threadfn().
768 *
769 * If threadfn() may call kthread_exit() itself, the caller must ensure
770 * task_struct can't go away.
771 *
772 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
773 * was never called.
774 */
775int kthread_stop(struct task_struct *k)
776{
777 struct kthread *kthread;
778 int ret;
779
	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_kthread(k);
	set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
	kthread_unpark(k);
	set_tsk_thread_flag(k, TIF_NOTIFY_SIGNAL);
	wake_up_process(k);
	wait_for_completion(&kthread->exited);
	ret = kthread->result;
	put_task_struct(k);
791
792 trace_sched_kthread_stop_ret(ret);
793 return ret;
794}
795EXPORT_SYMBOL(kthread_stop);
796
797/**
798 * kthread_stop_put - stop a thread and put its task struct
799 * @k: thread created by kthread_create().
800 *
 * Stops a thread created by kthread_create() and puts its task_struct.
802 * Only use when holding an extra task struct reference obtained by
803 * calling get_task_struct().
804 */
805int kthread_stop_put(struct task_struct *k)
806{
807 int ret;
808
809 ret = kthread_stop(k);
	put_task_struct(k);
811 return ret;
812}
813EXPORT_SYMBOL(kthread_stop_put);
814
815int kthreadd(void *unused)
816{
817 static const char comm[TASK_COMM_LEN] = "kthreadd";
818 struct task_struct *tsk = current;
819
820 /* Setup a clean context for our children to inherit. */
821 set_task_comm(tsk, comm);
822 ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, housekeeping_cpumask(HK_TYPE_KTHREAD));
824 set_mems_allowed(node_states[N_MEMORY]);
825
826 current->flags |= PF_NOFREEZE;
827 cgroup_init_kthreadd();
828
829 for (;;) {
830 set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
849 }
850
851 return 0;
852}
853
854int kthread_affine_preferred(struct task_struct *p, const struct cpumask *mask)
855{
	struct kthread *kthread = to_kthread(p);
857 cpumask_var_t affinity;
858 int ret = 0;
859
860 if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE) || kthread->started) {
861 WARN_ON(1);
862 return -EINVAL;
863 }
864
865 WARN_ON_ONCE(kthread->preferred_affinity);
866
	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
868 return -ENOMEM;
869
870 kthread->preferred_affinity = kzalloc(sizeof(struct cpumask), GFP_KERNEL);
871 if (!kthread->preferred_affinity) {
872 ret = -ENOMEM;
873 goto out;
874 }
875
876 mutex_lock(&kthreads_hotplug_lock);
	cpumask_copy(kthread->preferred_affinity, mask);
	WARN_ON_ONCE(!list_empty(&kthread->hotplug_node));
	list_add_tail(&kthread->hotplug_node, &kthreads_hotplug);
	kthread_fetch_affinity(kthread, affinity);

	scoped_guard (raw_spinlock_irqsave, &p->pi_lock)
		set_cpus_allowed_force(p, affinity);

	mutex_unlock(&kthreads_hotplug_lock);
out:
	free_cpumask_var(affinity);
888
889 return ret;
890}
891EXPORT_SYMBOL_GPL(kthread_affine_preferred);
892
893/*
894 * Re-affine kthreads according to their preferences
895 * and the newly online CPU. The CPU down part is handled
896 * by select_fallback_rq() which default re-affines to
897 * housekeepers from other nodes in case the preferred
898 * affinity doesn't apply anymore.
899 */
900static int kthreads_online_cpu(unsigned int cpu)
901{
902 cpumask_var_t affinity;
903 struct kthread *k;
904 int ret;
905
	guard(mutex)(&kthreads_hotplug_lock);

	if (list_empty(&kthreads_hotplug))
		return 0;

	if (!zalloc_cpumask_var(&affinity, GFP_KERNEL))
912 return -ENOMEM;
913
914 ret = 0;
915
916 list_for_each_entry(k, &kthreads_hotplug, hotplug_node) {
917 if (WARN_ON_ONCE((k->task->flags & PF_NO_SETAFFINITY) ||
918 kthread_is_per_cpu(k->task))) {
919 ret = -EINVAL;
920 continue;
921 }
		kthread_fetch_affinity(k, affinity);
		set_cpus_allowed_ptr(k->task, affinity);
	}

	free_cpumask_var(affinity);
927
928 return ret;
929}
930
931static int kthreads_init(void)
932{
	return cpuhp_setup_state(CPUHP_AP_KTHREADS_ONLINE, "kthreads:online",
				 kthreads_online_cpu, NULL);
935}
936early_initcall(kthreads_init);
937
938void __kthread_init_worker(struct kthread_worker *worker,
939 const char *name,
940 struct lock_class_key *key)
941{
942 memset(worker, 0, sizeof(struct kthread_worker));
943 raw_spin_lock_init(&worker->lock);
944 lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	INIT_LIST_HEAD(&worker->delayed_work_list);
947}
948EXPORT_SYMBOL_GPL(__kthread_init_worker);
949
950/**
951 * kthread_worker_fn - kthread function to process kthread_worker
952 * @worker_ptr: pointer to initialized kthread_worker
953 *
954 * This function implements the main cycle of kthread worker. It processes
955 * work_list until it is stopped with kthread_stop(). It sleeps when the queue
956 * is empty.
957 *
 * The works must not hold any locks or leave preemption or interrupts disabled
 * when they finish. A safe point for freezing is provided after one work
 * finishes and before a new one is started.
961 *
962 * Also the works must not be handled by more than one worker at the same time,
963 * see also kthread_queue_work().
964 */
965int kthread_worker_fn(void *worker_ptr)
966{
967 struct kthread_worker *worker = worker_ptr;
968 struct kthread_work *work;
969
970 /*
971 * FIXME: Update the check and remove the assignment when all kthread
972 * worker users are created using kthread_create_worker*() functions.
973 */
974 WARN_ON(worker->task && worker->task != current);
975 worker->task = current;
976
977 if (worker->flags & KTW_FREEZABLE)
978 set_freezable();
979
980repeat:
981 set_current_state(TASK_INTERRUPTIBLE); /* mb paired w/ kthread_stop */
982
983 if (kthread_should_stop()) {
984 __set_current_state(TASK_RUNNING);
985 raw_spin_lock_irq(&worker->lock);
986 worker->task = NULL;
987 raw_spin_unlock_irq(&worker->lock);
988 return 0;
989 }
990
991 work = NULL;
992 raw_spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
997 }
998 worker->current_work = work;
999 raw_spin_unlock_irq(&worker->lock);
1000
1001 if (work) {
1002 kthread_work_func_t func = work->func;
1003 __set_current_state(TASK_RUNNING);
1004 trace_sched_kthread_work_execute_start(work);
1005 work->func(work);
1006 /*
1007 * Avoid dereferencing work after this point. The trace
1008 * event only cares about the address.
1009 */
		trace_sched_kthread_work_execute_end(work, func);
1011 } else if (!freezing(current)) {
1012 schedule();
1013 } else {
1014 /*
1015 * Handle the case where the current remains
1016 * TASK_INTERRUPTIBLE. try_to_freeze() expects
1017 * the current to be TASK_RUNNING.
1018 */
1019 __set_current_state(TASK_RUNNING);
1020 }
1021
1022 try_to_freeze();
1023 cond_resched();
1024 goto repeat;
1025}
1026EXPORT_SYMBOL_GPL(kthread_worker_fn);
1027
1028static __printf(3, 0) struct kthread_worker *
1029__kthread_create_worker_on_node(unsigned int flags, int node,
1030 const char namefmt[], va_list args)
1031{
1032 struct kthread_worker *worker;
1033 struct task_struct *task;
1034
1035 worker = kzalloc(sizeof(*worker), GFP_KERNEL);
1036 if (!worker)
		return ERR_PTR(-ENOMEM);
1038
1039 kthread_init_worker(worker);
1040
	task = __kthread_create_on_node(kthread_worker_fn, worker,
					node, namefmt, args);
	if (IS_ERR(task))
1044 goto fail_task;
1045
1046 worker->flags = flags;
1047 worker->task = task;
1048
1049 return worker;
1050
1051fail_task:
	kfree(worker);
	return ERR_CAST(task);
1054}
1055
1056/**
1057 * kthread_create_worker_on_node - create a kthread worker
1058 * @flags: flags modifying the default behavior of the worker
1059 * @node: task structure for the thread is allocated on this node
1060 * @namefmt: printf-style name for the kthread worker (task).
1061 *
1062 * Returns a pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1063 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1064 * when the caller was killed by a fatal signal.
1065 */
1066struct kthread_worker *
1067kthread_create_worker_on_node(unsigned int flags, int node, const char namefmt[], ...)
1068{
1069 struct kthread_worker *worker;
1070 va_list args;
1071
1072 va_start(args, namefmt);
1073 worker = __kthread_create_worker_on_node(flags, node, namefmt, args);
1074 va_end(args);
1075
1076 return worker;
1077}
1078EXPORT_SYMBOL(kthread_create_worker_on_node);
1079
1080/**
1081 * kthread_create_worker_on_cpu - create a kthread worker and bind it
1082 * to a given CPU and the associated NUMA node.
1083 * @cpu: CPU number
1084 * @flags: flags modifying the default behavior of the worker
1085 * @namefmt: printf-style name for the thread. Format is restricted
1086 * to "name.*%u". Code fills in cpu number.
1087 *
1088 * Use a valid CPU number if you want to bind the kthread worker
1089 * to the given CPU and the associated NUMA node.
1090 *
1091 * A good practice is to add the cpu number also into the worker name.
1092 * For example, use kthread_create_worker_on_cpu(cpu, "helper/%d", cpu).
1093 *
1094 * CPU hotplug:
1095 * The kthread worker API is simple and generic. It just provides a way
1096 * to create, use, and destroy workers.
1097 *
1098 * It is up to the API user how to handle CPU hotplug. They have to decide
1099 * how to handle pending work items, prevent queuing new ones, and
1100 * restore the functionality when the CPU goes off and on. There are a
1101 * few catches:
1102 *
1103 * - CPU affinity gets lost when it is scheduled on an offline CPU.
1104 *
1105 * - The worker might not exist when the CPU was off when the user
1106 * created the workers.
1107 *
1108 * Good practice is to implement two CPU hotplug callbacks and to
1109 * destroy/create the worker when the CPU goes down/up.
1110 *
1111 * Return:
1112 * The pointer to the allocated worker on success, ERR_PTR(-ENOMEM)
1113 * when the needed structures could not get allocated, and ERR_PTR(-EINTR)
1114 * when the caller was killed by a fatal signal.
1115 */
1116struct kthread_worker *
1117kthread_create_worker_on_cpu(int cpu, unsigned int flags,
1118 const char namefmt[])
1119{
1120 struct kthread_worker *worker;
1121
1122 worker = kthread_create_worker_on_node(flags, cpu_to_node(cpu), namefmt, cpu);
	if (!IS_ERR(worker))
		kthread_bind(worker->task, cpu);
1125
1126 return worker;
1127}
1128EXPORT_SYMBOL(kthread_create_worker_on_cpu);
1129
1130/*
1131 * Returns true when the work could not be queued at the moment.
1132 * It happens when it is already pending in a worker list
1133 * or when it is being cancelled.
1134 */
1135static inline bool queuing_blocked(struct kthread_worker *worker,
1136 struct kthread_work *work)
1137{
1138 lockdep_assert_held(&worker->lock);
1139
	return !list_empty(&work->node) || work->canceling;
1141}
1142
1143static void kthread_insert_work_sanity_check(struct kthread_worker *worker,
1144 struct kthread_work *work)
1145{
1146 lockdep_assert_held(&worker->lock);
1147 WARN_ON_ONCE(!list_empty(&work->node));
1148 /* Do not use a work with >1 worker, see kthread_queue_work() */
1149 WARN_ON_ONCE(work->worker && work->worker != worker);
1150}
1151
1152/* insert @work before @pos in @worker */
1153static void kthread_insert_work(struct kthread_worker *worker,
1154 struct kthread_work *work,
1155 struct list_head *pos)
1156{
1157 kthread_insert_work_sanity_check(worker, work);
1158
1159 trace_sched_kthread_work_queue_work(worker, work);
1160
	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
1165}
1166
1167/**
1168 * kthread_queue_work - queue a kthread_work
1169 * @worker: target kthread_worker
1170 * @work: kthread_work to queue
1171 *
1172 * Queue @work to work processor @task for async execution. @task
1173 * must have been created with kthread_create_worker(). Returns %true
1174 * if @work was successfully queued, %false if it was already pending.
1175 *
1176 * Reinitialize the work if it needs to be used by another worker.
1177 * For example, when the worker was stopped and started again.
1178 */
1179bool kthread_queue_work(struct kthread_worker *worker,
1180 struct kthread_work *work)
1181{
1182 bool ret = false;
1183 unsigned long flags;
1184
1185 raw_spin_lock_irqsave(&worker->lock, flags);
1186 if (!queuing_blocked(worker, work)) {
		kthread_insert_work(worker, work, &worker->work_list);
1188 ret = true;
1189 }
1190 raw_spin_unlock_irqrestore(&worker->lock, flags);
1191 return ret;
1192}
1193EXPORT_SYMBOL_GPL(kthread_queue_work);
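
/*
 * Illustrative sketch of basic kthread_worker usage. The names my_worker,
 * my_work, my_work_fn() and do_something() are hypothetical.
 *
 *	static struct kthread_worker *my_worker;
 *	static struct kthread_work my_work;
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		do_something();
 *	}
 *
 *	my_worker = kthread_create_worker(0, "my_worker");
 *	if (IS_ERR(my_worker))
 *		return PTR_ERR(my_worker);
 *	kthread_init_work(&my_work, my_work_fn);
 *	kthread_queue_work(my_worker, &my_work);
 */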
1194
1195/**
1196 * kthread_delayed_work_timer_fn - callback that queues the associated kthread
1197 * delayed work when the timer expires.
1198 * @t: pointer to the expired timer
1199 *
1200 * The format of the function is defined by struct timer_list.
1201 * It should have been called from irqsafe timer with irq already off.
1202 */
1203void kthread_delayed_work_timer_fn(struct timer_list *t)
1204{
1205 struct kthread_delayed_work *dwork = timer_container_of(dwork, t,
1206 timer);
1207 struct kthread_work *work = &dwork->work;
1208 struct kthread_worker *worker = work->worker;
1209 unsigned long flags;
1210
1211 /*
1212 * This might happen when a pending work is reinitialized.
	 * It means that it is used in a wrong way.
1214 */
1215 if (WARN_ON_ONCE(!worker))
1216 return;
1217
1218 raw_spin_lock_irqsave(&worker->lock, flags);
1219 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1220 WARN_ON_ONCE(work->worker != worker);
1221
1222 /* Move the work from worker->delayed_work_list. */
1223 WARN_ON_ONCE(list_empty(&work->node));
	list_del_init(&work->node);
	if (!work->canceling)
		kthread_insert_work(worker, work, &worker->work_list);
1227
1228 raw_spin_unlock_irqrestore(&worker->lock, flags);
1229}
1230EXPORT_SYMBOL(kthread_delayed_work_timer_fn);
1231
1232static void __kthread_queue_delayed_work(struct kthread_worker *worker,
1233 struct kthread_delayed_work *dwork,
1234 unsigned long delay)
1235{
1236 struct timer_list *timer = &dwork->timer;
1237 struct kthread_work *work = &dwork->work;
1238
1239 WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
1240
1241 /*
1242 * If @delay is 0, queue @dwork->work immediately. This is for
1243 * both optimization and correctness. The earliest @timer can
1244 * expire is on the closest next tick and delayed_work users depend
1245 * on that there's no such delay when @delay is 0.
1246 */
1247 if (!delay) {
		kthread_insert_work(worker, work, &worker->work_list);
1249 return;
1250 }
1251
1252 /* Be paranoid and try to detect possible races already now. */
1253 kthread_insert_work_sanity_check(worker, work);
1254
	list_add(&work->node, &worker->delayed_work_list);
1256 work->worker = worker;
1257 timer->expires = jiffies + delay;
1258 add_timer(timer);
1259}
1260
1261/**
1262 * kthread_queue_delayed_work - queue the associated kthread work
1263 * after a delay.
1264 * @worker: target kthread_worker
1265 * @dwork: kthread_delayed_work to queue
1266 * @delay: number of jiffies to wait before queuing
1267 *
1268 * If the work has not been pending it starts a timer that will queue
1269 * the work after the given @delay. If @delay is zero, it queues the
1270 * work immediately.
1271 *
1272 * Return: %false if the @work has already been pending. It means that
1273 * either the timer was running or the work was queued. It returns %true
1274 * otherwise.
1275 */
1276bool kthread_queue_delayed_work(struct kthread_worker *worker,
1277 struct kthread_delayed_work *dwork,
1278 unsigned long delay)
1279{
1280 struct kthread_work *work = &dwork->work;
1281 unsigned long flags;
1282 bool ret = false;
1283
1284 raw_spin_lock_irqsave(&worker->lock, flags);
1285
1286 if (!queuing_blocked(worker, work)) {
1287 __kthread_queue_delayed_work(worker, dwork, delay);
1288 ret = true;
1289 }
1290
1291 raw_spin_unlock_irqrestore(&worker->lock, flags);
1292 return ret;
1293}
1294EXPORT_SYMBOL_GPL(kthread_queue_delayed_work);
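
/*
 * Illustrative sketch of delayed work on a kthread_worker. The names
 * my_worker, my_dwork and my_timeout_fn() are hypothetical.
 *
 *	static struct kthread_delayed_work my_dwork;
 *
 *	kthread_init_delayed_work(&my_dwork, my_timeout_fn);
 *	kthread_queue_delayed_work(my_worker, &my_dwork, msecs_to_jiffies(100));
 *
 * A later kthread_mod_delayed_work() call pushes the expiry out, and
 * kthread_cancel_delayed_work_sync() cancels it and waits for a running
 * instance to finish.
 */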
1295
1296struct kthread_flush_work {
1297 struct kthread_work work;
1298 struct completion done;
1299};
1300
1301static void kthread_flush_work_fn(struct kthread_work *work)
1302{
1303 struct kthread_flush_work *fwork =
1304 container_of(work, struct kthread_flush_work, work);
1305 complete(&fwork->done);
1306}
1307
1308/**
1309 * kthread_flush_work - flush a kthread_work
1310 * @work: work to flush
1311 *
1312 * If @work is queued or executing, wait for it to finish execution.
1313 */
1314void kthread_flush_work(struct kthread_work *work)
1315{
1316 struct kthread_flush_work fwork = {
1317 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1318 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1319 };
1320 struct kthread_worker *worker;
1321 bool noop = false;
1322
1323 worker = work->worker;
1324 if (!worker)
1325 return;
1326
1327 raw_spin_lock_irq(&worker->lock);
1328 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1329 WARN_ON_ONCE(work->worker != worker);
1330
	if (!list_empty(&work->node))
		kthread_insert_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		kthread_insert_work(worker, &fwork.work,
				    worker->work_list.next);
1336 else
1337 noop = true;
1338
1339 raw_spin_unlock_irq(&worker->lock);
1340
1341 if (!noop)
1342 wait_for_completion(&fwork.done);
1343}
1344EXPORT_SYMBOL_GPL(kthread_flush_work);
1345
1346/*
1347 * Make sure that the timer is neither set nor running and could
1348 * not manipulate the work list_head any longer.
1349 *
 * The function is called under worker->lock. The lock is temporarily
 * released but the timer can't be set again in the meantime.
1352 */
1353static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
1354 unsigned long *flags)
1355{
1356 struct kthread_delayed_work *dwork =
1357 container_of(work, struct kthread_delayed_work, work);
1358 struct kthread_worker *worker = work->worker;
1359
1360 /*
1361 * timer_delete_sync() must be called to make sure that the timer
	 * callback is not running. The lock must be temporarily released
	 * to avoid a deadlock with the callback. In the meantime,
1364 * any queuing is blocked by setting the canceling counter.
1365 */
1366 work->canceling++;
1367 raw_spin_unlock_irqrestore(&worker->lock, *flags);
	timer_delete_sync(&dwork->timer);
1369 raw_spin_lock_irqsave(&worker->lock, *flags);
1370 work->canceling--;
1371}
1372
1373/*
1374 * This function removes the work from the worker queue.
1375 *
1376 * It is called under worker->lock. The caller must make sure that
1377 * the timer used by delayed work is not running, e.g. by calling
1378 * kthread_cancel_delayed_work_timer().
1379 *
1380 * The work might still be in use when this function finishes. See the
 * current_work processed by the worker.
1382 *
1383 * Return: %true if @work was pending and successfully canceled,
1384 * %false if @work was not pending
1385 */
1386static bool __kthread_cancel_work(struct kthread_work *work)
1387{
1388 /*
1389 * Try to remove the work from a worker list. It might either
1390 * be from worker->work_list or from worker->delayed_work_list.
1391 */
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
1394 return true;
1395 }
1396
1397 return false;
1398}
1399
1400/**
1401 * kthread_mod_delayed_work - modify delay of or queue a kthread delayed work
1402 * @worker: kthread worker to use
1403 * @dwork: kthread delayed work to queue
1404 * @delay: number of jiffies to wait before queuing
1405 *
1406 * If @dwork is idle, equivalent to kthread_queue_delayed_work(). Otherwise,
1407 * modify @dwork's timer so that it expires after @delay. If @delay is zero,
1408 * @work is guaranteed to be queued immediately.
1409 *
1410 * Return: %false if @dwork was idle and queued, %true otherwise.
1411 *
1412 * A special case is when the work is being canceled in parallel.
1413 * It might be caused either by the real kthread_cancel_delayed_work_sync()
1414 * or yet another kthread_mod_delayed_work() call. We let the other command
1415 * win and return %true here. The return value can be used for reference
1416 * counting and the number of queued works stays the same. Anyway, the caller
1417 * is supposed to synchronize these operations a reasonable way.
1418 *
1419 * This function is safe to call from any context including IRQ handler.
1420 * See __kthread_cancel_work() and kthread_delayed_work_timer_fn()
1421 * for details.
1422 */
1423bool kthread_mod_delayed_work(struct kthread_worker *worker,
1424 struct kthread_delayed_work *dwork,
1425 unsigned long delay)
1426{
1427 struct kthread_work *work = &dwork->work;
1428 unsigned long flags;
1429 int ret;
1430
1431 raw_spin_lock_irqsave(&worker->lock, flags);
1432
1433 /* Do not bother with canceling when never queued. */
1434 if (!work->worker) {
1435 ret = false;
1436 goto fast_queue;
1437 }
1438
1439 /* Work must not be used with >1 worker, see kthread_queue_work() */
1440 WARN_ON_ONCE(work->worker != worker);
1441
1442 /*
	 * Temporarily cancel the work but do not fight with another command
1444 * that is canceling the work as well.
1445 *
1446 * It is a bit tricky because of possible races with another
1447 * mod_delayed_work() and cancel_delayed_work() callers.
1448 *
1449 * The timer must be canceled first because worker->lock is released
1450 * when doing so. But the work can be removed from the queue (list)
1451 * only when it can be queued again so that the return value can
1452 * be used for reference counting.
1453 */
	kthread_cancel_delayed_work_timer(work, &flags);
1455 if (work->canceling) {
1456 /* The number of works in the queue does not change. */
1457 ret = true;
1458 goto out;
1459 }
1460 ret = __kthread_cancel_work(work);
1461
1462fast_queue:
1463 __kthread_queue_delayed_work(worker, dwork, delay);
1464out:
1465 raw_spin_unlock_irqrestore(&worker->lock, flags);
1466 return ret;
1467}
1468EXPORT_SYMBOL_GPL(kthread_mod_delayed_work);
1469
1470static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
1471{
1472 struct kthread_worker *worker = work->worker;
1473 unsigned long flags;
1474 int ret = false;
1475
1476 if (!worker)
1477 goto out;
1478
1479 raw_spin_lock_irqsave(&worker->lock, flags);
1480 /* Work must not be used with >1 worker, see kthread_queue_work(). */
1481 WARN_ON_ONCE(work->worker != worker);
1482
1483 if (is_dwork)
		kthread_cancel_delayed_work_timer(work, &flags);
1485
1486 ret = __kthread_cancel_work(work);
1487
1488 if (worker->current_work != work)
1489 goto out_fast;
1490
1491 /*
1492 * The work is in progress and we need to wait with the lock released.
1493 * In the meantime, block any queuing by setting the canceling counter.
1494 */
1495 work->canceling++;
1496 raw_spin_unlock_irqrestore(&worker->lock, flags);
1497 kthread_flush_work(work);
1498 raw_spin_lock_irqsave(&worker->lock, flags);
1499 work->canceling--;
1500
1501out_fast:
1502 raw_spin_unlock_irqrestore(&worker->lock, flags);
1503out:
1504 return ret;
1505}
1506
1507/**
1508 * kthread_cancel_work_sync - cancel a kthread work and wait for it to finish
1509 * @work: the kthread work to cancel
1510 *
1511 * Cancel @work and wait for its execution to finish. This function
1512 * can be used even if the work re-queues itself. On return from this
1513 * function, @work is guaranteed to be not pending or executing on any CPU.
1514 *
1515 * kthread_cancel_work_sync(&delayed_work->work) must not be used for
1516 * delayed_work's. Use kthread_cancel_delayed_work_sync() instead.
1517 *
1518 * The caller must ensure that the worker on which @work was last
1519 * queued can't be destroyed before this function returns.
1520 *
1521 * Return: %true if @work was pending, %false otherwise.
1522 */
1523bool kthread_cancel_work_sync(struct kthread_work *work)
1524{
	return __kthread_cancel_work_sync(work, false);
1526}
1527EXPORT_SYMBOL_GPL(kthread_cancel_work_sync);
1528
1529/**
1530 * kthread_cancel_delayed_work_sync - cancel a kthread delayed work and
1531 * wait for it to finish.
1532 * @dwork: the kthread delayed work to cancel
1533 *
1534 * This is kthread_cancel_work_sync() for delayed works.
1535 *
1536 * Return: %true if @dwork was pending, %false otherwise.
1537 */
1538bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *dwork)
1539{
	return __kthread_cancel_work_sync(&dwork->work, true);
1541}
1542EXPORT_SYMBOL_GPL(kthread_cancel_delayed_work_sync);
1543
1544/**
1545 * kthread_flush_worker - flush all current works on a kthread_worker
1546 * @worker: worker to flush
1547 *
1548 * Wait until all currently executing or pending works on @worker are
1549 * finished.
1550 */
1551void kthread_flush_worker(struct kthread_worker *worker)
1552{
1553 struct kthread_flush_work fwork = {
1554 KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
1555 COMPLETION_INITIALIZER_ONSTACK(fwork.done),
1556 };
1557
1558 kthread_queue_work(worker, &fwork.work);
1559 wait_for_completion(&fwork.done);
1560}
1561EXPORT_SYMBOL_GPL(kthread_flush_worker);
1562
1563/**
1564 * kthread_destroy_worker - destroy a kthread worker
1565 * @worker: worker to be destroyed
1566 *
1567 * Flush and destroy @worker. The simple flush is enough because the kthread
1568 * worker API is used only in trivial scenarios. There are no multi-step state
1569 * machines needed.
1570 *
 * Note that this function is not responsible for handling delayed work, so
 * the caller is responsible for queuing or canceling all delayed work items
 * before invoking this function.
1574 */
1575void kthread_destroy_worker(struct kthread_worker *worker)
1576{
1577 struct task_struct *task;
1578
1579 task = worker->task;
1580 if (WARN_ON(!task))
1581 return;
1582
1583 kthread_flush_worker(worker);
1584 kthread_stop(task);
1585 WARN_ON(!list_empty(&worker->delayed_work_list));
1586 WARN_ON(!list_empty(&worker->work_list));
	kfree(worker);
1588}
1589EXPORT_SYMBOL(kthread_destroy_worker);
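
/*
 * Illustrative teardown sketch: delayed work is the caller's responsibility,
 * so cancel it before destroying the worker. my_worker and my_dwork are
 * hypothetical.
 *
 *	kthread_cancel_delayed_work_sync(&my_dwork);
 *	kthread_destroy_worker(my_worker);
 */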
1590
1591/**
1592 * kthread_use_mm - make the calling kthread operate on an address space
1593 * @mm: address space to operate on
1594 */
1595void kthread_use_mm(struct mm_struct *mm)
1596{
1597 struct mm_struct *active_mm;
1598 struct task_struct *tsk = current;
1599
1600 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1601 WARN_ON_ONCE(tsk->mm);
1602 WARN_ON_ONCE(!mm->user_ns);
1603
1604 /*
1605 * It is possible for mm to be the same as tsk->active_mm, but
1606 * we must still mmgrab(mm) and mmdrop_lazy_tlb(active_mm),
1607 * because these references are not equivalent.
1608 */
1609 mmgrab(mm);
1610
	task_lock(tsk);
	/* Hold off tlb flush IPIs while switching mm's */
	local_irq_disable();
	active_mm = tsk->active_mm;
	tsk->active_mm = mm;
	tsk->mm = mm;
	membarrier_update_current_mm(mm);
	switch_mm_irqs_off(active_mm, mm, tsk);
	local_irq_enable();
	task_unlock(tsk);
1621#ifdef finish_arch_post_lock_switch
1622 finish_arch_post_lock_switch();
1623#endif
1624
1625 /*
1626 * When a kthread starts operating on an address space, the loop
1627 * in membarrier_{private,global}_expedited() may not observe
1628 * that tsk->mm, and not issue an IPI. Membarrier requires a
1629 * memory barrier after storing to tsk->mm, before accessing
1630 * user-space memory. A full memory barrier for membarrier
1631 * {PRIVATE,GLOBAL}_EXPEDITED is implicitly provided by
1632 * mmdrop_lazy_tlb().
1633 */
	mmdrop_lazy_tlb(active_mm);
1635}
1636EXPORT_SYMBOL_GPL(kthread_use_mm);
1637
1638/**
1639 * kthread_unuse_mm - reverse the effect of kthread_use_mm()
1640 * @mm: address space to operate on
1641 */
1642void kthread_unuse_mm(struct mm_struct *mm)
1643{
1644 struct task_struct *tsk = current;
1645
1646 WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
1647 WARN_ON_ONCE(!tsk->mm);
1648
	task_lock(tsk);
1650 /*
1651 * When a kthread stops operating on an address space, the loop
1652 * in membarrier_{private,global}_expedited() may not observe
1653 * that tsk->mm, and not issue an IPI. Membarrier requires a
1654 * memory barrier after accessing user-space memory, before
1655 * clearing tsk->mm.
1656 */
1657 smp_mb__after_spinlock();
1658 local_irq_disable();
1659 tsk->mm = NULL;
1660 membarrier_update_current_mm(NULL);
1661 mmgrab_lazy_tlb(mm);
1662 /* active_mm is still 'mm' */
1663 enter_lazy_tlb(mm, tsk);
1664 local_irq_enable();
	task_unlock(tsk);
1666
1667 mmdrop(mm);
1668}
1669EXPORT_SYMBOL_GPL(kthread_unuse_mm);
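
/*
 * Illustrative sketch: a kthread temporarily adopting a user address space
 * to access user memory on behalf of a task. my_mm, user_ptr, buf and len
 * are hypothetical, and the caller must keep the mm alive (e.g. via mmget()).
 *
 *	kthread_use_mm(my_mm);
 *	ret = copy_to_user(user_ptr, buf, len);
 *	kthread_unuse_mm(my_mm);
 */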
1670
1671#ifdef CONFIG_BLK_CGROUP
1672/**
1673 * kthread_associate_blkcg - associate blkcg to current kthread
1674 * @css: the cgroup info
1675 *
1676 * Current thread must be a kthread. The thread is running jobs on behalf of
 * other threads. In some cases, we expect the jobs to attach the cgroup info of
 * the original threads instead of that of the current thread. This function stores
 * the original thread's cgroup info in the current kthread context for later
1680 * retrieval.
1681 */
1682void kthread_associate_blkcg(struct cgroup_subsys_state *css)
1683{
1684 struct kthread *kthread;
1685
1686 if (!(current->flags & PF_KTHREAD))
1687 return;
1688 kthread = to_kthread(current);
1689 if (!kthread)
1690 return;
1691
1692 if (kthread->blkcg_css) {
		css_put(kthread->blkcg_css);
1694 kthread->blkcg_css = NULL;
1695 }
1696 if (css) {
1697 css_get(css);
1698 kthread->blkcg_css = css;
1699 }
1700}
1701EXPORT_SYMBOL(kthread_associate_blkcg);
1702
1703/**
1704 * kthread_blkcg - get associated blkcg css of current kthread
1705 *
1706 * Current thread must be a kthread.
1707 */
1708struct cgroup_subsys_state *kthread_blkcg(void)
1709{
1710 struct kthread *kthread;
1711
1712 if (current->flags & PF_KTHREAD) {
1713 kthread = to_kthread(current);
1714 if (kthread)
1715 return kthread->blkcg_css;
1716 }
1717 return NULL;
1718}
1719#endif
1720
