// SPDX-License-Identifier: GPL-2.0
#include <linux/irq_work.h>
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/resume_user_mode.h>

static struct callback_head work_exited; /* all we need is ->next == NULL */

#ifdef CONFIG_IRQ_WORK
static void task_work_set_notify_irq(struct irq_work *entry)
{
	/*
	 * no-op IPI
	 *
	 * TWA_NMI_CURRENT will already have set the TIF flag, all
	 * this interrupt does is tickle the return-to-user path.
	 */
}
static DEFINE_PER_CPU(struct irq_work, irq_work_NMI_resume) =
	IRQ_WORK_INIT_HARD(task_work_set_notify_irq);
#endif
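
/*
 * Illustrative sketch, not part of the original file: how an NMI-context
 * handler (e.g. a perf overflow callback) might defer work to the current
 * task with TWA_NMI_CURRENT. example_nmi_handler(), nmi_deferred_fn() and
 * nmi_deferred_work are hypothetical names; only init_task_work() and
 * task_work_add() are real APIs. Kept under #if 0 so it is not compiled.
 */
#if 0
#include <linux/task_work.h>

static void nmi_deferred_fn(struct callback_head *head)
{
	/* Runs later, in process context, on the return-to-user path. */
}

/* A real user must ensure the work item is never queued twice. */
static struct callback_head nmi_deferred_work;

static void example_nmi_handler(void)
{
	init_task_work(&nmi_deferred_work, nmi_deferred_fn);
	/* TWA_NMI_CURRENT is only valid for @current, from NMI context. */
	if (task_work_add(current, &nmi_deferred_work, TWA_NMI_CURRENT))
		pr_warn_ratelimited("task is exiting, work not queued\n");
}
#endif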

/**
 * task_work_add - ask the @task to execute @work->func()
 * @task: the task which should run the callback
 * @work: the callback to run
 * @notify: how to notify the targeted task
 *
 * Queue @work for task_work_run() below and notify the @task if @notify
 * is @TWA_RESUME, @TWA_SIGNAL, @TWA_SIGNAL_NO_IPI or @TWA_NMI_CURRENT.
 * @TWA_SIGNAL works like signals, in that it will interrupt the targeted
 * task and run the task_work, regardless of whether the task is currently
 * running in the kernel or userspace.
 * @TWA_SIGNAL_NO_IPI works like @TWA_SIGNAL, except it doesn't send a
 * reschedule IPI to force the targeted task to reschedule and run task_work.
 * This can be advantageous if there's no strict requirement that the
 * task_work be run as soon as possible, just whenever the task enters the
 * kernel anyway.
 * @TWA_RESUME work is run only when the task exits the kernel and returns to
 * user mode, or before entering guest mode.
 * @TWA_NMI_CURRENT works like @TWA_RESUME, except it can only be used for
 * the current @task and only if the current context is NMI.
 *
 * Fails if the @task is exiting/exited and thus can't process this @work.
 * In that case the work item is not queued, @work->func() will never be
 * called, and it's up to the caller to arrange for an alternative
 * mechanism. Otherwise @work->func() will be called when the @task goes
 * through one of the aforementioned transitions, or exits.
 *
 * Note: there is no ordering guarantee on works queued here. The task_work
 * list is LIFO.
 *
 * RETURNS:
 * 0 if the work was queued, -ESRCH if the @task is exiting/exited, or
 * -EINVAL on an invalid use of @TWA_NMI_CURRENT.
 */
int task_work_add(struct task_struct *task, struct callback_head *work,
		  enum task_work_notify_mode notify)
{
	struct callback_head *head;

	if (notify == TWA_NMI_CURRENT) {
		if (WARN_ON_ONCE(task != current))
			return -EINVAL;
		if (!IS_ENABLED(CONFIG_IRQ_WORK))
			return -EINVAL;
	} else {
		kasan_record_aux_stack(work);
	}

	head = READ_ONCE(task->task_works);
	do {
		if (unlikely(head == &work_exited))
			return -ESRCH;
		work->next = head;
	} while (!try_cmpxchg(&task->task_works, &head, work));

	switch (notify) {
	case TWA_NONE:
		break;
	case TWA_RESUME:
		set_notify_resume(task);
		break;
	case TWA_SIGNAL:
		set_notify_signal(task);
		break;
	case TWA_SIGNAL_NO_IPI:
		__set_notify_signal(task);
		break;
#ifdef CONFIG_IRQ_WORK
	case TWA_NMI_CURRENT:
		set_tsk_thread_flag(current, TIF_NOTIFY_RESUME);
		irq_work_queue(this_cpu_ptr(&irq_work_NMI_resume));
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return 0;
}
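
/*
 * Illustrative sketch, not part of the original file: typical
 * task_work_add() usage. The callback_head is usually embedded in a
 * caller-owned structure and recovered with container_of() in the
 * callback. struct example_work, example_work_fn() and
 * example_queue_work() are hypothetical names. Kept under #if 0 so it
 * is not compiled.
 */
#if 0
#include <linux/slab.h>
#include <linux/task_work.h>

struct example_work {
	struct callback_head twork;
	int payload;
};

static void example_work_fn(struct callback_head *head)
{
	struct example_work *ew = container_of(head, struct example_work, twork);

	/* Process context on the return-to-user path; sleeping is fine. */
	kfree(ew);
}

static int example_queue_work(struct task_struct *task, int payload)
{
	struct example_work *ew = kzalloc(sizeof(*ew), GFP_KERNEL);

	if (!ew)
		return -ENOMEM;
	ew->payload = payload;
	init_task_work(&ew->twork, example_work_fn);
	/* On failure nothing was queued, so the caller still owns @ew. */
	if (task_work_add(task, &ew->twork, TWA_RESUME)) {
		kfree(ew);
		return -ESRCH;
	}
	return 0;
}
#endif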

/**
 * task_work_cancel_match - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @match: match function to call
 * @data: data to be passed in to match function
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_match(struct task_struct *task,
		       bool (*match)(struct callback_head *, void *data),
		       void *data)
{
	struct callback_head **pprev = &task->task_works;
	struct callback_head *work;
	unsigned long flags;

	if (likely(!task_work_pending(task)))
		return NULL;
	/*
	 * If cmpxchg() fails we continue without updating pprev.
	 * Either we raced with task_work_add() which added the
	 * new entry before this work, in which case we will find
	 * it again. Or we raced with task_work_run() and *pprev
	 * is NULL or work_exited.
	 */
	raw_spin_lock_irqsave(&task->pi_lock, flags);
	work = READ_ONCE(*pprev);
	while (work) {
		if (!match(work, data)) {
			pprev = &work->next;
			work = READ_ONCE(*pprev);
		} else if (try_cmpxchg(pprev, &work, work->next))
			break;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return work;
}
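
/*
 * Illustrative sketch, not part of the original file: cancelling with a
 * custom match callback, here matching on a field of the embedding
 * structure. struct example_work and example_cancel_payload() are
 * hypothetical names (see the sketch after task_work_add() above). Kept
 * under #if 0 so it is not compiled.
 */
#if 0
static bool example_match_payload(struct callback_head *cb, void *data)
{
	struct example_work *ew = container_of(cb, struct example_work, twork);

	return ew->payload == *(int *)data;
}

static void example_cancel_payload(struct task_struct *task, int payload)
{
	struct callback_head *cb;

	cb = task_work_cancel_match(task, example_match_payload, &payload);
	if (cb)
		kfree(container_of(cb, struct example_work, twork));
}
#endif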

static bool task_work_func_match(struct callback_head *cb, void *data)
{
	return cb->func == data;
}

/**
 * task_work_cancel_func - cancel a pending work matching a function added by task_work_add()
 * @task: the task which should execute the func's work
 * @func: identifies the func to match with a work to remove
 *
 * Find the last queued pending work with ->func == @func and remove
 * it from the queue.
 *
 * RETURNS:
 * The found work or NULL if not found.
 */
struct callback_head *
task_work_cancel_func(struct task_struct *task, task_work_func_t func)
{
	return task_work_cancel_match(task, task_work_func_match, func);
}
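
/*
 * Illustrative sketch, not part of the original file: removing the most
 * recently queued work whose callback is example_work_fn() (hypothetical,
 * from the sketch after task_work_add() above). Because the list is LIFO,
 * the last queued match is the one returned. Kept under #if 0 so it is
 * not compiled.
 */
#if 0
static void example_cancel_by_func(struct task_struct *task)
{
	struct callback_head *cb;

	cb = task_work_cancel_func(task, example_work_fn);
	if (cb)
		kfree(container_of(cb, struct example_work, twork));
}
#endif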

static bool task_work_match(struct callback_head *cb, void *data)
{
	return cb == data;
}

/**
 * task_work_cancel - cancel a pending work added by task_work_add()
 * @task: the task which should execute the work
 * @cb: the callback to remove if queued
 *
 * Remove a callback from a task's queue if queued.
 *
 * RETURNS:
 * True if the callback was queued and got cancelled, false otherwise.
 */
bool task_work_cancel(struct task_struct *task, struct callback_head *cb)
{
	struct callback_head *ret;

	ret = task_work_cancel_match(task, task_work_match, cb);

	return ret == cb;
}
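
/*
 * Illustrative sketch, not part of the original file: cancelling one
 * specific, caller-owned work item. If task_work_cancel() returns true
 * the callback will never run, so the caller can free or reuse the item
 * immediately. struct example_work is hypothetical (see above). Kept
 * under #if 0 so it is not compiled.
 */
#if 0
static void example_cancel_exact(struct task_struct *task,
				 struct example_work *ew)
{
	if (task_work_cancel(task, &ew->twork))
		kfree(ew);
	/* else: the callback ran or will run, and frees @ew itself. */
}
#endif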

/**
 * task_work_run - execute the works added by task_work_add()
 *
 * Flush the pending works. Should be used by the core kernel code.
 * Called before the task returns to user mode, or stops, or when
 * it exits. In the latter case task_work_add() can no longer add
 * new work after task_work_run() returns.
 */
void task_work_run(void)
{
	struct task_struct *task = current;
	struct callback_head *work, *head, *next;

	for (;;) {
		/*
		 * work->func() can do task_work_add(), do not set
		 * work_exited unless the list is empty.
		 */
		work = READ_ONCE(task->task_works);
		do {
			head = NULL;
			if (!work) {
				if (task->flags & PF_EXITING)
					head = &work_exited;
				else
					break;
			}
		} while (!try_cmpxchg(&task->task_works, &work, head));

		if (!work)
			break;
		/*
		 * Synchronize with task_work_cancel_match(): it cannot remove
		 * the first entry == work, since that cmpxchg(task_works)
		 * would fail. But it can remove another entry from the
		 * ->next list.
		 */
		raw_spin_lock_irq(&task->pi_lock);
		raw_spin_unlock_irq(&task->pi_lock);

		do {
			next = work->next;
			work->func(work);
			work = next;
			cond_resched();
		} while (work);
	}
}
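
/*
 * Illustrative sketch, not part of the original file: where the core
 * kernel calls task_work_run(). On exit, do_exit() reaches it via
 * exit_task_work(); on return to user mode, resume_user_mode_work()
 * (from <linux/resume_user_mode.h>) calls it roughly as below. This is
 * a simplified rendering; the real helper also does memcg, blkcg and
 * rseq housekeeping. Kept under #if 0 so it is not compiled.
 */
#if 0
static inline void resume_user_mode_work(struct pt_regs *regs)
{
	clear_thread_flag(TIF_NOTIFY_RESUME);
	smp_mb__after_atomic();
	if (unlikely(task_work_pending(current)))
		task_work_run();
}
#endif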

Source code of linux/kernel/task_work.c