| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* |
| 3 | * kernel/locking/mutex.c |
| 4 | * |
| 5 | * Mutexes: blocking mutual exclusion locks |
| 6 | * |
| 7 | * Started by Ingo Molnar: |
| 8 | * |
| 9 | * Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> |
| 10 | * |
| 11 | * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and |
| 12 | * David Howells for suggestions and improvements. |
| 13 | * |
| 14 | * - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline |
| 15 | * from the -rt tree, where it was originally implemented for rtmutexes |
| 16 | * by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale |
| 17 | * and Sven Dietrich.) |
| 18 | * |
| 19 | * Also see Documentation/locking/mutex-design.rst. |
| 20 | */ |
| 21 | #include <linux/mutex.h> |
| 22 | #include <linux/ww_mutex.h> |
| 23 | #include <linux/sched/signal.h> |
| 24 | #include <linux/sched/rt.h> |
| 25 | #include <linux/sched/wake_q.h> |
| 26 | #include <linux/sched/debug.h> |
| 27 | #include <linux/export.h> |
| 28 | #include <linux/spinlock.h> |
| 29 | #include <linux/interrupt.h> |
| 30 | #include <linux/debug_locks.h> |
| 31 | #include <linux/osq_lock.h> |
| 32 | #include <linux/hung_task.h> |
| 33 | |
| 34 | #define CREATE_TRACE_POINTS |
| 35 | #include <trace/events/lock.h> |
| 36 | |
| 37 | #ifndef CONFIG_PREEMPT_RT |
| 38 | #include "mutex.h" |
| 39 | |
| 40 | #ifdef CONFIG_DEBUG_MUTEXES |
| 41 | # define MUTEX_WARN_ON(cond) DEBUG_LOCKS_WARN_ON(cond) |
| 42 | #else |
| 43 | # define MUTEX_WARN_ON(cond) |
| 44 | #endif |
| 45 | |
| 46 | static void __mutex_init_generic(struct mutex *lock) |
| 47 | { |
| 48 | atomic_long_set(&lock->owner, 0); |
| 49 | raw_spin_lock_init(&lock->wait_lock); |
| 50 | INIT_LIST_HEAD(&lock->wait_list); |
| 51 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 52 | osq_lock_init(&lock->osq); |
| 53 | #endif |
| 54 | debug_mutex_init(lock); |
| 55 | } |
| 56 | |
| 57 | static inline struct task_struct *__owner_task(unsigned long owner) |
| 58 | { |
| 59 | return (struct task_struct *)(owner & ~MUTEX_FLAGS); |
| 60 | } |
| 61 | |
| 62 | bool mutex_is_locked(struct mutex *lock) |
| 63 | { |
| 64 | return __mutex_owner(lock) != NULL; |
| 65 | } |
| 66 | EXPORT_SYMBOL(mutex_is_locked); |
| 67 | |
| 68 | static inline unsigned long __owner_flags(unsigned long owner) |
| 69 | { |
| 70 | return owner & MUTEX_FLAGS; |
| 71 | } |
| 72 | |
| 73 | /* Do not use the return value as a pointer directly. */ |
| 74 | unsigned long mutex_get_owner(struct mutex *lock) |
| 75 | { |
| 76 | unsigned long owner = atomic_long_read(&lock->owner); |
| 77 | |
| 78 | return (unsigned long)__owner_task(owner); |
| 79 | } |
| 80 | |
| 81 | /* |
| 82 | * Returns: __mutex_owner(lock) on failure or NULL on success. |
| 83 | */ |
| 84 | static inline struct task_struct *__mutex_trylock_common(struct mutex *lock, bool handoff) |
| 85 | { |
| 86 | unsigned long owner, curr = (unsigned long)current; |
| 87 | |
| 88 | owner = atomic_long_read(&lock->owner); |
| 89 | for (;;) { /* must loop, can race against a flag */ |
| 90 | unsigned long flags = __owner_flags(owner); |
| 91 | unsigned long task = owner & ~MUTEX_FLAGS; |
| 92 | |
| 93 | if (task) { |
| 94 | if (flags & MUTEX_FLAG_PICKUP) { |
| 95 | if (task != curr) |
| 96 | break; |
| 97 | flags &= ~MUTEX_FLAG_PICKUP; |
| 98 | } else if (handoff) { |
| 99 | if (flags & MUTEX_FLAG_HANDOFF) |
| 100 | break; |
| 101 | flags |= MUTEX_FLAG_HANDOFF; |
| 102 | } else { |
| 103 | break; |
| 104 | } |
| 105 | } else { |
| 106 | MUTEX_WARN_ON(flags & (MUTEX_FLAG_HANDOFF | MUTEX_FLAG_PICKUP)); |
| 107 | task = curr; |
| 108 | } |
| 109 | |
| 110 | if (atomic_long_try_cmpxchg_acquire(&lock->owner, &owner, task | flags)) { |
| 111 | if (task == curr) |
| 112 | return NULL; |
| 113 | break; |
| 114 | } |
| 115 | } |
| 116 | |
| 117 | return __owner_task(owner); |
| 118 | } |
| 119 | |
| 120 | /* |
| 121 | * Trylock or set HANDOFF |
| 122 | */ |
| 123 | static inline bool __mutex_trylock_or_handoff(struct mutex *lock, bool handoff) |
| 124 | { |
| 125 | return !__mutex_trylock_common(lock, handoff); |
| 126 | } |
| 127 | |
| 128 | /* |
| 129 | * Actual trylock that will work on any unlocked state. |
| 130 | */ |
| 131 | static inline bool __mutex_trylock(struct mutex *lock) |
| 132 | { |
| 133 | return !__mutex_trylock_common(lock, false); |
| 134 | } |
| 135 | |
| 136 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 137 | /* |
| 138 | * Lockdep annotations are contained to the slow paths for simplicity. |
| 139 | * There is nothing that would stop spreading the lockdep annotations outwards |
| 140 | * except more code. |
| 141 | */ |
| 142 | void mutex_init_generic(struct mutex *lock) |
| 143 | { |
| 144 | __mutex_init_generic(lock); |
| 145 | } |
| 146 | EXPORT_SYMBOL(mutex_init_generic); |
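|  | /* |
|  | * For illustration only -- a minimal usage sketch, assuming the usual |
|  | * mutex_init()/DEFINE_MUTEX() wrappers sit in front of the init functions |
|  | * above. The structure and function names are hypothetical, and |
|  | * mutex_init() must run before the lock is first used: |
|  | * |
|  | *   static DEFINE_MUTEX(example_static_lock); |
|  | * |
|  | *   struct example_dev { |
|  | *       struct mutex lock; |
|  | *       int count; |
|  | *   }; |
|  | * |
|  | *   static void example_dev_setup(struct example_dev *dev) |
|  | *   { |
|  | *       mutex_init(&dev->lock); |
|  | *       dev->count = 0; |
|  | *   } |
|  | */ |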
| 147 | |
| 148 | /* |
| 149 | * Optimistic trylock that only works in the uncontended case. Make sure to |
| 150 | * follow with a __mutex_trylock() before failing. |
| 151 | */ |
| 152 | static __always_inline bool __mutex_trylock_fast(struct mutex *lock) |
| 153 | { |
| 154 | unsigned long curr = (unsigned long)current; |
| 155 | unsigned long zero = 0UL; |
| 156 | |
| 157 | MUTEX_WARN_ON(lock->magic != lock); |
| 158 | |
| 159 | if (atomic_long_try_cmpxchg_acquire(&lock->owner, &zero, curr)) |
| 160 | return true; |
| 161 | |
| 162 | return false; |
| 163 | } |
| 164 | |
| 165 | static __always_inline bool __mutex_unlock_fast(struct mutex *lock) |
| 166 | { |
| 167 | unsigned long curr = (unsigned long)current; |
| 168 | |
| 169 | return atomic_long_try_cmpxchg_release(&lock->owner, &curr, 0UL); |
| 170 | } |
| 171 | |
| 172 | #else /* CONFIG_DEBUG_LOCK_ALLOC */ |
| 173 | |
| 174 | void mutex_init_lockep(struct mutex *lock, const char *name, struct lock_class_key *key) |
| 175 | { |
| 176 | __mutex_init_generic(lock); |
| 177 | |
| 178 | /* |
| 179 | * Make sure we are not reinitializing a held lock: |
| 180 | */ |
| 181 | debug_check_no_locks_freed((void *)lock, sizeof(*lock)); |
| 182 | lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP); |
| 183 | } |
| 184 | EXPORT_SYMBOL(mutex_init_lockep); |
| 185 | #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ |
| 186 | |
| 187 | static inline void __mutex_set_flag(struct mutex *lock, unsigned long flag) |
| 188 | { |
| 189 | atomic_long_or(flag, &lock->owner); |
| 190 | } |
| 191 | |
| 192 | static inline void __mutex_clear_flag(struct mutex *lock, unsigned long flag) |
| 193 | { |
| 194 | atomic_long_andnot(flag, &lock->owner); |
| 195 | } |
| 196 | |
| 197 | static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) |
| 198 | { |
| 199 | return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; |
| 200 | } |
| 201 | |
| 202 | /* |
| 203 | * Add @waiter to a given location in the lock wait_list and set the |
| 204 | * FLAG_WAITERS flag if it's the first waiter. |
| 205 | */ |
| 206 | static void |
| 207 | __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, |
| 208 | struct list_head *list) |
| 209 | { |
| 210 | hung_task_set_blocker(lock, BLOCKER_TYPE_MUTEX); |
| 211 | debug_mutex_add_waiter(lock, waiter, current); |
| 212 | |
| 213 | list_add_tail(&waiter->list, list); |
| 214 | if (__mutex_waiter_is_first(lock, waiter)) |
| 215 | __mutex_set_flag(lock, MUTEX_FLAG_WAITERS); |
| 216 | } |
| 217 | |
| 218 | static void |
| 219 | __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) |
| 220 | { |
| 221 | list_del(&waiter->list); |
| 222 | if (likely(list_empty(&lock->wait_list))) |
| 223 | __mutex_clear_flag(lock, MUTEX_FLAGS); |
| 224 | |
| 225 | debug_mutex_remove_waiter(lock, waiter, current); |
| 226 | hung_task_clear_blocker(); |
| 227 | } |
| 228 | |
| 229 | /* |
| 230 | * Give up ownership to a specific task; when @task = NULL, this is equivalent |
| 231 | * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF and preserves |
| 232 | * WAITERS. Provides RELEASE semantics like a regular unlock; the matching |
| 233 | * ACQUIRE for the handoff is provided by __mutex_trylock(). |
| 234 | */ |
| 235 | static void __mutex_handoff(struct mutex *lock, struct task_struct *task) |
| 236 | { |
| 237 | unsigned long owner = atomic_long_read(&lock->owner); |
| 238 | |
| 239 | for (;;) { |
| 240 | unsigned long new; |
| 241 | |
| 242 | MUTEX_WARN_ON(__owner_task(owner) != current); |
| 243 | MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); |
| 244 | |
| 245 | new = (owner & MUTEX_FLAG_WAITERS); |
| 246 | new |= (unsigned long)task; |
| 247 | if (task) |
| 248 | new |= MUTEX_FLAG_PICKUP; |
| 249 | |
| 250 | if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, new)) |
| 251 | break; |
| 252 | } |
| 253 | } |
| 254 | |
| 255 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 256 | /* |
| 257 | * We split the mutex lock/unlock logic into separate fastpath and |
| 258 | * slowpath functions, to reduce the register pressure on the fastpath. |
| 259 | * We also put the fastpath first in the kernel image, to make sure the |
| 260 | * branch is predicted by the CPU as default-untaken. |
| 261 | */ |
| 262 | static void __sched __mutex_lock_slowpath(struct mutex *lock); |
| 263 | |
| 264 | /** |
| 265 | * mutex_lock - acquire the mutex |
| 266 | * @lock: the mutex to be acquired |
| 267 | * |
| 268 | * Lock the mutex exclusively for this task. If the mutex is not |
| 269 | * available right now, it will sleep until it can get it. |
| 270 | * |
| 271 | * The mutex must later on be released by the same task that |
| 272 | * acquired it. Recursive locking is not allowed. The task |
| 273 | * may not exit without first unlocking the mutex. Also, kernel |
| 274 | * memory where the mutex resides must not be freed with |
| 275 | * the mutex still locked. The mutex must first be initialized |
| 276 | * (or statically defined) before it can be locked. memset()-ing |
| 277 | * the mutex to 0 is not allowed. |
| 278 | * |
| 279 | * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging |
| 280 | * checks that will enforce the restrictions and will also do |
| 281 | * deadlock debugging) |
| 282 | * |
| 283 | * This function is similar to (but not equivalent to) down(). |
| 284 | */ |
| 285 | void __sched mutex_lock(struct mutex *lock) |
| 286 | { |
| 287 | might_sleep(); |
| 288 | |
| 289 | if (!__mutex_trylock_fast(lock)) |
| 290 | __mutex_lock_slowpath(lock); |
| 291 | } |
| 292 | EXPORT_SYMBOL(mutex_lock); |
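|  | /* |
|  | * Illustrative (hypothetical) caller, sketching the pattern the kernel-doc |
|  | * above describes: the same task locks, touches the shared data and |
|  | * unlocks, with no recursion and no exit while holding the lock: |
|  | * |
|  | *   static LIST_HEAD(example_list); |
|  | *   static DEFINE_MUTEX(example_lock); |
|  | * |
|  | *   static void example_add(struct list_head *node) |
|  | *   { |
|  | *       mutex_lock(&example_lock); |
|  | *       list_add_tail(node, &example_list); |
|  | *       mutex_unlock(&example_lock); |
|  | *   } |
|  | */ |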
| 293 | #endif |
| 294 | |
| 295 | #include "ww_mutex.h" |
| 296 | |
| 297 | #ifdef CONFIG_MUTEX_SPIN_ON_OWNER |
| 298 | |
| 299 | /* |
| 300 | * Trylock variant that returns the owning task on failure. |
| 301 | */ |
| 302 | static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) |
| 303 | { |
| 304 | return __mutex_trylock_common(lock, false); |
| 305 | } |
| 306 | |
| 307 | static inline |
| 308 | bool ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
| 309 | struct mutex_waiter *waiter) |
| 310 | { |
| 311 | struct ww_mutex *ww; |
| 312 | |
| 313 | ww = container_of(lock, struct ww_mutex, base); |
| 314 | |
| 315 | /* |
| 316 | * If ww->ctx is set the contents are undefined, only |
| 317 | * by acquiring wait_lock there is a guarantee that |
| 318 | * they are not invalid when reading. |
| 319 | * |
| 320 | * As such, when deadlock detection needs to be |
| 321 | * performed, the optimistic spinning cannot be done. |
| 322 | * |
| 323 | * Check this in every inner iteration because we may |
| 324 | * be racing against another thread's ww_mutex_lock. |
| 325 | */ |
| 326 | if (ww_ctx->acquired > 0 && READ_ONCE(ww->ctx)) |
| 327 | return false; |
| 328 | |
| 329 | /* |
| 330 | * If we aren't on the wait list yet, cancel the spin |
| 331 | * if there are waiters. We want to avoid stealing the |
| 332 | * lock from a waiter with an earlier stamp, since the |
| 333 | * other thread may already own a lock that we also |
| 334 | * need. |
| 335 | */ |
| 336 | if (!waiter && (atomic_long_read(&lock->owner) & MUTEX_FLAG_WAITERS)) |
| 337 | return false; |
| 338 | |
| 339 | /* |
| 340 | * Similarly, stop spinning if we are no longer the |
| 341 | * first waiter. |
| 342 | */ |
| 343 | if (waiter && !__mutex_waiter_is_first(lock, waiter)) |
| 344 | return false; |
| 345 | |
| 346 | return true; |
| 347 | } |
| 348 | |
| 349 | /* |
| 350 | * Look out! "owner" is an entirely speculative pointer access and not |
| 351 | * reliable. |
| 352 | * |
| 353 | * "noinline" so that this function shows up on perf profiles. |
| 354 | */ |
| 355 | static noinline |
| 356 | bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, |
| 357 | struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) |
| 358 | { |
| 359 | bool ret = true; |
| 360 | |
| 361 | lockdep_assert_preemption_disabled(); |
| 362 | |
| 363 | while (__mutex_owner(lock) == owner) { |
| 364 | /* |
| 365 | * Ensure we emit the owner->on_cpu dereference _after_ |
| 366 | * checking lock->owner still matches owner. We have already |
| 367 | * disabled preemption, which is equivalent to an RCU read-side |
| 368 | * critical section in the optimistic spinning code. Thus the |
| 369 | * task_struct will not go away during the spinning |
| 370 | * period. |
| 371 | */ |
| 372 | barrier(); |
| 373 | |
| 374 | /* |
| 375 | * Use vcpu_is_preempted to detect lock holder preemption issue. |
| 376 | */ |
| 377 | if (!owner_on_cpu(owner) || need_resched()) { |
| 378 | ret = false; |
| 379 | break; |
| 380 | } |
| 381 | |
| 382 | if (ww_ctx && !ww_mutex_spin_on_owner(lock, ww_ctx, waiter)) { |
| 383 | ret = false; |
| 384 | break; |
| 385 | } |
| 386 | |
| 387 | cpu_relax(); |
| 388 | } |
| 389 | |
| 390 | return ret; |
| 391 | } |
| 392 | |
| 393 | /* |
| 394 | * Initial check for entering the mutex spinning loop |
| 395 | */ |
| 396 | static inline int mutex_can_spin_on_owner(struct mutex *lock) |
| 397 | { |
| 398 | struct task_struct *owner; |
| 399 | int retval = 1; |
| 400 | |
| 401 | lockdep_assert_preemption_disabled(); |
| 402 | |
| 403 | if (need_resched()) |
| 404 | return 0; |
| 405 | |
| 406 | /* |
| 407 | * We have already disabled preemption, which is equivalent to an RCU |
| 408 | * read-side critical section in the optimistic spinning code. Thus the |
| 409 | * task_struct will not go away during the spinning period. |
| 410 | */ |
| 411 | owner = __mutex_owner(lock); |
| 412 | if (owner) |
| 413 | retval = owner_on_cpu(owner); |
| 414 | |
| 415 | /* |
| 416 | * If lock->owner is not set, the mutex has been released. Return true |
| 417 | * such that we'll trylock in the spin path, which is a faster option |
| 418 | * than the blocking slow path. |
| 419 | */ |
| 420 | return retval; |
| 421 | } |
| 422 | |
| 423 | /* |
| 424 | * Optimistic spinning. |
| 425 | * |
| 426 | * We try to spin for acquisition when we find that the lock owner |
| 427 | * is currently running on a (different) CPU and while we don't |
| 428 | * need to reschedule. The rationale is that if the lock owner is |
| 429 | * running, it is likely to release the lock soon. |
| 430 | * |
| 431 | * The mutex spinners are queued up using MCS lock so that only one |
| 432 | * spinner can compete for the mutex. However, if mutex spinning isn't |
| 433 | * going to happen, there is no point in going through the lock/unlock |
| 434 | * overhead. |
| 435 | * |
| 436 | * Returns true when the lock was taken, otherwise false, indicating |
| 437 | * that we need to jump to the slowpath and sleep. |
| 438 | * |
| 439 | * The waiter flag is set to true if the spinner is a waiter in the wait |
| 440 | * queue. The waiter-spinner will spin on the lock directly and concurrently |
| 441 | * with the spinner at the head of the OSQ, if present, until the owner is |
| 442 | * changed to itself. |
| 443 | */ |
| 444 | static __always_inline bool |
| 445 | mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
| 446 | struct mutex_waiter *waiter) |
| 447 | { |
| 448 | if (!waiter) { |
| 449 | /* |
| 450 | * The purpose of the mutex_can_spin_on_owner() function is |
| 451 | * to eliminate the overhead of osq_lock() and osq_unlock() |
| 452 | * in case spinning isn't possible. As a waiter-spinner |
| 453 | * is not going to take OSQ lock anyway, there is no need |
| 454 | * to call mutex_can_spin_on_owner(). |
| 455 | */ |
| 456 | if (!mutex_can_spin_on_owner(lock)) |
| 457 | goto fail; |
| 458 | |
| 459 | /* |
| 460 | * In order to avoid a stampede of mutex spinners trying to |
| 461 | * acquire the mutex all at once, the spinners need to take a |
| 462 | * MCS (queued) lock first before spinning on the owner field. |
| 463 | */ |
| 464 | if (!osq_lock(&lock->osq)) |
| 465 | goto fail; |
| 466 | } |
| 467 | |
| 468 | for (;;) { |
| 469 | struct task_struct *owner; |
| 470 | |
| 471 | /* Try to acquire the mutex... */ |
| 472 | owner = __mutex_trylock_or_owner(lock); |
| 473 | if (!owner) |
| 474 | break; |
| 475 | |
| 476 | /* |
| 477 | * There's an owner, wait for it to either |
| 478 | * release the lock or go to sleep. |
| 479 | */ |
| 480 | if (!mutex_spin_on_owner(lock, owner, ww_ctx, waiter)) |
| 481 | goto fail_unlock; |
| 482 | |
| 483 | /* |
| 484 | * The cpu_relax() call is a compiler barrier which forces |
| 485 | * everything in this loop to be re-loaded. We don't need |
| 486 | * memory barriers as we'll eventually observe the right |
| 487 | * values at the cost of a few extra spins. |
| 488 | */ |
| 489 | cpu_relax(); |
| 490 | } |
| 491 | |
| 492 | if (!waiter) |
| 493 | osq_unlock(&lock->osq); |
| 494 | |
| 495 | return true; |
| 496 | |
| 497 | |
| 498 | fail_unlock: |
| 499 | if (!waiter) |
| 500 | osq_unlock(&lock->osq); |
| 501 | |
| 502 | fail: |
| 503 | /* |
| 504 | * If we fell out of the spin path because of need_resched(), |
| 505 | * reschedule now, before we try-lock the mutex. This avoids getting |
| 506 | * scheduled out right after we obtained the mutex. |
| 507 | */ |
| 508 | if (need_resched()) { |
| 509 | /* |
| 510 | * We _should_ have TASK_RUNNING here, but just in case |
| 511 | * we do not, make it so, otherwise we might get stuck. |
| 512 | */ |
| 513 | __set_current_state(TASK_RUNNING); |
| 514 | schedule_preempt_disabled(); |
| 515 | } |
| 516 | |
| 517 | return false; |
| 518 | } |
| 519 | #else |
| 520 | static __always_inline bool |
| 521 | mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, |
| 522 | struct mutex_waiter *waiter) |
| 523 | { |
| 524 | return false; |
| 525 | } |
| 526 | #endif |
| 527 | |
| 528 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip); |
| 529 | |
| 530 | /** |
| 531 | * mutex_unlock - release the mutex |
| 532 | * @lock: the mutex to be released |
| 533 | * |
| 534 | * Unlock a mutex that has been locked by this task previously. |
| 535 | * |
| 536 | * This function must not be used in interrupt context. Unlocking |
| 537 | * of a not locked mutex is not allowed. |
| 538 | * |
| 539 | * The caller must ensure that the mutex stays alive until this function has |
| 540 | * returned - mutex_unlock() can NOT directly be used to release an object such |
| 541 | * that another concurrent task can free it. |
| 542 | * Mutexes are different from spinlocks & refcounts in this aspect. |
| 543 | * |
| 544 | * This function is similar to (but not equivalent to) up(). |
| 545 | */ |
| 546 | void __sched mutex_unlock(struct mutex *lock) |
| 547 | { |
| 548 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 549 | if (__mutex_unlock_fast(lock)) |
| 550 | return; |
| 551 | #endif |
| 552 | __mutex_unlock_slowpath(lock, _RET_IP_); |
| 553 | } |
| 554 | EXPORT_SYMBOL(mutex_unlock); |
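|  | /* |
|  | * Sketch of the lifetime caveat above, with hypothetical names. A waiter |
|  | * that acquires the mutex must not assume the previous owner is done |
|  | * touching it, so the following is unsafe: |
|  | * |
|  | *   mutex_lock(&obj->lock); |
|  | *   if (obj->dying) { |
|  | *       mutex_unlock(&obj->lock); |
|  | *       kfree(obj);     <-- BAD: the previous owner's mutex_unlock() |
|  | *                           may still be operating on obj->lock. |
|  | *   } |
|  | * |
|  | * Use a reference count (or RCU) and free the object from the final put |
|  | * instead. |
|  | */ |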
| 555 | |
| 556 | /** |
| 557 | * ww_mutex_unlock - release the w/w mutex |
| 558 | * @lock: the mutex to be released |
| 559 | * |
| 560 | * Unlock a mutex that has been locked by this task previously with any of the |
| 561 | * ww_mutex_lock* functions (with or without an acquire context). It is |
| 562 | * forbidden to release the locks after releasing the acquire context. |
| 563 | * |
| 564 | * This function must not be used in interrupt context. Unlocking |
| 565 | * of an unlocked mutex is not allowed. |
| 566 | */ |
| 567 | void __sched ww_mutex_unlock(struct ww_mutex *lock) |
| 568 | { |
| 569 | __ww_mutex_unlock(lock); |
| 570 | mutex_unlock(&lock->base); |
| 571 | } |
| 572 | EXPORT_SYMBOL(ww_mutex_unlock); |
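|  | /* |
|  | * Illustrative (hypothetical) wait/wound usage, matching the rule above |
|  | * that every lock taken under an acquire context is released before the |
|  | * context itself is released. A minimal two-lock sketch with -EDEADLK |
|  | * backoff: |
|  | * |
|  | *   static DEFINE_WW_CLASS(example_ww_class); |
|  | * |
|  | *   static int example_lock_both(struct ww_mutex *a, struct ww_mutex *b) |
|  | *   { |
|  | *       struct ww_acquire_ctx ctx; |
|  | *       int ret; |
|  | * |
|  | *       ww_acquire_init(&ctx, &example_ww_class); |
|  | * |
|  | *       ret = ww_mutex_lock(a, &ctx); |
|  | *       if (!ret) { |
|  | *           ret = ww_mutex_lock(b, &ctx); |
|  | *           while (ret == -EDEADLK) { |
|  | *               ww_mutex_unlock(a); |
|  | *               ww_mutex_lock_slow(b, &ctx); |
|  | *               swap(a, b); |
|  | *               ret = ww_mutex_lock(b, &ctx); |
|  | *           } |
|  | *       } |
|  | *       if (!ret) { |
|  | *           ww_acquire_done(&ctx); |
|  | *           ... use the protected data ... |
|  | *           ww_mutex_unlock(b); |
|  | *           ww_mutex_unlock(a); |
|  | *       } |
|  | *       ww_acquire_fini(&ctx); |
|  | *       return ret; |
|  | *   } |
|  | */ |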
| 573 | |
| 574 | /* |
| 575 | * Lock a mutex (possibly interruptible), slowpath: |
| 576 | */ |
| 577 | static __always_inline int __sched |
| 578 | __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, |
| 579 | struct lockdep_map *nest_lock, unsigned long ip, |
| 580 | struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx) |
| 581 | { |
| 582 | DEFINE_WAKE_Q(wake_q); |
| 583 | struct mutex_waiter waiter; |
| 584 | struct ww_mutex *ww; |
| 585 | unsigned long flags; |
| 586 | int ret; |
| 587 | |
| 588 | if (!use_ww_ctx) |
| 589 | ww_ctx = NULL; |
| 590 | |
| 591 | might_sleep(); |
| 592 | |
| 593 | MUTEX_WARN_ON(lock->magic != lock); |
| 594 | |
| 595 | ww = container_of(lock, struct ww_mutex, base); |
| 596 | if (ww_ctx) { |
| 597 | if (unlikely(ww_ctx == READ_ONCE(ww->ctx))) |
| 598 | return -EALREADY; |
| 599 | |
| 600 | /* |
| 601 | * Reset the wounded flag after a kill. No other process can |
| 602 | * race and wound us here since they can't have a valid owner |
| 603 | * pointer if we don't have any locks held. |
| 604 | */ |
| 605 | if (ww_ctx->acquired == 0) |
| 606 | ww_ctx->wounded = 0; |
| 607 | |
| 608 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 609 | nest_lock = &ww_ctx->dep_map; |
| 610 | #endif |
| 611 | } |
| 612 | |
| 613 | preempt_disable(); |
| 614 | mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip); |
| 615 | |
| 616 | trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN); |
| 617 | if (__mutex_trylock(lock) || |
| 618 | mutex_optimistic_spin(lock, ww_ctx, NULL)) { |
| 619 | /* got the lock, yay! */ |
| 620 | lock_acquired(&lock->dep_map, ip); |
| 621 | if (ww_ctx) |
| 622 | ww_mutex_set_context_fastpath(ww, ww_ctx); |
| 623 | trace_contention_end(lock, 0); |
| 624 | preempt_enable(); |
| 625 | return 0; |
| 626 | } |
| 627 | |
| 628 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| 629 | /* |
| 630 | * After waiting to acquire the wait_lock, try again. |
| 631 | */ |
| 632 | if (__mutex_trylock(lock)) { |
| 633 | if (ww_ctx) |
| 634 | __ww_mutex_check_waiters(lock, ww_ctx, &wake_q); |
| 635 | |
| 636 | goto skip_wait; |
| 637 | } |
| 638 | |
| 639 | debug_mutex_lock_common(lock, &waiter); |
| 640 | waiter.task = current; |
| 641 | if (use_ww_ctx) |
| 642 | waiter.ww_ctx = ww_ctx; |
| 643 | |
| 644 | lock_contended(&lock->dep_map, ip); |
| 645 | |
| 646 | if (!use_ww_ctx) { |
| 647 | /* add waiting tasks to the end of the waitqueue (FIFO): */ |
| 648 | __mutex_add_waiter(lock, &waiter, &lock->wait_list); |
| 649 | } else { |
| 650 | /* |
| 651 | * Add in stamp order, waking up waiters that must kill |
| 652 | * themselves. |
| 653 | */ |
| 654 | ret = __ww_mutex_add_waiter(&waiter, lock, ww_ctx, &wake_q); |
| 655 | if (ret) |
| 656 | goto err_early_kill; |
| 657 | } |
| 658 | |
| 659 | __set_task_blocked_on(current, lock); |
| 660 | set_current_state(state); |
| 661 | trace_contention_begin(lock, LCB_F_MUTEX); |
| 662 | for (;;) { |
| 663 | bool first; |
| 664 | |
| 665 | /* |
| 666 | * Once we hold wait_lock, we're serialized against |
| 667 | * mutex_unlock() handing the lock off to us, do a trylock |
| 668 | * before testing the error conditions to make sure we pick up |
| 669 | * the handoff. |
| 670 | */ |
| 671 | if (__mutex_trylock(lock)) |
| 672 | goto acquired; |
| 673 | |
| 674 | /* |
| 675 | * Check for signals and kill conditions while holding |
| 676 | * wait_lock. This ensures the lock cancellation is ordered |
| 677 | * against mutex_unlock() and wake-ups do not go missing. |
| 678 | */ |
| 679 | if (signal_pending_state(state, current)) { |
| 680 | ret = -EINTR; |
| 681 | goto err; |
| 682 | } |
| 683 | |
| 684 | if (ww_ctx) { |
| 685 | ret = __ww_mutex_check_kill(lock, &waiter, ww_ctx); |
| 686 | if (ret) |
| 687 | goto err; |
| 688 | } |
| 689 | |
| 690 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
| 691 | |
| 692 | schedule_preempt_disabled(); |
| 693 | |
| 694 | first = __mutex_waiter_is_first(lock, &waiter); |
| 695 | |
| 696 | /* |
| 697 | * As we have likely been woken up by a task |
| 698 | * that has cleared our blocked_on state, re-set |
| 699 | * it to the lock we are trying to acquire. |
| 700 | */ |
| 701 | set_task_blocked_on(current, lock); |
| 702 | set_current_state(state); |
| 703 | /* |
| 704 | * Here we order against unlock; we must either see it change |
| 705 | * state back to RUNNING and fall through the next schedule(), |
| 706 | * or we must see its unlock and acquire. |
| 707 | */ |
| 708 | if (__mutex_trylock_or_handoff(lock, first)) |
| 709 | break; |
| 710 | |
| 711 | if (first) { |
| 712 | trace_contention_begin(lock, LCB_F_MUTEX | LCB_F_SPIN); |
| 713 | /* |
| 714 | * mutex_optimistic_spin() can call schedule(), so |
| 715 | * clear blocked on so we don't become unselectable |
| 716 | * to run. |
| 717 | */ |
| 718 | clear_task_blocked_on(current, lock); |
| 719 | if (mutex_optimistic_spin(lock, ww_ctx, &waiter)) |
| 720 | break; |
| 721 | set_task_blocked_on(current, lock); |
| 722 | trace_contention_begin(lock, LCB_F_MUTEX); |
| 723 | } |
| 724 | |
| 725 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| 726 | } |
| 727 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| 728 | acquired: |
| 729 | __clear_task_blocked_on(current, lock); |
| 730 | __set_current_state(TASK_RUNNING); |
| 731 | |
| 732 | if (ww_ctx) { |
| 733 | /* |
| 734 | * Wound-Wait; we stole the lock (!first_waiter), check the |
| 735 | * waiters as anyone might want to wound us. |
| 736 | */ |
| 737 | if (!ww_ctx->is_wait_die && |
| 738 | !__mutex_waiter_is_first(lock, &waiter)) |
| 739 | __ww_mutex_check_waiters(lock, ww_ctx, &wake_q); |
| 740 | } |
| 741 | |
| 742 | __mutex_remove_waiter(lock, &waiter); |
| 743 | |
| 744 | debug_mutex_free_waiter(&waiter); |
| 745 | |
| 746 | skip_wait: |
| 747 | /* got the lock - cleanup and rejoice! */ |
| 748 | lock_acquired(&lock->dep_map, ip); |
| 749 | trace_contention_end(lock, 0); |
| 750 | |
| 751 | if (ww_ctx) |
| 752 | ww_mutex_lock_acquired(ww, ww_ctx); |
| 753 | |
| 754 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
| 755 | preempt_enable(); |
| 756 | return 0; |
| 757 | |
| 758 | err: |
| 759 | __clear_task_blocked_on(current, lock); |
| 760 | __set_current_state(TASK_RUNNING); |
| 761 | __mutex_remove_waiter(lock, &waiter); |
| 762 | err_early_kill: |
| 763 | WARN_ON(__get_task_blocked_on(current)); |
| 764 | trace_contention_end(lock, ret); |
| 765 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
| 766 | debug_mutex_free_waiter(&waiter); |
| 767 | mutex_release(&lock->dep_map, ip); |
| 768 | preempt_enable(); |
| 769 | return ret; |
| 770 | } |
| 771 | |
| 772 | static int __sched |
| 773 | __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, |
| 774 | struct lockdep_map *nest_lock, unsigned long ip) |
| 775 | { |
| 776 | return __mutex_lock_common(lock, state, subclass, nest_lock, ip, NULL, false); |
| 777 | } |
| 778 | |
| 779 | static int __sched |
| 780 | __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, |
| 781 | unsigned long ip, struct ww_acquire_ctx *ww_ctx) |
| 782 | { |
| 783 | return __mutex_lock_common(lock, state, subclass, NULL, ip, ww_ctx, true); |
| 784 | } |
| 785 | |
| 786 | /** |
| 787 | * ww_mutex_trylock - tries to acquire the w/w mutex with optional acquire context |
| 788 | * @ww: mutex to lock |
| 789 | * @ww_ctx: optional w/w acquire context |
| 790 | * |
| 791 | * Trylocks a mutex with the optional acquire context; no deadlock detection is |
| 792 | * possible. Returns 1 if the mutex has been acquired successfully, 0 otherwise. |
| 793 | * |
| 794 | * Unlike ww_mutex_lock, no deadlock handling is performed. However, if @ww_ctx is |
| 795 | * specified, -EALREADY handling may happen in calls to ww_mutex_trylock. |
| 796 | * |
| 797 | * A mutex acquired with this function must be released with ww_mutex_unlock. |
| 798 | */ |
| 799 | int ww_mutex_trylock(struct ww_mutex *ww, struct ww_acquire_ctx *ww_ctx) |
| 800 | { |
| 801 | if (!ww_ctx) |
| 802 | return mutex_trylock(&ww->base); |
| 803 | |
| 804 | MUTEX_WARN_ON(ww->base.magic != &ww->base); |
| 805 | |
| 806 | /* |
| 807 | * Reset the wounded flag after a kill. No other process can |
| 808 | * race and wound us here, since they can't have a valid owner |
| 809 | * pointer if we don't have any locks held. |
| 810 | */ |
| 811 | if (ww_ctx->acquired == 0) |
| 812 | ww_ctx->wounded = 0; |
| 813 | |
| 814 | if (__mutex_trylock(&ww->base)) { |
| 815 | ww_mutex_set_context_fastpath(ww, ww_ctx); |
| 816 | mutex_acquire_nest(&ww->base.dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_); |
| 817 | return 1; |
| 818 | } |
| 819 | |
| 820 | return 0; |
| 821 | } |
| 822 | EXPORT_SYMBOL(ww_mutex_trylock); |
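|  | /* |
|  | * Illustrative (hypothetical) use of ww_mutex_trylock(): opportunistically |
|  | * pick up one more object while already holding locks under @ww_ctx, and |
|  | * fall back to the sleeping path (with the usual -EDEADLK backoff) when it |
|  | * is contended: |
|  | * |
|  | *   if (ww_mutex_trylock(&obj->lock, &ctx)) { |
|  | *       ... fast path, obj->lock is now held under ctx ... |
|  | *   } else { |
|  | *       ret = ww_mutex_lock(&obj->lock, &ctx); |
|  | *       ... handle -EDEADLK as in the backoff example above ... |
|  | *   } |
|  | */ |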
| 823 | |
| 824 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 825 | void __sched |
| 826 | mutex_lock_nested(struct mutex *lock, unsigned int subclass) |
| 827 | { |
| 828 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_); |
| 829 | } |
| 830 | |
| 831 | EXPORT_SYMBOL_GPL(mutex_lock_nested); |
| 832 | |
| 833 | void __sched |
| 834 | _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) |
| 835 | { |
| 836 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_); |
| 837 | } |
| 838 | EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock); |
| 839 | |
| 840 | int __sched |
| 841 | _mutex_lock_killable(struct mutex *lock, unsigned int subclass, |
| 842 | struct lockdep_map *nest) |
| 843 | { |
| 844 | return __mutex_lock(lock, TASK_KILLABLE, subclass, nest, _RET_IP_); |
| 845 | } |
| 846 | EXPORT_SYMBOL_GPL(_mutex_lock_killable); |
| 847 | |
| 848 | int __sched |
| 849 | mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass) |
| 850 | { |
| 851 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_); |
| 852 | } |
| 853 | EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested); |
| 854 | |
| 855 | void __sched |
| 856 | mutex_lock_io_nested(struct mutex *lock, unsigned int subclass) |
| 857 | { |
| 858 | int token; |
| 859 | |
| 860 | might_sleep(); |
| 861 | |
| 862 | token = io_schedule_prepare(); |
| 863 | __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, |
| 864 | subclass, NULL, _RET_IP_, NULL, 0); |
| 865 | io_schedule_finish(token); |
| 866 | } |
| 867 | EXPORT_SYMBOL_GPL(mutex_lock_io_nested); |
| 868 | |
| 869 | static inline int |
| 870 | ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 871 | { |
| 872 | #ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH |
| 873 | unsigned tmp; |
| 874 | |
| 875 | if (ctx->deadlock_inject_countdown-- == 0) { |
| 876 | tmp = ctx->deadlock_inject_interval; |
| 877 | if (tmp > UINT_MAX/4) |
| 878 | tmp = UINT_MAX; |
| 879 | else |
| 880 | tmp = tmp*2 + tmp + tmp/2; |
| 881 | |
| 882 | ctx->deadlock_inject_interval = tmp; |
| 883 | ctx->deadlock_inject_countdown = tmp; |
| 884 | ctx->contending_lock = lock; |
| 885 | |
| 886 | ww_mutex_unlock(lock); |
| 887 | |
| 888 | return -EDEADLK; |
| 889 | } |
| 890 | #endif |
| 891 | |
| 892 | return 0; |
| 893 | } |
| 894 | |
| 895 | int __sched |
| 896 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 897 | { |
| 898 | int ret; |
| 899 | |
| 900 | might_sleep(); |
| 901 | ret = __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, |
| 902 | 0, _RET_IP_, ctx); |
| 903 | if (!ret && ctx && ctx->acquired > 1) |
| 904 | return ww_mutex_deadlock_injection(lock, ctx); |
| 905 | |
| 906 | return ret; |
| 907 | } |
| 908 | EXPORT_SYMBOL_GPL(ww_mutex_lock); |
| 909 | |
| 910 | int __sched |
| 911 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 912 | { |
| 913 | int ret; |
| 914 | |
| 915 | might_sleep(); |
| 916 | ret = __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, |
| 917 | 0, _RET_IP_, ctx); |
| 918 | |
| 919 | if (!ret && ctx && ctx->acquired > 1) |
| 920 | return ww_mutex_deadlock_injection(lock, ctx); |
| 921 | |
| 922 | return ret; |
| 923 | } |
| 924 | EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible); |
| 925 | |
| 926 | #endif |
| 927 | |
| 928 | /* |
| 929 | * Release the lock, slowpath: |
| 930 | */ |
| 931 | static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip) |
| 932 | { |
| 933 | struct task_struct *next = NULL; |
| 934 | DEFINE_WAKE_Q(wake_q); |
| 935 | unsigned long owner; |
| 936 | unsigned long flags; |
| 937 | |
| 938 | mutex_release(&lock->dep_map, ip); |
| 939 | |
| 940 | /* |
| 941 | * Release the lock before (potentially) taking the spinlock such that |
| 942 | * other contenders can get on with things ASAP. |
| 943 | * |
| 944 | * Except when HANDOFF, in that case we must not clear the owner field, |
| 945 | * but instead set it to the top waiter. |
| 946 | */ |
| 947 | owner = atomic_long_read(&lock->owner); |
| 948 | for (;;) { |
| 949 | MUTEX_WARN_ON(__owner_task(owner) != current); |
| 950 | MUTEX_WARN_ON(owner & MUTEX_FLAG_PICKUP); |
| 951 | |
| 952 | if (owner & MUTEX_FLAG_HANDOFF) |
| 953 | break; |
| 954 | |
| 955 | if (atomic_long_try_cmpxchg_release(&lock->owner, &owner, __owner_flags(owner))) { |
| 956 | if (owner & MUTEX_FLAG_WAITERS) |
| 957 | break; |
| 958 | |
| 959 | return; |
| 960 | } |
| 961 | } |
| 962 | |
| 963 | raw_spin_lock_irqsave(&lock->wait_lock, flags); |
| 964 | debug_mutex_unlock(lock); |
| 965 | if (!list_empty(&lock->wait_list)) { |
| 966 | /* get the first entry from the wait-list: */ |
| 967 | struct mutex_waiter *waiter = |
| 968 | list_first_entry(&lock->wait_list, |
| 969 | struct mutex_waiter, list); |
| 970 | |
| 971 | next = waiter->task; |
| 972 | |
| 973 | debug_mutex_wake_waiter(lock, waiter); |
| 974 | __clear_task_blocked_on(next, lock); |
| 975 | wake_q_add(&wake_q, next); |
| 976 | } |
| 977 | |
| 978 | if (owner & MUTEX_FLAG_HANDOFF) |
| 979 | __mutex_handoff(lock, next); |
| 980 | |
| 981 | raw_spin_unlock_irqrestore_wake(&lock->wait_lock, flags, &wake_q); |
| 982 | } |
| 983 | |
| 984 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 985 | /* |
| 986 | * Here come the less common (and hence less performance-critical) APIs: |
| 987 | * mutex_lock_interruptible() and mutex_trylock(). |
| 988 | */ |
| 989 | static noinline int __sched |
| 990 | __mutex_lock_killable_slowpath(struct mutex *lock); |
| 991 | |
| 992 | static noinline int __sched |
| 993 | __mutex_lock_interruptible_slowpath(struct mutex *lock); |
| 994 | |
| 995 | /** |
| 996 | * mutex_lock_interruptible() - Acquire the mutex, interruptible by signals. |
| 997 | * @lock: The mutex to be acquired. |
| 998 | * |
| 999 | * Lock the mutex like mutex_lock(). If a signal is delivered while the |
| 1000 | * process is sleeping, this function will return without acquiring the |
| 1001 | * mutex. |
| 1002 | * |
| 1003 | * Context: Process context. |
| 1004 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 1005 | * signal arrived. |
| 1006 | */ |
| 1007 | int __sched mutex_lock_interruptible(struct mutex *lock) |
| 1008 | { |
| 1009 | might_sleep(); |
| 1010 | |
| 1011 | if (__mutex_trylock_fast(lock)) |
| 1012 | return 0; |
| 1013 | |
| 1014 | return __mutex_lock_interruptible_slowpath(lock); |
| 1015 | } |
| 1016 | |
| 1017 | EXPORT_SYMBOL(mutex_lock_interruptible); |
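|  | /* |
|  | * Illustrative (hypothetical) caller: a syscall-path helper that backs out |
|  | * cleanly if a signal arrives while it waits for the lock. Returning |
|  | * -ERESTARTSYS is the usual driver idiom here: |
|  | * |
|  | *   static ssize_t example_write(struct example_dev *dev, const char *buf, |
|  | *                                size_t len) |
|  | *   { |
|  | *       if (mutex_lock_interruptible(&dev->lock)) |
|  | *           return -ERESTARTSYS; |
|  | *       ... copy data into dev ... |
|  | *       mutex_unlock(&dev->lock); |
|  | *       return len; |
|  | *   } |
|  | */ |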
| 1018 | |
| 1019 | /** |
| 1020 | * mutex_lock_killable() - Acquire the mutex, interruptible by fatal signals. |
| 1021 | * @lock: The mutex to be acquired. |
| 1022 | * |
| 1023 | * Lock the mutex like mutex_lock(). If a signal which will be fatal to |
| 1024 | * the current process is delivered while the process is sleeping, this |
| 1025 | * function will return without acquiring the mutex. |
| 1026 | * |
| 1027 | * Context: Process context. |
| 1028 | * Return: 0 if the lock was successfully acquired or %-EINTR if a |
| 1029 | * fatal signal arrived. |
| 1030 | */ |
| 1031 | int __sched mutex_lock_killable(struct mutex *lock) |
| 1032 | { |
| 1033 | might_sleep(); |
| 1034 | |
| 1035 | if (__mutex_trylock_fast(lock)) |
| 1036 | return 0; |
| 1037 | |
| 1038 | return __mutex_lock_killable_slowpath(lock); |
| 1039 | } |
| 1040 | EXPORT_SYMBOL(mutex_lock_killable); |
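|  | /* |
|  | * Illustrative (hypothetical) caller: like the interruptible variant, but |
|  | * only a fatal signal aborts the wait, which suits paths that cannot be |
|  | * cleanly restarted: |
|  | * |
|  | *   if (mutex_lock_killable(&dev->lock)) |
|  | *       return -EINTR; |
|  | *   ... |
|  | *   mutex_unlock(&dev->lock); |
|  | */ |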
| 1041 | |
| 1042 | /** |
| 1043 | * mutex_lock_io() - Acquire the mutex and mark the process as waiting for I/O |
| 1044 | * @lock: The mutex to be acquired. |
| 1045 | * |
| 1046 | * Lock the mutex like mutex_lock(). While the task is waiting for this |
| 1047 | * mutex, it will be accounted as being in the IO wait state by the |
| 1048 | * scheduler. |
| 1049 | * |
| 1050 | * Context: Process context. |
| 1051 | */ |
| 1052 | void __sched mutex_lock_io(struct mutex *lock) |
| 1053 | { |
| 1054 | int token; |
| 1055 | |
| 1056 | token = io_schedule_prepare(); |
| 1057 | mutex_lock(lock); |
| 1058 | io_schedule_finish(token); |
| 1059 | } |
| 1060 | EXPORT_SYMBOL_GPL(mutex_lock_io); |
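|  | /* |
|  | * Illustrative (hypothetical) caller: a path that may wait a long time |
|  | * behind a lock held across storage I/O, and wants that wait accounted as |
|  | * iowait rather than as an ordinary sleep: |
|  | * |
|  | *   mutex_lock_io(&cache->flush_lock); |
|  | *   ... issue and wait for the flush ... |
|  | *   mutex_unlock(&cache->flush_lock); |
|  | */ |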
| 1061 | |
| 1062 | static noinline void __sched |
| 1063 | __mutex_lock_slowpath(struct mutex *lock) |
| 1064 | { |
| 1065 | __mutex_lock(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_); |
| 1066 | } |
| 1067 | |
| 1068 | static noinline int __sched |
| 1069 | __mutex_lock_killable_slowpath(struct mutex *lock) |
| 1070 | { |
| 1071 | return __mutex_lock(lock, TASK_KILLABLE, 0, NULL, _RET_IP_); |
| 1072 | } |
| 1073 | |
| 1074 | static noinline int __sched |
| 1075 | __mutex_lock_interruptible_slowpath(struct mutex *lock) |
| 1076 | { |
| 1077 | return __mutex_lock(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_); |
| 1078 | } |
| 1079 | |
| 1080 | static noinline int __sched |
| 1081 | __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1082 | { |
| 1083 | return __ww_mutex_lock(&lock->base, TASK_UNINTERRUPTIBLE, 0, |
| 1084 | _RET_IP_, ctx); |
| 1085 | } |
| 1086 | |
| 1087 | static noinline int __sched |
| 1088 | __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, |
| 1089 | struct ww_acquire_ctx *ctx) |
| 1090 | { |
| 1091 | return __ww_mutex_lock(&lock->base, TASK_INTERRUPTIBLE, 0, |
| 1092 | _RET_IP_, ctx); |
| 1093 | } |
| 1094 | |
| 1095 | #endif |
| 1096 | |
| 1097 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 1098 | /** |
| 1099 | * mutex_trylock - try to acquire the mutex, without waiting |
| 1100 | * @lock: the mutex to be acquired |
| 1101 | * |
| 1102 | * Try to acquire the mutex atomically. Returns 1 if the mutex |
| 1103 | * has been acquired successfully, and 0 on contention. |
| 1104 | * |
| 1105 | * NOTE: this function follows the spin_trylock() convention, so |
| 1106 | * it is negated from the down_trylock() return values! Be careful |
| 1107 | * about this when converting semaphore users to mutexes. |
| 1108 | * |
| 1109 | * This function must not be used in interrupt context. The |
| 1110 | * mutex must be released by the same task that acquired it. |
| 1111 | */ |
| 1112 | int __sched mutex_trylock(struct mutex *lock) |
| 1113 | { |
| 1114 | MUTEX_WARN_ON(lock->magic != lock); |
| 1115 | return __mutex_trylock(lock); |
| 1116 | } |
| 1117 | EXPORT_SYMBOL(mutex_trylock); |
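|  | /* |
|  | * Illustrative (hypothetical) caller, showing the spin_trylock()-style |
|  | * polarity called out above (1 == acquired, 0 == not acquired), the |
|  | * opposite of down_trylock(): |
|  | * |
|  | *   if (mutex_trylock(&stats->lock)) { |
|  | *       stats->fast_hits++; |
|  | *       mutex_unlock(&stats->lock); |
|  | *   } else { |
|  | *       atomic_inc(&stats->skipped);   (do not block on this path) |
|  | *   } |
|  | */ |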
| 1118 | #else |
| 1119 | int __sched _mutex_trylock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock) |
| 1120 | { |
| 1121 | bool locked; |
| 1122 | |
| 1123 | MUTEX_WARN_ON(lock->magic != lock); |
| 1124 | locked = __mutex_trylock(lock); |
| 1125 | if (locked) |
| 1126 | mutex_acquire_nest(&lock->dep_map, 0, 1, nest_lock, _RET_IP_); |
| 1127 | |
| 1128 | return locked; |
| 1129 | } |
| 1130 | EXPORT_SYMBOL(_mutex_trylock_nest_lock); |
| 1131 | #endif |
| 1132 | |
| 1133 | #ifndef CONFIG_DEBUG_LOCK_ALLOC |
| 1134 | int __sched |
| 1135 | ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1136 | { |
| 1137 | might_sleep(); |
| 1138 | |
| 1139 | if (__mutex_trylock_fast(&lock->base)) { |
| 1140 | if (ctx) |
| 1141 | ww_mutex_set_context_fastpath(lock, ctx); |
| 1142 | return 0; |
| 1143 | } |
| 1144 | |
| 1145 | return __ww_mutex_lock_slowpath(lock, ctx); |
| 1146 | } |
| 1147 | EXPORT_SYMBOL(ww_mutex_lock); |
| 1148 | |
| 1149 | int __sched |
| 1150 | ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) |
| 1151 | { |
| 1152 | might_sleep(); |
| 1153 | |
| 1154 | if (__mutex_trylock_fast(&lock->base)) { |
| 1155 | if (ctx) |
| 1156 | ww_mutex_set_context_fastpath(lock, ctx); |
| 1157 | return 0; |
| 1158 | } |
| 1159 | |
| 1160 | return __ww_mutex_lock_interruptible_slowpath(lock, ctx); |
| 1161 | } |
| 1162 | EXPORT_SYMBOL(ww_mutex_lock_interruptible); |
| 1163 | |
| 1164 | #endif /* !CONFIG_DEBUG_LOCK_ALLOC */ |
| 1165 | #endif /* !CONFIG_PREEMPT_RT */ |
| 1166 | |
| 1167 | EXPORT_TRACEPOINT_SYMBOL_GPL(contention_begin); |
| 1168 | EXPORT_TRACEPOINT_SYMBOL_GPL(contention_end); |
| 1169 | |
| 1170 | /** |
| 1171 | * atomic_dec_and_mutex_lock - return holding the mutex if we decrement to 0 |
| 1172 | * @cnt: the atomic counter to decrement |
| 1173 | * @lock: the mutex to return holding if the count reaches 0 |
| 1174 | * |
| 1175 | * Return: 1 (true), holding @lock, if the decrement reached 0; 0 (false) otherwise. |
| 1176 | */ |
| 1177 | int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) |
| 1178 | { |
| 1179 | /* dec if we can't possibly hit 0 */ |
| 1180 | if (atomic_add_unless(cnt, -1, 1)) |
| 1181 | return 0; |
| 1182 | /* we might hit 0, so take the lock */ |
| 1183 | mutex_lock(lock); |
| 1184 | if (!atomic_dec_and_test(cnt)) { |
| 1185 | /* when we actually did the dec, we didn't hit 0 */ |
| 1186 | mutex_unlock(lock); |
| 1187 | return 0; |
| 1188 | } |
| 1189 | /* we hit 0, and we hold the lock */ |
| 1190 | return 1; |
| 1191 | } |
| 1192 | EXPORT_SYMBOL(atomic_dec_and_mutex_lock); |
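|  | /* |
|  | * Illustrative (hypothetical) last-put pattern using the helper above: the |
|  | * mutex is only taken when the reference count might actually reach zero, |
|  | * and the task that sees it hit zero tears the object down under the lock: |
|  | * |
|  | *   static void example_put(struct example_obj *obj) |
|  | *   { |
|  | *       if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->list_lock)) |
|  | *           return; |
|  | *       list_del(&obj->node); |
|  | *       mutex_unlock(&obj->list_lock); |
|  | *       kfree(obj); |
|  | *   } |
|  | */ |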
| 1193 | |