/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CONTEXT_TRACKING_H
#define _LINUX_CONTEXT_TRACKING_H

#include <linux/sched.h>
#include <linux/vtime.h>
#include <linux/context_tracking_state.h>
#include <linux/instrumentation.h>

#include <asm/ptrace.h>

#ifdef CONFIG_CONTEXT_TRACKING_USER
extern void ct_cpu_track_user(int cpu);

/* Called with interrupts disabled. */
extern void __ct_user_enter(enum ctx_state state);
extern void __ct_user_exit(enum ctx_state state);

extern void ct_user_enter(enum ctx_state state);
extern void ct_user_exit(enum ctx_state state);

extern void user_enter_callable(void);
extern void user_exit_callable(void);

static inline void user_enter(void)
{
	if (context_tracking_enabled())
		ct_user_enter(CT_STATE_USER);
}

static inline void user_exit(void)
{
	if (context_tracking_enabled())
		ct_user_exit(CT_STATE_USER);
}
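
/*
 * Illustrative sketch (hypothetical caller, not from this file): code that
 * crosses the user/kernel boundary pairs these helpers, e.g. a generic
 * syscall path might look like:
 *
 *	user_exit();            // syscall entry: back in the kernel
 *	handle_syscall(regs);   // hypothetical handler, for illustration
 *	user_enter();           // syscall exit: about to resume userspace
 *
 * Unlike the _irqoff variants below, these may be called with interrupts
 * enabled; ct_user_enter()/ct_user_exit() handle irq save/restore themselves.
 */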

/* Called with interrupts disabled. */
static __always_inline void user_enter_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CT_STATE_USER);
}

static __always_inline void user_exit_irqoff(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CT_STATE_USER);
}
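
/*
 * Illustrative sketch (hypothetical arch code): the _irqoff variants are
 * meant for entry paths that already run with interrupts disabled, e.g.:
 *
 *	local_irq_disable();
 *	user_enter_irqoff();    // tell context tracking we enter userspace
 *	arch_return_to_user();  // hypothetical, for illustration
 *
 * They skip the irq save/restore done by ct_user_enter()/ct_user_exit(),
 * and being __always_inline they suit noinstr entry code.
 */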

static inline enum ctx_state exception_enter(void)
{
	enum ctx_state prev_ctx;

	if (IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) ||
	    !context_tracking_enabled())
		return 0;

	prev_ctx = __ct_state();
	if (prev_ctx != CT_STATE_KERNEL)
		ct_user_exit(prev_ctx);

	return prev_ctx;
}

static inline void exception_exit(enum ctx_state prev_ctx)
{
	if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) &&
	    context_tracking_enabled()) {
		if (prev_ctx != CT_STATE_KERNEL)
			ct_user_enter(prev_ctx);
	}
}
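
/*
 * Illustrative sketch (hypothetical handler): exception_enter() returns the
 * context that was interrupted so exception_exit() can restore it on the
 * way out:
 *
 *	enum ctx_state prev = exception_enter();
 *	handle_fault(regs);     // hypothetical, for illustration
 *	exception_exit(prev);
 *
 * Architectures that track the user context off-stack
 * (CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) don't need this save/restore
 * dance, which is why both helpers compile out in that case.
 */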

static __always_inline bool context_tracking_guest_enter(void)
{
	if (context_tracking_enabled())
		__ct_user_enter(CT_STATE_GUEST);

	return context_tracking_enabled_this_cpu();
}

static __always_inline bool context_tracking_guest_exit(void)
{
	if (context_tracking_enabled())
		__ct_user_exit(CT_STATE_GUEST);

	return context_tracking_enabled_this_cpu();
}
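
/*
 * Illustrative sketch (loosely modelled on a hypervisor's vCPU entry path;
 * simplified and hypothetical): the return value says whether this CPU's
 * context tracking is active, i.e. whether RCU was already told about the
 * transition, so the caller knows if it must note the context switch itself:
 *
 *	if (!context_tracking_guest_enter())
 *		note_context_switch_to_rcu();   // hypothetical placeholder
 *	run_vcpu(vcpu);                         // hypothetical
 *	context_tracking_guest_exit();
 */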

#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
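
/*
 * Illustrative sketch (hypothetical assertion site): CT_WARN_ON() only fires
 * when context tracking is active, so state assertions stay free otherwise:
 *
 *	CT_WARN_ON(ct_state() != CT_STATE_KERNEL);
 */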

#else
static inline void user_enter(void) { }
static inline void user_exit(void) { }
static inline void user_enter_irqoff(void) { }
static inline void user_exit_irqoff(void) { }
static inline int exception_enter(void) { return 0; }
static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline int ct_state(void) { return -1; }
static inline int __ct_state(void) { return -1; }
static __always_inline bool context_tracking_guest_enter(void) { return false; }
static __always_inline bool context_tracking_guest_exit(void) { return false; }
#define CT_WARN_ON(cond) do { } while (0)
#endif /* !CONFIG_CONTEXT_TRACKING_USER */

#ifdef CONFIG_CONTEXT_TRACKING_USER_FORCE
extern void context_tracking_init(void);
#else
static inline void context_tracking_init(void) { }
#endif /* CONFIG_CONTEXT_TRACKING_USER_FORCE */

#ifdef CONFIG_CONTEXT_TRACKING_IDLE
extern void ct_idle_enter(void);
extern void ct_idle_exit(void);

/*
 * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)?
 *
 * Note that this returns the actual boolean data (watching / not watching),
 * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of
 * context_tracking.state.
 *
 * No ordering, as we are sampling CPU-local information.
 */
static __always_inline bool rcu_is_watching_curr_cpu(void)
{
	return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING;
}
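
/*
 * Illustrative sketch (hypothetical caller): diagnostic code can use this to
 * bail out of paths that would otherwise enter an RCU read-side critical
 * section while RCU is not watching:
 *
 *	if (!rcu_is_watching_curr_cpu())
 *		return;         // RCU readers would be unsafe here
 */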

/*
 * Increment the current CPU's context_tracking structure's ->state field
 * with ordering. Return the new value.
 */
static __always_inline unsigned long ct_state_inc(int incby)
{
	return raw_atomic_add_return(incby, this_cpu_ptr(&context_tracking.state));
}
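
/*
 * Illustrative note (assumption about the CT_RCU_WATCHING layout):
 * ct_state_inc(CT_RCU_WATCHING) advances the RCU-watching counter by one,
 * toggling the parity bit that rcu_is_watching_curr_cpu() samples.
 * warn_rcu_enter()/warn_rcu_exit() below rely on exactly this to
 * temporarily make RCU watch again.
 */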

static __always_inline bool warn_rcu_enter(void)
{
	bool ret = false;

	/*
	 * Horrible hack to shut up recursive "RCU isn't watching" failures,
	 * since lots of the actual reporting also relies on RCU.
	 */
	preempt_disable_notrace();
	if (!rcu_is_watching_curr_cpu()) {
		ret = true;
		ct_state_inc(CT_RCU_WATCHING);
	}

	return ret;
}

static __always_inline void warn_rcu_exit(bool rcu)
{
	if (rcu)
		ct_state_inc(CT_RCU_WATCHING);
	preempt_enable_notrace();
}
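
/*
 * Illustrative sketch (hypothetical warning path): bracket RCU-using
 * diagnostics so they also work from a non-watching context:
 *
 *	bool rcu = warn_rcu_enter();
 *	report_problem();       // hypothetical; may use RCU internally
 *	warn_rcu_exit(rcu);
 */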

#else
static inline void ct_idle_enter(void) { }
static inline void ct_idle_exit(void) { }

static __always_inline bool warn_rcu_enter(void) { return false; }
static __always_inline void warn_rcu_exit(bool rcu) { }
#endif /* !CONFIG_CONTEXT_TRACKING_IDLE */

#endif /* _LINUX_CONTEXT_TRACKING_H */