/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion, adapted for tracing.
 *
 * Copyright (C) 2020 Paul E. McKenney.
 */

#ifndef __LINUX_RCUPDATE_TRACE_H
#define __LINUX_RCUPDATE_TRACE_H

#include <linux/sched.h>
#include <linux/rcupdate.h>
#include <linux/cleanup.h>

#ifdef CONFIG_TASKS_TRACE_RCU
extern struct srcu_struct rcu_tasks_trace_srcu_struct;
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

#if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
static inline int rcu_read_lock_trace_held(void)
{
	return srcu_read_lock_held(&rcu_tasks_trace_srcu_struct);
}

#else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)

static inline int rcu_read_lock_trace_held(void)
{
	return 1;
}

#endif // #else // #if defined(CONFIG_DEBUG_LOCK_ALLOC) && defined(CONFIG_TASKS_TRACE_RCU)
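
/*
 * Example usage (illustrative sketch; my_datap is a hypothetical
 * RCU-protected pointer): rcu_read_lock_trace_held() is intended for
 * lockdep-style assertions, for example as the condition argument of
 * rcu_dereference_check():
 *
 *	p = rcu_dereference_check(my_datap, rcu_read_lock_trace_held());
 */
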
#ifdef CONFIG_TASKS_TRACE_RCU

/**
 * rcu_read_lock_tasks_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for
 * srcu_read_lock_fast(). For a description of how implicit RCU
 * readers provide the needed ordering for architectures defining the
 * ARCH_WANTS_NO_INSTR Kconfig option (and thus promising never to trace
 * code where RCU is not watching), please see the __srcu_read_lock_fast()
 * (non-kerneldoc) header comment. Otherwise, the smp_mb() below provides
 * the needed ordering.
 */
static inline struct srcu_ctr __percpu *rcu_read_lock_tasks_trace(void)
{
	struct srcu_ctr __percpu *ret = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);

	rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Provide ordering on noinstr-incomplete architectures.
	return ret;
}

/**
 * rcu_read_unlock_tasks_trace - mark end of RCU-trace read-side critical section
 * @scp: return value from corresponding rcu_read_lock_tasks_trace().
 *
 * Pairs with the preceding call to rcu_read_lock_tasks_trace() that
 * returned the value passed in via scp.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 * For memory-ordering information, please see the header comment for the
 * rcu_read_lock_tasks_trace() function.
 */
static inline void rcu_read_unlock_tasks_trace(struct srcu_ctr __percpu *scp)
{
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Provide ordering on noinstr-incomplete architectures.
	__srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
	srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}
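
/*
 * Example usage (illustrative sketch; my_datap and do_something_with()
 * are hypothetical): the value returned by rcu_read_lock_tasks_trace()
 * must be passed to the matching rcu_read_unlock_tasks_trace():
 *
 *	struct srcu_ctr __percpu *scp;
 *
 *	scp = rcu_read_lock_tasks_trace();
 *	p = rcu_dereference_check(my_datap, rcu_read_lock_trace_held());
 *	do_something_with(p);
 *	rcu_read_unlock_tasks_trace(scp);
 */
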
/**
 * rcu_read_lock_trace - mark beginning of RCU-trace read-side critical section
 *
 * When synchronize_rcu_tasks_trace() is invoked by one task, then that
 * task is guaranteed to block until all other tasks exit their read-side
 * critical sections. Similarly, if call_rcu_tasks_trace() is invoked on one
 * task while other tasks are within RCU read-side critical sections,
 * invocation of the corresponding RCU callback is deferred until after
 * all the other tasks exit their critical sections.
 *
 * For more details, please see the documentation for rcu_read_lock().
 */
static inline void rcu_read_lock_trace(void)
{
	struct task_struct *t = current;

	rcu_try_lock_acquire(&rcu_tasks_trace_srcu_struct.dep_map);
	if (t->trc_reader_nesting++) {
		// In case we interrupted a Tasks Trace RCU reader.
		return;
	}
	barrier(); // nesting before scp to protect against interrupt handler.
	t->trc_reader_scp = __srcu_read_lock_fast(&rcu_tasks_trace_srcu_struct);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
		smp_mb(); // Placeholder for more selective ordering
}

/**
 * rcu_read_unlock_trace - mark end of RCU-trace read-side critical section
 *
 * Pairs with a preceding call to rcu_read_lock_trace(), and nesting is
 * allowed. Invoking rcu_read_unlock_trace() when there is no matching
 * rcu_read_lock_trace() is verboten, and will result in lockdep complaints.
 *
 * For more details, please see the documentation for rcu_read_unlock().
 */
static inline void rcu_read_unlock_trace(void)
{
	struct srcu_ctr __percpu *scp;
	struct task_struct *t = current;

	scp = t->trc_reader_scp;
	barrier(); // scp before nesting to protect against interrupt handler.
	if (!--t->trc_reader_nesting) {
		if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU_NO_MB))
			smp_mb(); // Placeholder for more selective ordering
		__srcu_read_unlock_fast(&rcu_tasks_trace_srcu_struct, scp);
	}
	srcu_lock_release(&rcu_tasks_trace_srcu_struct.dep_map);
}
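
/*
 * Example usage (illustrative sketch; do_traced_lookup() is hypothetical):
 * rcu_read_lock_trace() tracks nesting in the current task, so no cookie
 * needs to be carried by the caller and readers may nest, including when
 * an interrupt handler interrupts a reader:
 *
 *	rcu_read_lock_trace();
 *	rcu_read_lock_trace();		// Nested readers are permitted.
 *	do_traced_lookup();
 *	rcu_read_unlock_trace();
 *	rcu_read_unlock_trace();
 */
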
/**
 * call_rcu_tasks_trace() - Queue a callback for a trace rcu-tasks grace period
 * @rhp: structure to be used for queueing the RCU updates.
 * @func: actual callback function to be invoked after the grace period
 *
 * The callback function will be invoked some time after a trace rcu-tasks
 * grace period elapses, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These
 * read-side critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * See the description of call_rcu() for more detailed information on
 * memory ordering guarantees.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func)
{
	call_srcu(&rcu_tasks_trace_srcu_struct, rhp, func);
}
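
/*
 * Example usage (illustrative sketch; struct my_node, my_node_free_cb(),
 * and node are hypothetical): deferring the freeing of a structure with
 * an embedded rcu_head until a trace rcu-tasks grace period has elapsed:
 *
 *	struct my_node {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void my_node_free_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_node, rh));
 *	}
 *
 *	// After removing the node from reader-visible structures:
 *	call_rcu_tasks_trace(&node->rh, my_node_free_cb);
 */
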
/**
 * synchronize_rcu_tasks_trace - wait for a trace rcu-tasks grace period
 *
 * Control will return to the caller some time after a trace rcu-tasks
 * grace period has elapsed, in other words after all currently executing
 * trace rcu-tasks read-side critical sections have completed. These read-side
 * critical sections are delimited by calls to rcu_read_lock_trace()
 * and rcu_read_unlock_trace().
 *
 * This is a very specialized primitive, intended only for a few uses in
 * tracing and other situations requiring manipulation of function preambles
 * and profiling hooks. The synchronize_rcu_tasks_trace() function is not
 * (yet) intended for heavy use from multiple CPUs.
 *
 * See the description of synchronize_rcu() for more detailed information
 * on memory ordering guarantees.
 */
static inline void synchronize_rcu_tasks_trace(void)
{
	synchronize_srcu(&rcu_tasks_trace_srcu_struct);
}
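
/*
 * Example usage (illustrative sketch; my_datap, newp, oldp, and my_lock
 * are hypothetical): the usual remove, wait-for-readers, then reclaim
 * update sequence:
 *
 *	oldp = rcu_dereference_protected(my_datap, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(my_datap, newp);	// Unpublish the old version.
 *	synchronize_rcu_tasks_trace();		// Wait for pre-existing readers.
 *	kfree(oldp);				// No reader can still hold oldp.
 */
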
/**
 * rcu_barrier_tasks_trace - Wait for in-flight call_rcu_tasks_trace() callbacks.
 *
 * Note that rcu_barrier_tasks_trace() is not obligated to actually wait,
 * for example, if there are no pending callbacks.
 */
static inline void rcu_barrier_tasks_trace(void)
{
	srcu_barrier(&rcu_tasks_trace_srcu_struct);
}
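
/*
 * Example usage (illustrative sketch; my_module_exit() is hypothetical):
 * a module whose callbacks were queued with call_rcu_tasks_trace() must
 * wait for them before its code and data go away, typically in its exit
 * handler:
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		// ...prevent further call_rcu_tasks_trace() invocations...
 *		rcu_barrier_tasks_trace();	// Wait for pending callbacks.
 *	}
 */
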
/**
 * rcu_tasks_trace_expedite_current - Expedite the current Tasks Trace RCU grace period
 *
 * Cause the current Tasks Trace RCU grace period to become expedited.
 * The grace period following the current one might also be expedited.
 * If there is no current grace period, one might be created. If the
 * current grace period is currently sleeping, that sleep will complete
 * before expediting takes effect.
 */
static inline void rcu_tasks_trace_expedite_current(void)
{
	srcu_expedite_current(&rcu_tasks_trace_srcu_struct);
}

// Placeholders to enable stepwise transition.
void __init rcu_tasks_trace_suppress_unused(void);

#else // #ifdef CONFIG_TASKS_TRACE_RCU

/*
 * The BPF JIT forms these addresses even when it doesn't call these
 * functions, so provide definitions that result in runtime errors.
 */
static inline void call_rcu_tasks_trace(struct rcu_head *rhp, rcu_callback_t func) { BUG(); }
static inline void rcu_read_lock_trace(void) { BUG(); }
static inline void rcu_read_unlock_trace(void) { BUG(); }

#endif /* #ifdef CONFIG_TASKS_TRACE_RCU */

DEFINE_LOCK_GUARD_0(rcu_tasks_trace,
		    rcu_read_lock_trace(),
		    rcu_read_unlock_trace())
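
/*
 * Example usage (illustrative sketch; my_traced_lookup() and do_lookup()
 * are hypothetical): the guard defined above provides a scope-based reader
 * that calls rcu_read_unlock_trace() automatically on every return path:
 *
 *	static int my_traced_lookup(void)
 *	{
 *		guard(rcu_tasks_trace)();
 *		return do_lookup();	// Reader exits when the scope ends.
 *	}
 */
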
#endif /* __LINUX_RCUPDATE_TRACE_H */