/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_STOP_MACHINE
#define _LINUX_STOP_MACHINE

#include <linux/cpu.h>
#include <linux/cpumask_types.h>
#include <linux/smp.h>
#include <linux/list.h>

/*
 * stop_cpu[s]() is a simplistic per-cpu, maximum-priority cpu
 * monopolization mechanism.  The caller can specify a non-sleeping
 * function to be executed on one or more cpus, preempting all other
 * processes and monopolizing those cpus until it finishes.
 *
 * Resources for this mechanism are preallocated when a cpu is brought
 * up and requests are guaranteed to be served as long as the target
 * cpus are online.
 */
typedef int (*cpu_stop_fn_t)(void *arg);

#ifdef CONFIG_SMP

struct cpu_stop_work {
	struct list_head	list;		/* cpu_stopper->works */
	cpu_stop_fn_t		fn;
	unsigned long		caller;
	void			*arg;
	struct cpu_stop_done	*done;
};

int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
			 struct cpu_stop_work *work_buf);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);

extern void print_stop_info(const char *log_lvl, struct task_struct *task);
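
/*
 * Usage sketch (illustrative only, not part of this interface; the
 * callback and payload names below are hypothetical).  stop_one_cpu()
 * blocks until the non-sleeping callback has run on the target CPU,
 * while stop_one_cpu_nowait() only queues it and needs a caller-provided
 * cpu_stop_work that must stay valid until the callback has completed:
 *
 *	static int drain_remote_state(void *arg)
 *	{
 *		struct my_state *s = arg;
 *
 *		return s ? 0 : -EINVAL;
 *	}
 *
 *	static struct cpu_stop_work drain_work;
 *
 *	err = stop_one_cpu(cpu, drain_remote_state, &state);
 *	queued = stop_one_cpu_nowait(cpu, drain_remote_state, &state,
 *				     &drain_work);
 */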

#else	/* CONFIG_SMP */

#include <linux/workqueue.h>

struct cpu_stop_work {
	struct work_struct	work;
	cpu_stop_fn_t		fn;
	void			*arg;
};

static inline int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
{
	int ret = -ENOENT;
	preempt_disable();
	if (cpu == smp_processor_id())
		ret = fn(arg);
	preempt_enable();
	return ret;
}

static void stop_one_cpu_nowait_workfn(struct work_struct *work)
{
	struct cpu_stop_work *stwork =
		container_of(work, struct cpu_stop_work, work);
	preempt_disable();
	stwork->fn(stwork->arg);
	preempt_enable();
}

static inline bool stop_one_cpu_nowait(unsigned int cpu,
				       cpu_stop_fn_t fn, void *arg,
				       struct cpu_stop_work *work_buf)
{
	if (cpu == smp_processor_id()) {
		INIT_WORK(&work_buf->work, stop_one_cpu_nowait_workfn);
		work_buf->fn = fn;
		work_buf->arg = arg;
		schedule_work(&work_buf->work);
		return true;
	}

	return false;
}

static inline void print_stop_info(const char *log_lvl, struct task_struct *task) { }

#endif	/* CONFIG_SMP */

/*
 * stop_machine "Bogolock": stop the entire machine, disable interrupts.
 * This is a very heavy lock, which is equivalent to grabbing every raw
 * spinlock (and more).  So the "read" side to such a lock is anything
 * which disables preemption.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)

/**
 * stop_machine: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr to pass to @fn()
 * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
 *
 * Description: This causes a thread to be scheduled on every CPU, which
 * will run with interrupts disabled.  Each CPU specified by @cpus will
 * run @fn.  While @fn is executing, there will be no other CPUs holding
 * a raw spinlock or running within any other type of preempt-disabled
 * region of code.
 *
 * When @cpus specifies only a single CPU, this can be thought of as
 * a reader-writer lock where readers disable preemption (for example,
 * by holding a raw spinlock) and where the insanely heavy writers run
 * @fn while also preventing any other CPU from doing any useful work.
 * These writers can also be thought of as having implicitly grabbed every
 * raw spinlock in the kernel.
 *
 * When @fn is a no-op, this can be thought of as an RCU implementation
 * where readers again disable preemption and writers use stop_machine()
 * in place of synchronize_rcu(), albeit with orders of magnitude more
 * disruption than even that of synchronize_rcu_expedited().
 *
 * Although only one stop_machine() operation can proceed at a time,
 * the possibility of blocking in cpus_read_lock() means that the caller
 * cannot usefully rely on this serialization.
 *
 * Return: 0 if all invocations of @fn return zero.  Otherwise, the
 * value returned by an arbitrarily chosen member of the set of calls to
 * @fn that returned non-zero.
 */
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
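
/*
 * Usage sketch (illustrative only; apply_update() and its argument are
 * hypothetical).  A caller that must make a change no other CPU may
 * observe half-done can run it under stop_machine(); the callback runs
 * with interrupts disabled and must not sleep:
 *
 *	static int apply_update(void *arg)
 *	{
 *		struct my_update *u = arg;
 *
 *		u->applied = true;
 *		return 0;
 *	}
 *
 *	ret = stop_machine(apply_update, &update, NULL);
 */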

/**
 * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
 * @fn: the function to run
 * @data: the data ptr to pass to @fn()
 * @cpus: the cpus to run @fn() on (NULL = run on each online CPU)
 *
 * Same as above.  Avoids nested calls to cpus_read_lock().
 *
 * Context: Must be called from within a cpus_read_lock() protected region.
 */
int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
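
/*
 * Sketch of the cpuslocked variant (illustrative only; apply_update() is
 * the hypothetical callback from the sketch above).  The caller already
 * holds the CPU hotplug read lock, so this variant is used instead of
 * stop_machine():
 *
 *	cpus_read_lock();
 *	ret = stop_machine_cpuslocked(apply_update, &update, NULL);
 *	cpus_read_unlock();
 */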

/**
 * stop_core_cpuslocked: stop all threads on just one core
 * @cpu: any cpu in the targeted core
 * @fn: the function to run on each CPU in the core containing @cpu
 * @data: the data ptr to pass to @fn()
 *
 * Same as above, but instead of every CPU, only the logical CPUs of the
 * single core containing @cpu are affected.
 *
 * Context: Must be called from within a cpus_read_lock() protected region.
 *
 * Return: 0 if all invocations of @fn return zero.  Otherwise, the
 * value returned by an arbitrarily chosen member of the set of calls to
 * @fn that returned non-zero.
 */
int stop_core_cpuslocked(unsigned int cpu, cpu_stop_fn_t fn, void *data);
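
/*
 * Sketch (illustrative only; per_core_update() is a hypothetical
 * non-sleeping callback).  Only the SMT siblings of the core containing
 * @cpu are stopped, which suits changes scoped to a single core:
 *
 *	cpus_read_lock();
 *	ret = stop_core_cpuslocked(cpu, per_core_update, &data);
 *	cpus_read_unlock();
 */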

int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
				   const struct cpumask *cpus);
#else	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */

static __always_inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
					  const struct cpumask *cpus)
{
	unsigned long flags;
	int ret;
	local_irq_save(flags);
	ret = fn(data);
	local_irq_restore(flags);
	return ret;
}

static __always_inline int
stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
{
	return stop_machine_cpuslocked(fn, data, cpus);
}

static __always_inline int
stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
			       const struct cpumask *cpus)
{
	return stop_machine(fn, data, cpus);
}

#endif	/* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
#endif	/* _LINUX_STOP_MACHINE */