kernel/bpf/bpf_insn_array.c

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2025 Isovalent */

#include <linux/bpf.h>
#include <linux/btf_ids.h>

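/*
 * An instruction array map: each element tracks one instruction of a
 * BPF program through its lifetime. values[i] holds the original
 * (orig_off), translated (xlated_off) and jitted (jitted_off) offsets
 * of the instruction, and ips[i] holds its final address in the jitted
 * image. The verifier and the JITs keep these in sync as the program
 * is patched.
 */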
struct bpf_insn_array {
	struct bpf_map map;
	atomic_t used;
	long *ips;
	DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values);
};

#define cast_insn_array(MAP_PTR) \
	container_of((MAP_PTR), struct bpf_insn_array, map)

#define INSN_DELETED ((u32)-1)

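/*
 * The whole map is a single allocation: the struct itself, followed by
 * max_entries values, followed by max_entries instruction pointers:
 *
 *	[struct bpf_insn_array][values[0..n-1]][ips[0..n-1]]
 */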
static inline u64 insn_array_alloc_size(u32 max_entries)
{
	const u64 base_size = sizeof(struct bpf_insn_array);
	const u64 entry_size = sizeof(struct bpf_insn_array_value);

	return base_size + max_entries * (entry_size + sizeof(long));
}

static int insn_array_alloc_check(union bpf_attr *attr)
{
	u32 value_size = sizeof(struct bpf_insn_array_value);

	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size != value_size || attr->map_flags != 0)
		return -EINVAL;

	return 0;
}

static void insn_array_free(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	bpf_map_area_free(insn_array);
}

static struct bpf_map *insn_array_alloc(union bpf_attr *attr)
{
	u64 size = insn_array_alloc_size(attr->max_entries);
	struct bpf_insn_array *insn_array;

	insn_array = bpf_map_area_alloc(size, NUMA_NO_NODE);
	if (!insn_array)
		return ERR_PTR(-ENOMEM);

	/* ips are allocated right after the insn_array->values[] array */
	insn_array->ips = (void *)&insn_array->values[attr->max_entries];

	bpf_map_init_from_attr(&insn_array->map, attr);

	/* BPF programs aren't allowed to write to the map */
	insn_array->map.map_flags |= BPF_F_RDONLY_PROG;

	return &insn_array->map;
}

static void *insn_array_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= insn_array->map.max_entries))
		return NULL;

	return &insn_array->values[index];
}

static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	u32 index = *(u32 *)key;
	struct bpf_insn_array_value val = {};

	if (unlikely(index >= insn_array->map.max_entries))
		return -E2BIG;

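	/* All elements always exist, so BPF_NOEXIST can never succeed */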
	if (unlikely(map_flags & BPF_NOEXIST))
		return -EEXIST;

	copy_map_value(map, &val, value);
	if (val.jitted_off || val.xlated_off)
		return -EINVAL;

	insn_array->values[index].orig_off = val.orig_off;

	return 0;
}

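/* Elements can never be deleted; the map has a fixed set of slots */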
static long insn_array_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static int insn_array_check_btf(const struct bpf_map *map,
				const struct btf *btf,
				const struct btf_type *key_type,
				const struct btf_type *value_type)
{
	if (!btf_type_is_i32(key_type))
		return -EINVAL;

	if (!btf_type_is_i64(value_type))
		return -EINVAL;

	return 0;
}

static u64 insn_array_mem_usage(const struct bpf_map *map)
{
	return insn_array_alloc_size(map->max_entries);
}

static int insn_array_map_direct_value_addr(const struct bpf_map *map, u64 *imm, u32 off)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	if ((off % sizeof(long)) != 0 ||
	    (off / sizeof(long)) >= map->max_entries)
		return -EINVAL;

	/* from BPF's point of view, this map is a jump table */
	*imm = (unsigned long)insn_array->ips + off;

	return 0;
}

BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array)

const struct bpf_map_ops insn_array_map_ops = {
	.map_alloc_check = insn_array_alloc_check,
	.map_alloc = insn_array_alloc,
	.map_free = insn_array_free,
	.map_get_next_key = bpf_array_get_next_key,
	.map_lookup_elem = insn_array_lookup_elem,
	.map_update_elem = insn_array_update_elem,
	.map_delete_elem = insn_array_delete_elem,
	.map_check_btf = insn_array_check_btf,
	.map_mem_usage = insn_array_mem_usage,
	.map_direct_value_addr = insn_array_map_direct_value_addr,
	.map_btf_id = &insn_array_btf_ids[0],
};

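/*
 * Userspace usage sketch (illustrative only, not part of this file;
 * assumes the libbpf bpf_map_create()/bpf_map_update_elem()/
 * bpf_map_freeze() wrappers):
 *
 *	struct bpf_insn_array_value val = { .orig_off = 5 };
 *	__u32 key = 0;
 *	int map_fd;
 *
 *	map_fd = bpf_map_create(BPF_MAP_TYPE_INSN_ARRAY, NULL,
 *				sizeof(key), sizeof(val), 1, NULL);
 *	bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 *
 *	(the map must be frozen before a program using it is loaded,
 *	 see bpf_insn_array_init() below)
 *	bpf_map_freeze(map_fd);
 */
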
static inline bool is_frozen(struct bpf_map *map)
{
	guard(mutex)(&map->freeze_mutex);

	return map->frozen;
}

static bool is_insn_array(const struct bpf_map *map)
{
	return map->map_type == BPF_MAP_TYPE_INSN_ARRAY;
}

static inline bool valid_offsets(const struct bpf_insn_array *insn_array,
				 const struct bpf_prog *prog)
{
	u32 off;
	int i;

	for (i = 0; i < insn_array->map.max_entries; i++) {
		off = insn_array->values[i].orig_off;

		if (off >= prog->len)
			return false;

		if (off > 0) {
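			/* can't point into the middle of a ldimm64 insn */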
			if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM))
				return false;
		}
	}

	return true;
}

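/*
 * Bind the map to a program at load time: requires that userspace has
 * frozen the map, claims exclusive use of it, and (re)initializes the
 * xlated offsets from the original ones.
 */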
int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	struct bpf_insn_array_value *values = insn_array->values;
	int i;

	if (!is_frozen(map))
		return -EINVAL;

	if (!valid_offsets(insn_array, prog))
		return -EINVAL;

	/* Only one program may use the map at a time */
	if (atomic_xchg(&insn_array->used, 1))
		return -EBUSY;

	/*
	 * Reset all map entries to their original offsets. This is
	 * needed, e.g., when verification is replayed with a different
	 * log level.
	 */
	for (i = 0; i < map->max_entries; i++)
		values[i].xlated_off = values[i].orig_off;

	return 0;
}

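/*
 * Called once the program is jitted: every entry that was not deleted
 * must have been resolved to an instruction pointer by the JIT
 * (see bpf_prog_update_insn_ptrs() below).
 */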
int bpf_insn_array_ready(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (!insn_array->ips[i])
			return -EFAULT;
	}

	return 0;
}

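/*
 * Called when the program using the map is released: drop the claim of
 * exclusive use so that the map can be reused.
 */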
void bpf_insn_array_release(struct bpf_map *map)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);

	atomic_set(&insn_array->used, 0);
}

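/*
 * The verifier patched a single instruction at xlated offset @off into
 * @len instructions; shift every entry pointing past @off by @len - 1.
 * E.g., after patching the instruction at offset 10 into 3 instructions,
 * an entry with xlated_off == 12 becomes 14.
 */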
void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	if (len <= 1)
		return;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off <= off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		insn_array->values[i].xlated_off += len - 1;
	}
}

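/*
 * The verifier removed @len instructions starting at xlated offset
 * @off: entries pointing into the removed range are marked deleted,
 * entries pointing past it are shifted down by @len. E.g., after
 * removing 2 instructions at offset 10, an entry with xlated_off == 11
 * is deleted and one with xlated_off == 13 becomes 11.
 */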
void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len)
{
	struct bpf_insn_array *insn_array = cast_insn_array(map);
	int i;

	for (i = 0; i < map->max_entries; i++) {
		if (insn_array->values[i].xlated_off < off)
			continue;
		if (insn_array->values[i].xlated_off == INSN_DELETED)
			continue;
		if (insn_array->values[i].xlated_off < off + len)
			insn_array->values[i].xlated_off = INSN_DELETED;
		else
			insn_array->values[i].xlated_off -= len;
	}
}

/*
 * This function is called by JITs. The image is the final jitted
 * program image, and the offsets array sets up the xlated -> jitted
 * mapping: offsets[xlated_off] must point to the beginning of the
 * corresponding jitted instruction.
 */
void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image)
{
	struct bpf_insn_array *insn_array;
	struct bpf_map *map;
	u32 xlated_off;
	int i, j;

	if (!offsets || !image)
		return;

	for (i = 0; i < prog->aux->used_map_cnt; i++) {
		map = prog->aux->used_maps[i];
		if (!is_insn_array(map))
			continue;

		insn_array = cast_insn_array(map);
		for (j = 0; j < map->max_entries; j++) {
			xlated_off = insn_array->values[j].xlated_off;
			if (xlated_off == INSN_DELETED)
				continue;
			if (xlated_off < prog->aux->subprog_start)
				continue;
			xlated_off -= prog->aux->subprog_start;
			if (xlated_off >= prog->len)
				continue;

			insn_array->values[j].jitted_off = offsets[xlated_off];
			insn_array->ips[j] = (long)(image + offsets[xlated_off]);
		}
	}
}