// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */
#include "xe_bo.h"
#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include "xe_assert.h"
#include "xe_validation.h"

#ifdef CONFIG_DRM_XE_DEBUG
/**
 * xe_validation_assert_exec() - Assert that the drm_exec pointer is suitable
 * for validation.
 * @xe: Pointer to the xe device.
 * @exec: The drm_exec pointer to check.
 * @obj: Pointer to the object subject to validation.
 *
 * NULL exec pointers are not allowed.
 * For XE_VALIDATION_UNIMPLEMENTED, no checking is done.
 * For XE_VALIDATION_OPT_OUT, check that the caller is a kunit test.
 * For XE_VALIDATION_UNSUPPORTED, check that the object subject to
 * validation is a dma-buf, for which ww locking support is
 * not in place in the dma-buf layer.
 */
void xe_validation_assert_exec(const struct xe_device *xe,
			       const struct drm_exec *exec,
			       const struct drm_gem_object *obj)
{
	xe_assert(xe, exec);
	if (IS_ERR(exec)) {
		switch (PTR_ERR(exec)) {
		case __XE_VAL_UNIMPLEMENTED:
			break;
		case __XE_VAL_UNSUPPORTED:
			xe_assert(xe, !!obj->dma_buf);
			break;
#if IS_ENABLED(CONFIG_KUNIT)
		case __XE_VAL_OPT_OUT:
			xe_assert(xe, current->kunit_test);
			break;
#endif
		default:
			xe_assert(xe, false);
		}
	}
}
#endif
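
/*
 * A minimal sketch of the intended call pattern for
 * xe_validation_assert_exec(). The my_validate() helper and the bo it
 * operates on are hypothetical and for illustration only:
 *
 *	static int my_validate(struct xe_device *xe, struct xe_bo *bo,
 *			       struct drm_exec *exec)
 *	{
 *		xe_validation_assert_exec(xe, exec, &bo->ttm.base);
 *		... perform the validation under the drm_exec transaction ...
 *	}
 */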

static int xe_validation_lock(struct xe_validation_ctx *ctx)
{
	struct xe_validation_device *val = ctx->val;
	int ret = 0;

	if (ctx->val_flags.interruptible) {
		if (ctx->request_exclusive)
			ret = down_write_killable(&val->lock);
		else
			ret = down_read_interruptible(&val->lock);
	} else {
		if (ctx->request_exclusive)
			down_write(&val->lock);
		else
			down_read(&val->lock);
	}

	if (!ret) {
		ctx->lock_held = true;
		ctx->lock_held_exclusive = ctx->request_exclusive;
	}

	return ret;
}

static int xe_validation_trylock(struct xe_validation_ctx *ctx)
{
	struct xe_validation_device *val = ctx->val;
	bool locked;

	if (ctx->request_exclusive)
		locked = down_write_trylock(&val->lock);
	else
		locked = down_read_trylock(&val->lock);

	if (locked) {
		ctx->lock_held = true;
		ctx->lock_held_exclusive = ctx->request_exclusive;
	}

	return locked ? 0 : -EWOULDBLOCK;
}

static void xe_validation_unlock(struct xe_validation_ctx *ctx)
{
	if (!ctx->lock_held)
		return;

	if (ctx->lock_held_exclusive)
		up_write(&ctx->val->lock);
	else
		up_read(&ctx->val->lock);

	ctx->lock_held = false;
}

/**
 * xe_validation_ctx_init() - Initialize an xe_validation_ctx
 * @ctx: The xe_validation_ctx to initialize.
 * @val: The xe_validation_device representing the validation domain.
 * @exec: The struct drm_exec to use for the transaction. May be NULL.
 * @flags: The flags to use for initialization.
 *
 * Initialize and lock an xe_validation transaction using the validation domain
 * represented by @val. Also initialize the drm_exec object, forwarding parts of
 * @flags to the drm_exec initialization. The @flags.exclusive flag should
 * typically be set to false to avoid locking out other validators from the
 * domain until an OOM is hit. For testing or final-attempt purposes it can,
 * however, be set to true.
 *
 * Return: %0 on success, %-EINTR if interruptible initial locking failed with a
 * signal pending. If @flags.no_block is set to true, a failed trylock
 * returns %-EWOULDBLOCK.
 */
int xe_validation_ctx_init(struct xe_validation_ctx *ctx, struct xe_validation_device *val,
			   struct drm_exec *exec, const struct xe_val_flags flags)
{
	int ret;

	ctx->exec = exec;
	ctx->val = val;
	ctx->lock_held = false;
	ctx->lock_held_exclusive = false;
	ctx->request_exclusive = flags.exclusive;
	ctx->val_flags = flags;
	ctx->exec_flags = 0;
	ctx->nr = 0;

	if (flags.no_block)
		ret = xe_validation_trylock(ctx);
	else
		ret = xe_validation_lock(ctx);
	if (ret)
		return ret;

	if (exec) {
		if (flags.interruptible)
			ctx->exec_flags |= DRM_EXEC_INTERRUPTIBLE_WAIT;
		if (flags.exec_ignore_duplicates)
			ctx->exec_flags |= DRM_EXEC_IGNORE_DUPLICATES;
		drm_exec_init(exec, ctx->exec_flags, ctx->nr);
	}

	return 0;
}
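
/*
 * A minimal usage sketch for xe_validation_ctx_init() and
 * xe_validation_ctx_fini(), using the standard drm_exec locking helpers.
 * The caller, the locked object @obj and the xe->val member access are
 * assumptions for illustration only:
 *
 *	struct xe_validation_ctx ctx;
 *	struct drm_exec exec;
 *	int err;
 *
 *	err = xe_validation_ctx_init(&ctx, &xe->val, &exec,
 *				     (struct xe_val_flags) {.interruptible = true});
 *	if (err)
 *		return err;
 *
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_exec_lock_obj(&exec, obj);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *		... validate / bind under the drm_exec transaction ...
 *	}
 *
 *	xe_validation_ctx_fini(&ctx);
 *	return err;
 */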

#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
/*
 * This abuses both drm_exec and ww_mutex internals and should be
 * replaced by checking for -EDEADLK when we can make TTM
 * stop converting -EDEADLK to -ENOMEM.
 * An alternative is to not have exhaustive eviction with
 * CONFIG_DEBUG_WW_MUTEX_SLOWPATH until that happens.
 */
static bool xe_validation_contention_injected(struct drm_exec *exec)
{
	return !!exec->ticket.contending_lock;
}

#else

static bool xe_validation_contention_injected(struct drm_exec *exec)
{
	return false;
}

#endif

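/*
 * Retry on -ENOMEM if the previous attempt wasn't already exclusive, or if
 * it was exclusive but the failure came from injected ww_mutex contention
 * rather than a true OOM. Retrying switches the transaction to exclusive mode.
 */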
static bool __xe_validation_should_retry(struct xe_validation_ctx *ctx, int ret)
{
	if (ret == -ENOMEM &&
	    ((ctx->request_exclusive &&
	      xe_validation_contention_injected(ctx->exec)) ||
	     !ctx->request_exclusive)) {
		ctx->request_exclusive = true;
		return true;
	}

	return false;
}

/**
 * xe_validation_exec_lock() - Perform drm_gpuvm_exec_lock within a validation
 * transaction.
 * @ctx: An uninitialized xe_validation_ctx.
 * @vm_exec: An initialized struct drm_gpuvm_exec.
 * @val: The validation domain.
 *
 * The drm_gpuvm_exec_lock() function internally initializes its drm_exec
 * transaction and therefore doesn't lend itself very well to using
 * xe_validation_ctx_init(). Provide a helper that takes an uninitialized
 * xe_validation_ctx and calls drm_gpuvm_exec_lock() with OOM retry.
 *
 * Return: %0 on success, negative error code on failure.
 */
int xe_validation_exec_lock(struct xe_validation_ctx *ctx,
			    struct drm_gpuvm_exec *vm_exec,
			    struct xe_validation_device *val)
{
	int ret;

	memset(ctx, 0, sizeof(*ctx));
	ctx->exec = &vm_exec->exec;
	ctx->exec_flags = vm_exec->flags;
	ctx->val = val;
	if (ctx->exec_flags & DRM_EXEC_INTERRUPTIBLE_WAIT)
		ctx->val_flags.interruptible = 1;
	if (ctx->exec_flags & DRM_EXEC_IGNORE_DUPLICATES)
		ctx->val_flags.exec_ignore_duplicates = 1;
retry:
	ret = xe_validation_lock(ctx);
	if (ret)
		return ret;

	ret = drm_gpuvm_exec_lock(vm_exec);
	if (ret) {
		xe_validation_unlock(ctx);
		if (__xe_validation_should_retry(ctx, ret))
			goto retry;
	}

	return ret;
}
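
/*
 * A minimal usage sketch for xe_validation_exec_lock(). The vm pointer,
 * the fence count and the xe->val member access are assumptions for
 * illustration only:
 *
 *	struct drm_gpuvm_exec vm_exec = {
 *		.vm = &vm->gpuvm,
 *		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT,
 *		.num_fences = 1,
 *	};
 *	struct xe_validation_ctx ctx;
 *	int err;
 *
 *	err = xe_validation_exec_lock(&ctx, &vm_exec, &xe->val);
 *	if (err)
 *		return err;
 *
 *	... work on the locked vm and its bos ...
 *
 *	xe_validation_ctx_fini(&ctx);
 */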

/**
 * xe_validation_ctx_fini() - Finalize a validation transaction
 * @ctx: The validation transaction to finalize.
 *
 * Finalize a validation transaction and its related drm_exec transaction.
 */
void xe_validation_ctx_fini(struct xe_validation_ctx *ctx)
{
	if (ctx->exec)
		drm_exec_fini(ctx->exec);
	xe_validation_unlock(ctx);
}

/**
 * xe_validation_should_retry() - Determine if a validation transaction should retry
 * @ctx: The validation transaction.
 * @ret: Pointer to a return value variable.
 *
 * Determines whether a validation transaction should retry based on the
 * internal transaction state and the return value pointed to by @ret.
 * If a validation should be retried, the transaction is prepared for that:
 * the validation lock might be re-taken in exclusive mode, and *@ret
 * is set to %0. If the re-locking fails, typically due to interruptible
 * locking with a signal pending, *@ret is instead set to %-EINTR and the
 * function returns %false.
 *
 * Return: %true if validation should be retried, %false otherwise.
 */
bool xe_validation_should_retry(struct xe_validation_ctx *ctx, int *ret)
{
	if (__xe_validation_should_retry(ctx, *ret)) {
		drm_exec_fini(ctx->exec);
		*ret = 0;
		if (ctx->request_exclusive != ctx->lock_held_exclusive) {
			xe_validation_unlock(ctx);
			*ret = xe_validation_lock(ctx);
		}
		drm_exec_init(ctx->exec, ctx->exec_flags, ctx->nr);
		return !*ret;
	}

	return false;
}
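
/*
 * A minimal sketch of the retry loop xe_validation_should_retry() is meant
 * to be used in. The my_validate() helper and the locked object @obj are
 * hypothetical; only the control flow is illustrative:
 *
 *	err = xe_validation_ctx_init(&ctx, &xe->val, &exec,
 *				     (struct xe_val_flags) {});
 *	if (err)
 *		return err;
 * retry:
 *	drm_exec_until_all_locked(&exec) {
 *		err = drm_exec_lock_obj(&exec, obj);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *		// may return -ENOMEM under memory pressure:
 *		err = my_validate(obj, &exec);
 *		drm_exec_retry_on_contention(&exec);
 *	}
 *	if (err && xe_validation_should_retry(&ctx, &err))
 *		goto retry;
 *
 *	xe_validation_ctx_fini(&ctx);
 *	return err;
 */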