// SPDX-License-Identifier: MIT
/* Copyright © 2024 Intel Corporation */

#include <drm/drm_cache.h>
#include <drm/drm_gem.h>
#include <drm/drm_panic.h>

#include "intel_fb.h"
#include "intel_display_types.h"
#include "xe_bo.h"

#include "intel_bo.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
	/* legacy tiling is unused */
	return false;
}

bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
	/* xe does not have userptr bos */
	return false;
}

bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
	/* xe bos are ttm-backed, not shmem objects */
	return false;
}

bool intel_bo_is_protected(struct drm_gem_object *obj)
{
	return xe_bo_is_protected(gem_to_xe_bo(obj));
}

void intel_bo_flush_if_display(struct drm_gem_object *obj)
{
}

int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(obj, vma);
}

int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
	struct xe_bo *bo = gem_to_xe_bo(obj);

	return xe_bo_read(bo, offset, dst, size);
}

struct intel_frontbuffer *intel_bo_get_frontbuffer(struct drm_gem_object *obj)
{
	return NULL;
}

struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj,
						   struct intel_frontbuffer *front)
{
	return front;
}

void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
	/* FIXME */
}

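/*
 * Book-keeping for the drm_panic path: the scanout buffer is written one
 * pixel at a time, so remember which page is currently mapped (page) and
 * its kernel virtual address (vaddr).
 */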
struct xe_panic_data {
	struct page **pages;
	int page;
	void *vaddr;
};

struct xe_framebuffer {
	struct intel_framebuffer base;
	struct xe_panic_data panic;
};

static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb)
{
	return &container_of_const(fb, struct xe_framebuffer, base)->panic;
}

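/* Flush CPU writes to the mapped page and drop the local mapping. */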
static void xe_panic_kunmap(struct xe_panic_data *panic)
{
	if (panic->vaddr) {
		drm_clflush_virt_range(panic->vaddr, PAGE_SIZE);
		kunmap_local(panic->vaddr);
		panic->vaddr = NULL;
	}
}

/*
 * The scanout buffer pages are not mapped, so for each pixel, use
 * ttm_bo_kmap_try_from_panic() to map the page and write the pixel.
 * Keep the mapping from the previous pixel when possible, to avoid
 * excessive map/unmap cycles.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	if (new_page != panic->page) {
		xe_panic_kunmap(panic);
		panic->page = new_page;
		panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm,
							  panic->page);
	}
	if (panic->vaddr) {
		u32 *pix = panic->vaddr + offset;

		*pix = color;
	}
}

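/*
 * Allocate the xe wrapper so the panic state above lives alongside the
 * intel_framebuffer handed back to the common display code.
 */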
struct intel_framebuffer *intel_bo_alloc_framebuffer(void)
{
	struct xe_framebuffer *xe_fb;

	xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL);
	if (xe_fb)
		return &xe_fb->base;
	return NULL;
}

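/*
 * Hook the xe-specific set_pixel helper into the drm_panic scanout buffer;
 * sb->private points at the intel_framebuffer being scanned out.
 */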
int intel_bo_panic_setup(struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	panic->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}

void intel_bo_panic_finish(struct intel_framebuffer *fb)
{
	struct xe_panic_data *panic = to_xe_panic_data(fb);

	xe_panic_kunmap(panic);
	panic->page = -1;
}