1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
|
// SPDX-License-Identifier: MIT
/*
* Copyright(c) 2025, Intel Corporation. All rights reserved.
*/
#include "regs/xe_irq_regs.h"
#include "regs/xe_mert_regs.h"
#include "xe_device.h"
#include "xe_mert.h"
#include "xe_mmio.h"
#include "xe_sriov_printk.h"
#include "xe_tile.h"
/**
 * xe_mert_init_early() - Initialize MERT data
 * @xe: the &xe_device with MERT to init
 */
void xe_mert_init_early(struct xe_device *xe)
{
	struct xe_tile *root = xe_device_get_root_tile(xe);
	struct xe_mert *mert = &root->mert;

	/* Set up the primitives used by the TLB invalidation flow. */
	spin_lock_init(&mert->lock);
	init_completion(&mert->tlb_inv_done);
}
/**
 * xe_mert_invalidate_lmtt() - Invalidate MERT LMTT
 * @xe: the &xe_device with MERT
 *
 * Trigger invalidation of the MERT LMTT and wait for completion.
 *
 * Return: 0 on success or -ETIMEDOUT in case of a timeout.
 */
int xe_mert_invalidate_lmtt(struct xe_device *xe)
{
	struct xe_tile *root = xe_device_get_root_tile(xe);
	struct xe_mert *mert = &root->mert;
	const long wait_jiffies = HZ / 4;
	unsigned long irqflags;

	xe_assert(xe, xe_device_has_mert(xe));

	spin_lock_irqsave(&mert->lock, irqflags);
	/* Coalesce with an invalidation that is already in flight. */
	if (!mert->tlb_inv_triggered) {
		mert->tlb_inv_triggered = true;
		reinit_completion(&mert->tlb_inv_done);
		/* Writing VALID kicks off the invalidation in hardware. */
		xe_mmio_write32(&root->mmio, MERT_TLB_INV_DESC_A,
				MERT_TLB_INV_DESC_A_VALID);
	}
	spin_unlock_irqrestore(&mert->lock, irqflags);

	/* Completed from the IRQ handler once hardware clears VALID. */
	return wait_for_completion_timeout(&mert->tlb_inv_done, wait_jiffies) ?
	       0 : -ETIMEDOUT;
}
/* Decode and report a catastrophic error latched by MERT, if any. */
static void mert_handle_cat_error(struct xe_device *xe)
{
	struct xe_tile *root = xe_device_get_root_tile(xe);
	u32 err_id, vf, err_code;

	err_id = xe_mmio_read32(&root->mmio, MERT_TLB_CT_INTR_ERR_ID_PORT);
	if (!err_id)
		return;

	/* Ack the error so the next occurrence can be latched. */
	xe_mmio_write32(&root->mmio, MERT_TLB_CT_INTR_ERR_ID_PORT, 0);

	vf = FIELD_GET(CATERR_VFID, err_id);
	err_code = FIELD_GET(CATERR_CODES, err_id);

	switch (err_code) {
	case CATERR_NO_ERROR:
		break;
	case CATERR_UNMAPPED_GGTT:
		xe_sriov_err(xe, "MERT: CAT_ERR: Access to an unmapped GGTT!\n");
		xe_device_declare_wedged(xe);
		break;
	case CATERR_LMTT_FAULT:
		xe_sriov_dbg(xe, "MERT: CAT_ERR: VF%u LMTT fault!\n", vf);
		/* XXX: track/report malicious VF activity */
		break;
	default:
		xe_sriov_err(xe, "MERT: Unexpected CAT_ERR code=%#x!\n", err_code);
		xe_device_declare_wedged(xe);
		break;
	}
}
/**
 * xe_mert_irq_handler - Handler for MERT interrupts
 * @xe: the &xe_device
 * @master_ctl: interrupt register
 *
 * Handle interrupts generated by MERT.
 */
void xe_mert_irq_handler(struct xe_device *xe, u32 master_ctl)
{
	struct xe_tile *root = xe_device_get_root_tile(xe);
	struct xe_mert *mert = &root->mert;
	unsigned long irqflags;

	if (!(master_ctl & SOC_H2DMEMINT_IRQ))
		return;

	mert_handle_cat_error(xe);

	spin_lock_irqsave(&mert->lock, irqflags);
	/*
	 * Only touch the descriptor register when an invalidation is in
	 * flight; hardware clearing the VALID bit means it has finished.
	 */
	if (mert->tlb_inv_triggered &&
	    !(xe_mmio_read32(&root->mmio, MERT_TLB_INV_DESC_A) &
	      MERT_TLB_INV_DESC_A_VALID)) {
		mert->tlb_inv_triggered = false;
		complete_all(&mert->tlb_inv_done);
	}
	spin_unlock_irqrestore(&mert->lock, irqflags);
}
|