author Linus Torvalds <torvalds@linux-foundation.org> 2026-02-11 12:55:44 -0800
committer Linus Torvalds <torvalds@linux-foundation.org> 2026-02-11 12:55:44 -0800
commit 939faf71cf7ca9ab3d1bd2912ac0e203d4d7156a (patch)
tree bf81a5b8c95c7095983719a3e0273a73dbe79c34 /include
parent b7ef56a07672e0d7ebe71c9d9b45f959f0c2f8e8 (diff)
parent 2f5db9b4002470ea19380326c5a390647c56e780 (diff)
Merge tag 'drm-next-2026-02-11' of https://gitlab.freedesktop.org/drm/kernel
Pull drm updates from Dave Airlie:
 "Highlights:
   - amdgpu support for lots of new IP blocks which means newer GPUs
   - xe has a lot of SR-IOV and SVM improvements
   - lots of intel display refactoring across i915/xe
   - msm has more support for gen8 platforms
   - Given up on kgdb/kms integration, it's too hard on modern hw

  core:
   - drop kgdb support
   - replace system workqueue with percpu
   - account for property blobs in memcg
   - MAINTAINERS updates for xe + buddy

  rust:
   - Fix documentation for Registration constructors
   - Use pin_init::zeroed() for fops initialization
   - Annotate DRM helpers with __rust_helper
   - Improve safety documentation for gem::Object::new()
   - Update AlwaysRefCounted imports
   - mm: Prevent integer overflow in page_align()

  atomic:
   - add drm_device pointer to drm_private_obj
   - introduce gamma/degamma LUT size check

  buddy:
   - fix free_trees memory leak
   - prevent BUG_ON

  bridge:
   - introduce drm_bridge_unplug/enter/exit
   - add connector argument to .hpd_notify
   - lots of refcounting conversions
   - convert rockchip inno hdmi to bridge
   - lontium-lt9611uxc: switch to HDMI audio helpers
   - dw-hdmi-qp: add support for HPD-less setups
   - Algoltek AG6311 support

  panels:
   - edp: CSW MNE007QB3-1, AUO B140HAN06.4, AUO B140QAX01.H
   - st75751: add SPI support
   - Sitronix ST7920, Samsung LTL106HL02
   - LG LH546WF1-ED01, HannStar HSD156J
   - BOE NV130WUM-T08
   - Innolux G150XGE-L05
   - Anbernic RG-DS

  dma-buf:
   - improve sg_table debugging
   - add tracepoints
   - call clear_page instead of memset
   - start to introduce cgroup memory accounting in heaps
   - remove sysfs stats

  dma-fence:
   - add new helpers

  dp:
   - mst: avoid oob access with vcpi=0

  hdmi:
   - limit infoframes exposure to userspace

  gem:
   - reduce page table overhead with THP
   - fix leak in drm_gem_get_unmapped_area

  gpuvm:
   - API sanitation for rust bindings

  sched:
   - introduce new helpers

  panic:
   - report invalid panic modes
   - add kunit tests

  i915/xe display:
   - Expose sharpness only if num_scalers is >= 2
   - Add initial Xe3P_LPD for NVL
   - BMG FBC support
   - Add MTL+ platforms to support dpll framework
   - fix DIMM_S DRAM decoding on ICL
   - Return to using AUX interrupts
   - PSR/Panel replay refactoring
   - use consolidated HDMI tables
   - Xe3_LPD CD2X divider changes

  xe:
   - vfio: add vfio_pci for intel GPU
   - multi queue support
   - dynamic pagemaps and multi-device SVM
   - expose temp attribs in hwmon
   - NO_COMPRESSION bo flag
   - expose MERT OA unit
   - sysfs survivability refactor
   - SRIOV PF: add MERT support
   - enable SR-IOV VF migration
   - Enable I2C/NVM on Crescent Island
   - Xe3p page reclamation support
   - introduce SRIOV scheduler groups
   - add SoC remap support in system controller
   - insert compiler barriers in GuC code
   - define NVL GuC firmware
   - handle GT resume failure
   - fix drm scheduler layering violations
   - enable GSC loading and PXP for PTL
   - disable GuC Power DCC strategy on PTL
   - unregister drm device on probe error

  i915:
   - move to kernel standard fault injection
   - bump recommended GuC version for DG2 and MTL

  amdgpu:
   - SMUIO 15.x, PSP 15.x support
   - IH 6.1.1/7.1 support
   - MMHUB 3.4/4.2 support
   - GC 11.5.4/12.1 support
   - SDMA 6.1.4/7.1/7.11.4 support
   - JPEG 5.3 support
   - UserQ updates
   - GC 9 gfx queue reset support
   - TTM memory ops parallelization
   - convert legacy logging to new helpers
   - DC analog fixes

  amdkfd:
   - GC 11.5.4/12.1 support
   - SDMA 6.1.4/7.1 support
   - per context support
   - increase kfd process hash table size
   - Reserved SDMA rework

  radeon:
   - convert legacy logging to new helpers
   - use devm for i2c adapters

  msm:
   - GPU:
      - Document a612/RGMU dt bindings
      - UBWC 6.0 support (for A840 / Kaanapali)
      - a225 support
   - DPU:
      - Switch to use virtual planes by default
      - Fix DSI CMD panels on DPU 3.x
      - Rewrite format handling to remove intermediate representation
      - Fix watchdog on DPU 8.x+
      - Fix TE / Vsync source setting on DPU 8.x+
      - Add 3D_Mux on SC7280
      - Kaanapali platform support
      - Fix UBWC register programming
      - Make RM reserve DSPP-enabled mixers for CRTCs with LMs
      - Gamma correction support
   - DP:
      - Enable support for eDP 1.4+ link rate tables
      - Fix MDSS1 DP indices on SA8775P, making them work
      - Fix msm_dp_ctrl_config_msa() to work with LLVM 20
   - DSI:
      - Document QCS8300 as compatible with SA8775P
      - Kaanapali platform support
   - DSI PHY:
      - switch to divider_determine_rate()
   - MDP5:
      - Drop support for MSM8998, SDM660 and SDM630 (switch over to DPU)
   - MDSS:
      - Kaanapali platform support
      - Fix UBWC register programming

  nova-core:
   - Prepare for Turing support. This includes parsing and handling
     Turing-specific firmware headers and sections as well as a Turing
     Falcon HAL implementation
   - Get rid of the Result<impl PinInit<T, E>> anti-pattern
   - Relocate initializer-specific code into the appropriate initializer
   - Use CStr::from_bytes_until_nul() to remove custom helpers
   - Improve handling of unexpected firmware values
   - Clean up redundant debug prints
   - Replace c_str!() with native Rust C-string literals
   - Update nova-core task list

  nova:
   - Align GEM object size to system page size

  tyr:
   - Use generated uAPI bindings for GpuInfo
   - Replace manual sleeps with read_poll_timeout()
   - Replace c_str!() with native Rust C-string literals
   - Suppress warnings for unread fields
   - Fix incorrect register name in print statement

  nouveau:
   - fix big page table support races in PTE management
   - improve reclocking on tegra 186+

  amdxdna:
   - fix suspend race conditions
   - improve handling of zero tail pointers
   - fix cu_idx overwritten during command setup
   - enable hardware context priority
   - remove NPU2 support
   - update message buffer allocation requirements
   - update firmware version check

  ast:
   - support imported cursor buffers
   - big endian fixes

  etnaviv:
   - add PPU flop reset support

  imagination:
   - add AM62P support
   - introduce hw version checks

  ivpu:
   - implement warm boot flow

  panfrost:
   - add bo sync ioctl
   - add GPU_PM_RT support for RZ/G3E SoC

  panthor:
   - add bo sync ioctl
   - enable timestamp propagation
   - scheduler robustness improvements
   - VM termination fixes
   - huge page support

  rockchip:
   - RK3368 HDMI Support
   - get rid of atomic_check fixups
   - RK3506 support
   - RK3576/RK3588 improved HPD handling

  rz-du:
   - RZ/V2H(P) MIPI-DSI Support

  v3d:
   - fix DMA segment size
   - convert to new logging helpers

  mediatek:
   - move DP training to hotplug thread
   - convert logging to new helpers
   - add support for HS speed DSI
   - Genio 510/700/1200-EVK, Radxa NIO-12L HDMI support

  atmel-hlcdc:
   - switch to drmm resource
   - support nomodeset
   - use newer helpers

  hisilicon:
   - fix various DP bugs

  renesas:
   - fix kernel panic on reboot

  exynos:
   - fix vidi_connection_ioctl using wrong device
   - fix vidi_connection deref user ptr
   - fix concurrency regression with vidi_context

  vkms:
   - add configfs support for display configuration"

* tag 'drm-next-2026-02-11' of https://gitlab.freedesktop.org/drm/kernel: (1610 commits)
  drm/xe/pm: Disable D3Cold for BMG only on specific platforms
  drm/xe: Fix kerneldoc for xe_tlb_inval_job_alloc_dep
  drm/xe: Fix kerneldoc for xe_gt_tlb_inval_init_early
  drm/xe: Fix kerneldoc for xe_migrate_exec_queue
  drm/xe/query: Fix topology query pointer advance
  drm/xe/guc: Fix kernel-doc warning in GuC scheduler ABI header
  drm/xe/guc: Fix CFI violation in debugfs access.
  accel/amdxdna: Move RPM resume into job run function
  accel/amdxdna: Fix incorrect DPM level after suspend/resume
  nouveau/vmm: start tracking if the LPT PTE is valid. (v6)
  nouveau/vmm: increase size of vmm pte tracker struct to u32 (v2)
  nouveau/vmm: rewrite pte tracker using a struct and bitfields.
  accel/amdxdna: Fix incorrect error code returned for failed chain command
  accel/amdxdna: Remove hardware context status
  drm/bridge: imx8qxp-pixel-combiner: Fix bailout for imx8qxp_pc_bridge_probe()
  drm/panel: ilitek-ili9882t: Remove duplicate initializers in tianma_il79900a_dsc
  drm/i915/display: fix the pixel normalization handling for xe3p_lpd
  drm/exynos: vidi: use ctx->lock to protect struct vidi_context member variables related to memory alloc/free
  drm/exynos: vidi: fix to avoid directly dereferencing user pointer
  drm/exynos: vidi: use priv->vidi_dev for ctx lookup in vidi_connection_ioctl()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/drm/bridge/inno_hdmi.h  35
-rw-r--r--  include/drm/bridge/samsung-dsim.h  1
-rw-r--r--  include/drm/display/drm_dp_helper.h  3
-rw-r--r--  include/drm/drm_atomic.h  44
-rw-r--r--  include/drm/drm_bridge.h  156
-rw-r--r--  include/drm/drm_connector.h  105
-rw-r--r--  include/drm/drm_device.h  15
-rw-r--r--  include/drm/drm_fb_helper.h  21
-rw-r--r--  include/drm/drm_file.h  7
-rw-r--r--  include/drm/drm_gem.h  45
-rw-r--r--  include/drm/drm_gem_shmem_helper.h  14
-rw-r--r--  include/drm/drm_gpusvm.h  29
-rw-r--r--  include/drm/drm_gpuvm.h  12
-rw-r--r--  include/drm/drm_mode_object.h  3
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h  23
-rw-r--r--  include/drm/drm_of.h  6
-rw-r--r--  include/drm/drm_pagemap.h  106
-rw-r--r--  include/drm/drm_pagemap_util.h  92
-rw-r--r--  include/drm/drm_property.h  1
-rw-r--r--  include/drm/drm_vblank.h  3
-rw-r--r--  include/drm/gpu_scheduler.h  52
-rw-r--r--  include/drm/intel/display_parent_interface.h  104
-rw-r--r--  include/drm/intel/intel_lb_mei_interface.h  3
-rw-r--r--  include/linux/console.h  8
-rw-r--r--  include/linux/dma-buf.h  12
-rw-r--r--  include/linux/dma-fence.h  35
-rw-r--r--  include/linux/dma-heap.h  2
-rw-r--r--  include/linux/fb.h  4
-rw-r--r--  include/linux/host1x.h  2
-rw-r--r--  include/trace/events/dma_buf.h  159
-rw-r--r--  include/uapi/drm/amdgpu_drm.h  25
-rw-r--r--  include/uapi/drm/amdxdna_accel.h  8
-rw-r--r--  include/uapi/drm/panfrost_drm.h  76
-rw-r--r--  include/uapi/drm/panthor_drm.h  157
-rw-r--r--  include/uapi/drm/rocket_accel.h  98
-rw-r--r--  include/uapi/drm/xe_drm.h  95
-rw-r--r--  include/uapi/linux/kfd_ioctl.h  16
-rw-r--r--  include/uapi/linux/kfd_sysfs.h  3
38 files changed, 1362 insertions, 218 deletions
diff --git a/include/drm/bridge/inno_hdmi.h b/include/drm/bridge/inno_hdmi.h
new file mode 100644
index 000000000000..5bbcaeea94e2
--- /dev/null
+++ b/include/drm/bridge/inno_hdmi.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Rockchip Electronics Co., Ltd.
+ */
+
+#ifndef __INNO_HDMI__
+#define __INNO_HDMI__
+
+#include <linux/types.h>
+
+struct device;
+struct drm_encoder;
+struct drm_display_mode;
+struct inno_hdmi;
+
+struct inno_hdmi_plat_ops {
+ void (*enable)(struct device *pdev, struct drm_display_mode *mode);
+};
+
+struct inno_hdmi_phy_config {
+ unsigned long pixelclock;
+ u8 pre_emphasis;
+ u8 voltage_level_control;
+};
+
+struct inno_hdmi_plat_data {
+ const struct inno_hdmi_plat_ops *ops;
+ struct inno_hdmi_phy_config *phy_configs;
+ struct inno_hdmi_phy_config *default_phy_config;
+};
+
+struct inno_hdmi *inno_hdmi_bind(struct device *pdev,
+ struct drm_encoder *encoder,
+ const struct inno_hdmi_plat_data *plat_data);
+#endif /* __INNO_HDMI__ */
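[Editor's note: a hedged sketch of how a Rockchip platform driver might feed
the new inno_hdmi_bind() entry point. All rk3036_* names and the register
values are hypothetical, and the ERR_PTR return convention is an assumption;
only the types and inno_hdmi_bind() itself come from the header above.]

static void rk3036_hdmi_enable(struct device *pdev,
			       struct drm_display_mode *mode)
{
	/* platform-specific clock/mux setup for the adjusted mode */
}

static const struct inno_hdmi_plat_ops rk3036_hdmi_ops = {
	.enable = rk3036_hdmi_enable,
};

static struct inno_hdmi_phy_config rk3036_phy_configs[] = {
	/* hypothetical tuning values, keyed by max pixel clock */
	{ .pixelclock = 74250000,  .pre_emphasis = 0x3c, .voltage_level_control = 0x28 },
	{ .pixelclock = 165000000, .pre_emphasis = 0x3c, .voltage_level_control = 0x38 },
	{ /* sentinel */ }
};

static const struct inno_hdmi_plat_data rk3036_hdmi_plat_data = {
	.ops = &rk3036_hdmi_ops,
	.phy_configs = rk3036_phy_configs,
	.default_phy_config = &rk3036_phy_configs[1],
};

/* in the encoder's bind path (assumes an ERR_PTR-style return): */
hdmi = inno_hdmi_bind(dev, encoder, &rk3036_hdmi_plat_data);
if (IS_ERR(hdmi))
	return PTR_ERR(hdmi);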
diff --git a/include/drm/bridge/samsung-dsim.h b/include/drm/bridge/samsung-dsim.h
index 31d7ed589233..03005e474704 100644
--- a/include/drm/bridge/samsung-dsim.h
+++ b/include/drm/bridge/samsung-dsim.h
@@ -100,7 +100,6 @@ struct samsung_dsim_plat_data {
struct samsung_dsim {
struct mipi_dsi_host dsi_host;
struct drm_bridge bridge;
- struct drm_bridge *out_bridge;
struct device *dev;
struct drm_display_mode mode;
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index 14d2859f0bda..1d0acd58f486 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -206,6 +206,9 @@ drm_dp_is_branch(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
/* DP/eDP DSC support */
u8 drm_dp_dsc_sink_bpp_incr(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
+u32 drm_dp_dsc_slice_count_to_mask(int slice_count);
+u32 drm_dp_dsc_sink_slice_count_mask(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
+ bool is_edp);
u8 drm_dp_dsc_sink_max_slice_count(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE],
bool is_edp);
u8 drm_dp_dsc_sink_line_buf_depth(const u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE]);
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 43783891d359..178f8f62c80f 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -30,7 +30,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_util.h>
-#include <drm/drm_colorop.h>
/**
* struct drm_crtc_commit - track modeset commits on a CRTC
@@ -340,6 +339,11 @@ struct drm_private_state_funcs {
*/
struct drm_private_obj {
/**
+ * @dev: parent DRM device
+ */
+ struct drm_device *dev;
+
+ /**
* @head: List entry used to attach a private object to a &drm_device
* (queued to &drm_mode_config.privobj_list).
*/
@@ -707,6 +711,14 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
struct drm_colorop_state *
drm_atomic_get_colorop_state(struct drm_atomic_state *state,
struct drm_colorop *colorop);
+
+struct drm_colorop_state *
+drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
+struct drm_colorop_state *
+drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
+ struct drm_colorop *colorop);
+
struct drm_connector_state * __must_check
drm_atomic_get_connector_state(struct drm_atomic_state *state,
struct drm_connector *connector);
@@ -804,36 +816,6 @@ drm_atomic_get_new_plane_state(const struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_old_colorop_state - get colorop state, if it exists
- * @state: global atomic state object
- * @colorop: colorop to grab
- *
- * This function returns the old colorop state for the given colorop, or
- * NULL if the colorop is not part of the global atomic state.
- */
-static inline struct drm_colorop_state *
-drm_atomic_get_old_colorop_state(struct drm_atomic_state *state,
- struct drm_colorop *colorop)
-{
- return state->colorops[drm_colorop_index(colorop)].old_state;
-}
-
-/**
- * drm_atomic_get_new_colorop_state - get colorop state, if it exists
- * @state: global atomic state object
- * @colorop: colorop to grab
- *
- * This function returns the new colorop state for the given colorop, or
- * NULL if the colorop is not part of the global atomic state.
- */
-static inline struct drm_colorop_state *
-drm_atomic_get_new_colorop_state(struct drm_atomic_state *state,
- struct drm_colorop *colorop)
-{
- return state->colorops[drm_colorop_index(colorop)].new_state;
-}
-
-/**
* drm_atomic_get_old_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
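[Editor's note: the colorop accessors move from static inlines, which forced
drm_colorop.h into this header, to out-of-line functions. A hedged sketch of
the call pattern in a driver's atomic_check; foo_* is hypothetical, and the
NULL-when-not-in-commit semantics follow the removed kerneldoc above.]

static int foo_atomic_check_colorop(struct drm_atomic_state *state,
				    struct drm_colorop *colorop)
{
	struct drm_colorop_state *old_state, *new_state;

	old_state = drm_atomic_get_old_colorop_state(state, colorop);
	new_state = drm_atomic_get_new_colorop_state(state, colorop);
	if (!new_state)
		return 0;	/* colorop untouched by this commit */

	/* validate the old_state -> new_state transition here */
	return 0;
}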
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index dbafe136833f..4f19f7064ee3 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -614,6 +614,7 @@ struct drm_bridge_funcs {
* controllers for HDMI bridges.
*/
void (*hpd_notify)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
enum drm_connector_status status);
/**
@@ -667,29 +668,113 @@ struct drm_bridge_funcs {
unsigned long long tmds_rate);
/**
- * @hdmi_clear_infoframe:
+ * @hdmi_clear_avi_infoframe:
*
* This callback clears the infoframes in the hardware during commit.
- * It will be called multiple times, once for every disabled infoframe
- * type.
*
* This callback is optional but it must be implemented by bridges that
* set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
*/
- int (*hdmi_clear_infoframe)(struct drm_bridge *bridge,
- enum hdmi_infoframe_type type);
+ int (*hdmi_clear_avi_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_avi_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_avi_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
/**
- * @hdmi_write_infoframe:
+ * @hdmi_clear_hdmi_infoframe:
*
- * Program the infoframe into the hardware. It will be called multiple
- * times, once for every updated infoframe type.
+ * This callback clears the infoframes in the hardware during commit.
*
* This callback is optional but it must be implemented by bridges that
* set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
*/
- int (*hdmi_write_infoframe)(struct drm_bridge *bridge,
- enum hdmi_infoframe_type type,
- const u8 *buffer, size_t len);
+ int (*hdmi_clear_hdmi_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_hdmi_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_hdmi_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_hdr_drm_infoframe:
+ *
+ * This callback clears the infoframes in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_clear_hdr_drm_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_hdr_drm_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_write_hdr_drm_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_spd_infoframe:
+ *
+ * This callback clears the infoframes in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_clear_spd_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_spd_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME flag in their
+ * &drm_bridge->ops.
+ */
+ int (*hdmi_write_spd_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
+
+ /**
+ * @hdmi_clear_audio_infoframe:
+ *
+ * This callback clears the infoframes in the hardware during commit.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_clear_audio_infoframe)(struct drm_bridge *bridge);
+
+ /**
+ * @hdmi_write_audio_infoframe:
+ *
+ * Program the infoframe into the hardware.
+ *
+ * This callback is optional but it must be implemented by bridges that
+ * set the DRM_BRIDGE_OP_HDMI_AUDIO flag in their &drm_bridge->ops.
+ */
+ int (*hdmi_write_audio_infoframe)(struct drm_bridge *bridge,
+ const u8 *buffer, size_t len);
/**
* @hdmi_audio_startup:
@@ -945,7 +1030,11 @@ enum drm_bridge_ops {
/**
* @DRM_BRIDGE_OP_HDMI: The bridge provides HDMI connector operations,
* including infoframes support. Bridges that set this flag must
- * implement the &drm_bridge_funcs->write_infoframe callback.
+ * provide HDMI-related information and implement the
+ * &drm_bridge_funcs->hdmi_clear_avi_infoframe,
+ * &drm_bridge_funcs->hdmi_write_avi_infoframe,
+ * &drm_bridge_funcs->hdmi_clear_hdmi_infoframe and
+ * &drm_bridge_funcs->hdmi_write_hdmi_infoframe callbacks.
*
* Note: currently there can be at most one bridge in a chain that sets
* this bit. This is to simplify corresponding glue code in connector
@@ -957,6 +1046,9 @@ enum drm_bridge_ops {
* Bridges that set this flag must implement the
* &drm_bridge_funcs->hdmi_audio_prepare and
* &drm_bridge_funcs->hdmi_audio_shutdown callbacks.
+ * If the bridge implements @DRM_BRIDGE_OP_HDMI, it must also implement
+ * the &drm_bridge_funcs->hdmi_write_audio_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_audio_infoframe callbacks.
*
* Note: currently there can be at most one bridge in a chain that sets
* this bit. This is to simplify corresponding glue code in connector
@@ -988,6 +1080,18 @@ enum drm_bridge_ops {
* to be present.
*/
DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME: The bridge supports
+ * &drm_bridge_funcs->hdmi_write_hdr_drm_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_hdr_drm_infoframe callbacks.
+ */
+ DRM_BRIDGE_OP_HDMI_HDR_DRM_INFOFRAME = BIT(9),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME: The bridge supports
+ * &drm_bridge_funcs->hdmi_write_spd_infoframe and
+ * &drm_bridge_funcs->hdmi_clear_spd_infoframe callbacks.
+ */
+ DRM_BRIDGE_OP_HDMI_SPD_INFOFRAME = BIT(10),
};
/**
@@ -1026,6 +1130,14 @@ struct drm_bridge {
*/
struct kref refcount;
+ /**
+ * @unplugged:
+ *
+ * Flag to tell if the bridge has been unplugged.
+ * See drm_bridge_enter() and drm_bridge_unplug().
+ */
+ bool unplugged;
+
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
/** @ops: bitmask of operations supported by the bridge */
@@ -1153,6 +1265,17 @@ struct drm_bridge {
* @hpd_cb.
*/
void *hpd_data;
+
+ /**
+ * @next_bridge: Pointer to the following bridge, automatically put
+ * when this bridge is freed (i.e. at destroy time). This is for
+ * drivers needing to store a pointer to the next bridge in the
+ * chain, and ensures any code still holding a reference to this
+ * bridge after its removal cannot use-after-free the next
+ * bridge. Any other bridge pointers stored by the driver must be
+ * put in the .destroy callback by driver code.
+ */
+ struct drm_bridge *next_bridge;
};
static inline struct drm_bridge *
@@ -1161,6 +1284,10 @@ drm_priv_to_bridge(struct drm_private_obj *priv)
return container_of(priv, struct drm_bridge, base);
}
+bool drm_bridge_enter(struct drm_bridge *bridge, int *idx);
+void drm_bridge_exit(int idx);
+void drm_bridge_unplug(struct drm_bridge *bridge);
+
struct drm_bridge *drm_bridge_get(struct drm_bridge *bridge);
void drm_bridge_put(struct drm_bridge *bridge);
@@ -1196,8 +1323,13 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags);
#ifdef CONFIG_OF
+struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
#else
+static inline struct drm_bridge *of_drm_find_and_get_bridge(struct device_node *np)
+{
+ return NULL;
+}
static inline struct drm_bridge *of_drm_find_bridge(struct device_node *np)
{
return NULL;
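[Editor's note: drm_bridge_enter()/drm_bridge_exit() mirror the familiar
drm_dev_enter()/drm_dev_exit() pattern: once drm_bridge_unplug() has set
@unplugged, callbacks bail out instead of touching hardware that may be gone.
A hedged sketch; foo_* is hypothetical and the atomic_enable signature is
assumed from current drm_bridge_funcs.]

static void foo_bridge_atomic_enable(struct drm_bridge *bridge,
				     struct drm_atomic_state *state)
{
	int idx;

	if (!drm_bridge_enter(bridge, &idx))
		return;	/* bridge already unplugged, hardware is gone */

	/* safe to access the bridge hardware here */

	drm_bridge_exit(idx);
}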
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 8f34f4b8183d..7eaec37ae1c7 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -1222,44 +1222,24 @@ struct drm_connector_cec_funcs {
};
/**
- * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ * struct drm_connector_infoframe_funcs - InfoFrame-related functions
*/
-struct drm_connector_hdmi_funcs {
- /**
- * @tmds_char_rate_valid:
- *
- * This callback is invoked at atomic_check time to figure out
- * whether a particular TMDS character rate is supported by the
- * driver.
- *
- * The @tmds_char_rate_valid callback is optional.
- *
- * Returns:
- *
- * Either &drm_mode_status.MODE_OK or one of the failure reasons
- * in &enum drm_mode_status.
- */
- enum drm_mode_status
- (*tmds_char_rate_valid)(const struct drm_connector *connector,
- const struct drm_display_mode *mode,
- unsigned long long tmds_rate);
-
+struct drm_connector_infoframe_funcs {
/**
* @clear_infoframe:
*
* This callback is invoked through
* @drm_atomic_helper_connector_hdmi_update_infoframes during a
* commit to clear the infoframes into the hardware. It will be
- * called multiple times, once for every disabled infoframe
- * type.
+ * called once for each frame type to be disabled.
*
- * The @clear_infoframe callback is optional.
+ * The @clear_infoframe callback is mandatory for AVI and HDMI-VS
+ * InfoFrame types.
*
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*clear_infoframe)(struct drm_connector *connector,
- enum hdmi_infoframe_type type);
+ int (*clear_infoframe)(struct drm_connector *connector);
/**
* @write_infoframe:
@@ -1267,18 +1247,42 @@ struct drm_connector_hdmi_funcs {
* This callback is invoked through
* @drm_atomic_helper_connector_hdmi_update_infoframes during a
* commit to program the infoframes into the hardware. It will
- * be called multiple times, once for every updated infoframe
- * type.
+ * be called for every updated infoframe type.
*
- * The @write_infoframe callback is mandatory.
+ * The @write_infoframe callback is mandatory for AVI and HDMI-VS
+ * InfoFrame types.
*
* Returns:
* 0 on success, a negative error code otherwise
*/
int (*write_infoframe)(struct drm_connector *connector,
- enum hdmi_infoframe_type type,
const u8 *buffer, size_t len);
+};
+
+/**
+ * struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_hdmi_funcs {
+ /**
+ * @tmds_char_rate_valid:
+ *
+ * This callback is invoked at atomic_check time to figure out
+ * whether a particular TMDS character rate is supported by the
+ * driver.
+ *
+ * The @tmds_char_rate_valid callback is optional.
+ *
+ * Returns:
+ *
+ * Either &drm_mode_status.MODE_OK or one of the failure reasons
+ * in &enum drm_mode_status.
+ */
+ enum drm_mode_status
+ (*tmds_char_rate_valid)(const struct drm_connector *connector,
+ const struct drm_display_mode *mode,
+ unsigned long long tmds_rate);
+
/**
* @read_edid:
*
@@ -1293,6 +1297,47 @@ struct drm_connector_hdmi_funcs {
* Valid EDID on success, NULL in case of failure.
*/
const struct drm_edid *(*read_edid)(struct drm_connector *connector);
+
+ /**
+ * @avi:
+ *
+ * Set of callbacks for handling the AVI InfoFrame. These callbacks are
+ * mandatory.
+ */
+ struct drm_connector_infoframe_funcs avi;
+
+ /**
+ * @hdmi:
+ *
+ * Set of callbacks for handling the HDMI Vendor-Specific InfoFrame.
+ * These callbacks are mandatory.
+ */
+ struct drm_connector_infoframe_funcs hdmi;
+
+ /**
+ * @audio:
+ *
+ * Set of callbacks for handling the Audio InfoFrame. These callbacks
+ * are optional, but they are required for drivers which use
+ * drm_atomic_helper_connector_hdmi_update_audio_infoframe().
+ */
+ struct drm_connector_infoframe_funcs audio;
+
+ /**
+ * @hdr_drm:
+ *
+ * Set of callbacks for handling the HDR DRM InfoFrame. These callbacks
+ * are mandatory if HDR output is to be supported.
+ */
+ struct drm_connector_infoframe_funcs hdr_drm;
+
+ /**
+ * @spd:
+ *
+ * Set of callbacks for handling the SPD InfoFrame. These callbacks are
+ * optional.
+ */
+ struct drm_connector_infoframe_funcs spd;
};
/**
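[Editor's note: with the split into struct drm_connector_infoframe_funcs, a
driver fills one ops pair per InfoFrame type instead of multiplexing on an
enum. A hedged sketch with hypothetical foo_* callbacks; note the new
write_infoframe signature no longer carries the type argument.]

static int foo_write_avi_infoframe(struct drm_connector *connector,
				   const u8 *buffer, size_t len)
{
	/* program 'len' bytes of the packed AVI InfoFrame */
	return 0;
}

static int foo_clear_avi_infoframe(struct drm_connector *connector)
{
	return 0;
}

static const struct drm_connector_hdmi_funcs foo_hdmi_funcs = {
	.avi = {
		.write_infoframe = foo_write_avi_infoframe,
		.clear_infoframe = foo_clear_avi_infoframe,
	},
	.hdmi = {
		.write_infoframe = foo_write_avi_infoframe,	/* stand-in */
		.clear_infoframe = foo_clear_avi_infoframe,	/* stand-in */
	},
	/* .audio, .hdr_drm and .spd are optional and may stay zeroed */
};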
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 5af49c5c3778..bc78fb77cc27 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -3,6 +3,9 @@
#include <linux/list.h>
#include <linux/kref.h>
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <linux/mount.h>
+#endif
#include <linux/mutex.h>
#include <linux/idr.h>
#include <linux/sched.h>
@@ -168,6 +171,18 @@ struct drm_device {
*/
struct drm_master *master;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ /**
+ * @huge_mnt:
+ *
+ * Huge tmpfs mountpoint used at GEM object initialization
+ * drm_gem_object_init(). Drivers can call drm_gem_huge_mnt_create() to
+ * create, mount and use it. The default tmpfs mountpoint (`shm_mnt`) is
+ * used if NULL.
+ */
+ struct vfsmount *huge_mnt;
+#endif
+
/**
* @driver_features: per-device driver features
*
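[Editor's note: drivers opt in to THP-backed GEM shmem by creating a
per-device tmpfs mount at probe time. A hedged sketch; the "within_size"
string is an assumption based on the tmpfs huge= mount options, and
foo_drm_probe_thp() is hypothetical.]

static int foo_drm_probe_thp(struct drm_device *drm)
{
	int ret;

	ret = drm_gem_huge_mnt_create(drm, "within_size");
	if (ret)
		drm_warn(drm, "THP-backed GEM unavailable: %d\n", ret);

	return 0;	/* GEM falls back to the default shm_mnt */
}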
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index dd9a18f8de5a..05cca77b7249 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -167,13 +167,6 @@ struct drm_fb_helper {
struct mutex lock;
/**
- * @kernel_fb_list:
- *
- * Entry on the global kernel_fb_helper_list, used for kgdb entry/exit.
- */
- struct list_head kernel_fb_list;
-
- /**
* @delayed_hotplug:
*
* A hotplug was received while fbdev wasn't in control of the DRM
@@ -236,8 +229,6 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
.fb_setcmap = drm_fb_helper_setcmap, \
.fb_blank = drm_fb_helper_blank, \
.fb_pan_display = drm_fb_helper_pan_display, \
- .fb_debug_enter = drm_fb_helper_debug_enter, \
- .fb_debug_leave = drm_fb_helper_debug_leave, \
.fb_ioctl = drm_fb_helper_ioctl
#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -280,8 +271,6 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
-int drm_fb_helper_debug_enter(struct fb_info *info);
-int drm_fb_helper_debug_leave(struct fb_info *info);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -387,16 +376,6 @@ static inline int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper)
{
return 0;
}
-
-static inline int drm_fb_helper_debug_enter(struct fb_info *info)
-{
- return 0;
-}
-
-static inline int drm_fb_helper_debug_leave(struct fb_info *info)
-{
- return 0;
-}
#endif
#endif
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 1a3018e4a537..6ee70ad65e1f 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -33,6 +33,7 @@
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/idr.h>
+#include <linux/xarray.h>
#include <uapi/drm/drm.h>
@@ -316,10 +317,8 @@ struct drm_file {
/** @table_lock: Protects @object_idr. */
spinlock_t table_lock;
- /** @syncobj_idr: Mapping of sync object handles to object pointers. */
- struct idr syncobj_idr;
- /** @syncobj_table_lock: Protects @syncobj_idr. */
- spinlock_t syncobj_table_lock;
+ /** @syncobj_xa: Mapping of sync object handles to object pointers. */
+ struct xarray syncobj_xa;
/** @filp: Pointer to the core file structure. */
struct file *filp;
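[Editor's note: replacing the IDR/spinlock pair with an xarray lets handle
lookup run locklessly under RCU, with the xarray taking its own internal lock
for stores. A hedged sketch of what a lookup can now look like, assuming the
usual kref-based drm_syncobj refcount; this is not the actual
drm_syncobj_find() body.]

struct drm_syncobj *syncobj;

rcu_read_lock();
syncobj = xa_load(&file_private->syncobj_xa, handle);
if (syncobj && !kref_get_unless_zero(&syncobj->refcount))
	syncobj = NULL;	/* raced with destruction */
rcu_read_unlock();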
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 8d48d2af2649..86f5846154f7 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -40,6 +40,9 @@
#include <linux/list.h>
#include <linux/mutex.h>
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#include <drm/drm_device.h>
+#endif
#include <drm/drm_vma_manager.h>
struct iosys_map;
@@ -469,6 +472,7 @@ struct drm_gem_object {
.poll = drm_poll,\
.read = drm_read,\
.llseek = noop_llseek,\
+ .get_unmapped_area = drm_gem_get_unmapped_area,\
.mmap = drm_gem_mmap, \
.fop_flags = FOP_UNSIGNED_OFFSET
@@ -491,13 +495,40 @@ struct drm_gem_object {
DRM_GEM_FOPS,\
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+int drm_gem_huge_mnt_create(struct drm_device *dev, const char *value);
+#else
+static inline int drm_gem_huge_mnt_create(struct drm_device *dev,
+ const char *value)
+{
+ return 0;
+}
+#endif
+
+/**
+ * drm_gem_get_huge_mnt - Get the huge tmpfs mountpoint used by a DRM device
+ * @dev: DRM device
+ *
+ * This function gets the huge tmpfs mountpoint used by DRM device @dev. A huge
+ * tmpfs mountpoint is used instead of `shm_mnt` after a successful call to
+ * drm_gem_huge_mnt_create() when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
+ *
+ * Returns:
+ * The huge tmpfs mountpoint in use, NULL otherwise.
+ */
+static inline struct vfsmount *drm_gem_get_huge_mnt(struct drm_device *dev)
+{
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ return dev->huge_mnt;
+#else
+ return NULL;
+#endif
+}
+
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
-int drm_gem_object_init_with_mnt(struct drm_device *dev,
- struct drm_gem_object *obj, size_t size,
- struct vfsmount *gemfs);
void drm_gem_private_object_init(struct drm_device *dev,
struct drm_gem_object *obj, size_t size);
void drm_gem_private_object_fini(struct drm_gem_object *obj);
@@ -507,6 +538,14 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
struct vm_area_struct *vma);
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+#ifdef CONFIG_MMU
+unsigned long drm_gem_get_unmapped_area(struct file *filp, unsigned long uaddr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+#else
+#define drm_gem_get_unmapped_area NULL
+#endif
+
/**
* drm_gem_object_get - acquire a GEM buffer object reference
* @obj: GEM buffer object
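[Editor's note: DRM_GEM_FOPS now wires up .get_unmapped_area, so on
CONFIG_MMU kernels any driver using the stock macro picks up THP-friendly,
huge-page-aligned mmap addresses with no driver change:]

/* foo_driver_fops is hypothetical; the macro expansion is shown above */
DEFINE_DRM_GEM_FOPS(foo_driver_fops);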
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 589f7bfe7506..5ccdae21b94a 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -109,9 +109,6 @@ struct drm_gem_shmem_object {
int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shmem, size_t size);
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
-struct drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *dev,
- size_t size,
- struct vfsmount *gemfs);
void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
@@ -303,4 +300,15 @@ struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
.gem_prime_import = drm_gem_shmem_prime_import_no_map, \
.dumb_create = drm_gem_shmem_dumb_create
+/*
+ * KUnit helpers
+ */
+
+#if IS_ENABLED(CONFIG_KUNIT)
+int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
+void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, struct iosys_map *map);
+int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv);
+int drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+#endif
+
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
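[Editor's note: exporting vmap/madvise/purge under CONFIG_KUNIT lets tests
exercise the shmem object lifecycle directly. A hedged test sketch, assuming
a drm_device fixture in test->priv, e.g. from drm_kunit_helper_alloc_drm_device().]

#include <kunit/test.h>
#include <drm/drm_gem_shmem_helper.h>

static void shmem_vmap_test(struct kunit *test)
{
	struct drm_device *drm = test->priv;	/* fixture assumption */
	struct drm_gem_shmem_object *shmem;
	struct iosys_map map;

	shmem = drm_gem_shmem_create(drm, PAGE_SIZE);
	KUNIT_ASSERT_FALSE(test, IS_ERR(shmem));

	KUNIT_ASSERT_EQ(test, drm_gem_shmem_vmap(shmem, &map), 0);
	iosys_map_memset(&map, 0, 0xaa, PAGE_SIZE);
	drm_gem_shmem_vunmap(shmem, &map);

	drm_gem_object_put(&shmem->base);
}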
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index 632e100e6efb..2578ac92a8d4 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -328,6 +328,35 @@ void drm_gpusvm_free_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_pages *svm_pages,
unsigned long npages);
+/**
+ * enum drm_gpusvm_scan_result - Scan result from the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_UNPOPULATED: At least one page was not present or inaccessible.
+ * @DRM_GPUSVM_SCAN_EQUAL: All pages belong to the struct dev_pagemap indicated as
+ * the @pagemap argument to the drm_gpusvm_scan_mm() function.
+ * @DRM_GPUSVM_SCAN_OTHER: All pages belong to exactly one dev_pagemap, which is
+ * *NOT* the @pagemap argument to drm_gpusvm_scan_mm(). All pages belong to
+ * the same device private owner.
+ * @DRM_GPUSVM_SCAN_SYSTEM: All pages are present and system pages.
+ * @DRM_GPUSVM_SCAN_MIXED_DEVICE: All pages are device pages and belong to at least
+ * two different struct dev_pagemaps. All pages belong to the same device private
+ * owner.
+ * @DRM_GPUSVM_SCAN_MIXED: Pages are present and are a mix of system pages
+ * and device-private pages. All device-private pages belong to the same device
+ * private owner.
+ */
+enum drm_gpusvm_scan_result {
+ DRM_GPUSVM_SCAN_UNPOPULATED,
+ DRM_GPUSVM_SCAN_EQUAL,
+ DRM_GPUSVM_SCAN_OTHER,
+ DRM_GPUSVM_SCAN_SYSTEM,
+ DRM_GPUSVM_SCAN_MIXED_DEVICE,
+ DRM_GPUSVM_SCAN_MIXED,
+};
+
+enum drm_gpusvm_scan_result drm_gpusvm_scan_mm(struct drm_gpusvm_range *range,
+ void *dev_private_owner,
+ const struct dev_pagemap *pagemap);
+
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
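[Editor's note: drm_gpusvm_scan_mm() classifies where a range's pages
currently live, letting migration code skip work or pick a copy direction. A
hedged sketch of a consumer; foo_migrate_* are hypothetical driver helpers.]

static int foo_ensure_in_devmem(struct drm_gpusvm_range *range,
				void *owner,
				const struct dev_pagemap *pagemap)
{
	switch (drm_gpusvm_scan_mm(range, owner, pagemap)) {
	case DRM_GPUSVM_SCAN_EQUAL:
		return 0;	/* already resident in the target pagemap */
	case DRM_GPUSVM_SCAN_SYSTEM:
	case DRM_GPUSVM_SCAN_MIXED:
		return foo_migrate_to_devmem(range);	/* hypothetical */
	case DRM_GPUSVM_SCAN_OTHER:
	case DRM_GPUSVM_SCAN_MIXED_DEVICE:
		return foo_migrate_p2p(range);		/* hypothetical */
	case DRM_GPUSVM_SCAN_UNPOPULATED:
	default:
		return -EAGAIN;	/* fault pages in first */
	}
}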
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index fdfc575b2603..655bd9104ffb 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -736,8 +736,8 @@ drm_gpuvm_bo_create(struct drm_gpuvm *gpuvm,
struct drm_gem_object *obj);
struct drm_gpuvm_bo *
-drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm,
- struct drm_gem_object *obj);
+drm_gpuvm_bo_obtain_locked(struct drm_gpuvm *gpuvm,
+ struct drm_gem_object *obj);
struct drm_gpuvm_bo *
drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *vm_bo);
@@ -1121,7 +1121,7 @@ void drm_gpuva_ops_free(struct drm_gpuvm *gpuvm,
struct drm_gpuva_ops *ops);
static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
- struct drm_gpuva_op_map *op)
+ const struct drm_gpuva_op_map *op)
{
va->va.addr = op->va.addr;
va->va.range = op->va.range;
@@ -1265,13 +1265,13 @@ int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
- struct drm_gpuva_op_map *op);
+ const struct drm_gpuva_op_map *op);
void drm_gpuva_remap(struct drm_gpuva *prev,
struct drm_gpuva *next,
- struct drm_gpuva_op_remap *op);
+ const struct drm_gpuva_op_remap *op);
-void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+void drm_gpuva_unmap(const struct drm_gpuva_op_unmap *op);
/**
* drm_gpuva_op_remap_to_unmap_range() - Helper to get the start and range of
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index c68edbd126d0..44a0d6f8d01f 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -133,6 +133,9 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
int drm_object_property_get_default_value(struct drm_mode_object *obj,
struct drm_property *property,
uint64_t *val);
+int drm_object_immutable_property_get_value(struct drm_mode_object *obj,
+ struct drm_property *property,
+ uint64_t *val);
void drm_object_attach_property(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index fe32854b7ffe..3e68213958dd 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -52,11 +52,6 @@ struct drm_scanout_buffer;
struct drm_writeback_connector;
struct drm_writeback_job;
-enum mode_set_atomic {
- LEAVE_ATOMIC_MODE_SET,
- ENTER_ATOMIC_MODE_SET,
-};
-
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
@@ -254,24 +249,6 @@ struct drm_crtc_helper_funcs {
struct drm_framebuffer *old_fb);
/**
- * @mode_set_base_atomic:
- *
- * This callback is used by the fbdev helpers to set a new framebuffer
- * and scanout without sleeping, i.e. from an atomic calling context. It
- * is only used to implement kgdb support.
- *
- * This callback is optional and only needed for kgdb support in the fbdev
- * helpers.
- *
- * RETURNS:
- *
- * 0 on success or a negative error code on failure.
- */
- int (*mode_set_base_atomic)(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, int x, int y,
- enum mode_set_atomic);
-
- /**
* @disable:
*
* This callback should be used to disable the CRTC. With the atomic
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 7f0256dae3f1..f2f2bf82eff9 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -5,6 +5,7 @@
#include <linux/err.h>
#include <linux/of_graph.h>
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_DRM_PANEL_BRIDGE)
+#include <linux/of.h>
#include <drm/drm_bridge.h>
#endif
@@ -170,9 +171,12 @@ static inline int drm_of_panel_bridge_remove(const struct device_node *np,
if (!remote)
return -ENODEV;
- bridge = of_drm_find_bridge(remote);
+ bridge = of_drm_find_and_get_bridge(remote);
drm_panel_bridge_remove(bridge);
+ drm_bridge_put(bridge);
+ of_node_put(remote);
+
return 0;
#else
return -EINVAL;
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index eb29e5309f0a..2baf0861f78f 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -10,6 +10,8 @@
struct dma_fence;
struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_dev_hold;
struct drm_pagemap_zdd;
struct device;
@@ -124,17 +126,49 @@ struct drm_pagemap_ops {
unsigned long start, unsigned long end,
struct mm_struct *mm,
unsigned long timeslice_ms);
+ /**
+ * @destroy: Destroy the drm_pagemap and associated resources.
+ * @dpagemap: The drm_pagemap to destroy.
+ * @is_atomic_or_reclaim: The function may be called from
+ * atomic- or reclaim context.
+ *
+ * The implementation should take care not to attempt to
+ * destroy resources that may already have been destroyed
+ * using devm_ callbacks, since this function may be called
+ * after the underlying struct device has been unbound.
+ * If the implementation defers the execution to a work item
+ * to avoid locking issues, then it must make sure the work
+ * items are flushed before module exit. If the destroy call
+ * happens after the provider's pci_remove() callback has
+ * been executed, a module reference and drm device reference is
+ * held across the destroy callback.
+ */
+ void (*destroy)(struct drm_pagemap *dpagemap,
+ bool is_atomic_or_reclaim);
};
/**
* struct drm_pagemap: Additional information for a struct dev_pagemap
* used for device p2p handshaking.
* @ops: The struct drm_pagemap_ops.
- * @dev: The struct drevice owning the device-private memory.
+ * @ref: Reference count.
+ * @drm: The struct drm device owning the device-private memory.
+ * @pagemap: Pointer to the underlying dev_pagemap.
+ * @dev_hold: Pointer to a struct drm_pagemap_dev_hold for
+ * device referencing.
+ * @cache: Back-pointer to the &struct drm_pagemap_cache used for this
+ * &struct drm_pagemap. May be NULL if no cache is used.
+ * @shrink_link: Link into the shrinker's list of drm_pagemaps. Only
+ * used if also using a pagemap cache.
*/
struct drm_pagemap {
const struct drm_pagemap_ops *ops;
- struct device *dev;
+ struct kref ref;
+ struct drm_device *drm;
+ struct dev_pagemap *pagemap;
+ struct drm_pagemap_dev_hold *dev_hold;
+ struct drm_pagemap_cache *cache;
+ struct list_head shrink_link;
};
struct drm_pagemap_devmem;
@@ -211,8 +245,19 @@ struct drm_pagemap_devmem_ops {
#if IS_ENABLED(CONFIG_ZONE_DEVICE)
+int drm_pagemap_init(struct drm_pagemap *dpagemap,
+ struct dev_pagemap *pagemap,
+ struct drm_device *drm,
+ const struct drm_pagemap_ops *ops);
+
+struct drm_pagemap *drm_pagemap_create(struct drm_device *drm,
+ struct dev_pagemap *pagemap,
+ const struct drm_pagemap_ops *ops);
+
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+void drm_pagemap_put(struct drm_pagemap *dpagemap);
+
#else
static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
@@ -220,9 +265,42 @@ static inline struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page
return NULL;
}
+static inline void drm_pagemap_put(struct drm_pagemap *dpagemap)
+{
+}
+
#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
/**
+ * drm_pagemap_get() - Obtain a reference on a struct drm_pagemap
+ * @dpagemap: Pointer to the struct drm_pagemap, or NULL.
+ *
+ * Return: Pointer to the struct drm_pagemap, or NULL.
+ */
+static inline struct drm_pagemap *
+drm_pagemap_get(struct drm_pagemap *dpagemap)
+{
+ if (likely(dpagemap))
+ kref_get(&dpagemap->ref);
+
+ return dpagemap;
+}
+
+/**
+ * drm_pagemap_get_unless_zero() - Obtain a reference on a struct drm_pagemap
+ * unless the current reference count is zero.
+ * @dpagemap: Pointer to the drm_pagemap or NULL.
+ *
+ * Return: A pointer to @dpagemap if the reference count was successfully
+ * incremented. NULL if @dpagemap was NULL, or its refcount was 0.
+ */
+static inline struct drm_pagemap * __must_check
+drm_pagemap_get_unless_zero(struct drm_pagemap *dpagemap)
+{
+ return (dpagemap && kref_get_unless_zero(&dpagemap->ref)) ? dpagemap : NULL;
+}
+
+/**
* struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
*
* @dev: Pointer to the device structure which device memory allocation belongs to
@@ -246,13 +324,29 @@ struct drm_pagemap_devmem {
struct dma_fence *pre_migrate_fence;
};
+/**
+ * struct drm_pagemap_migrate_details - Details to govern migration.
+ * @timeslice_ms: The time requested for the migrated pagemap pages to
+ * be present in @mm before being allowed to be migrated back.
+ * @can_migrate_same_pagemap: Whether the copy function, as indicated by
+ * the @source_peer_migrates flag, can migrate device pages within a
+ * single drm_pagemap.
+ * @source_peer_migrates: Whether, on p2p migration, the source drm_pagemap
+ * should use the copy_to_ram() callback rather than the destination
+ * drm_pagemap using the copy_to_devmem() callback.
+ */
+struct drm_pagemap_migrate_details {
+ unsigned long timeslice_ms;
+ u32 can_migrate_same_pagemap : 1;
+ u32 source_peer_migrates : 1;
+};
+
#if IS_ENABLED(CONFIG_ZONE_DEVICE)
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
struct mm_struct *mm,
unsigned long start, unsigned long end,
- unsigned long timeslice_ms,
- void *pgmap_owner);
+ const struct drm_pagemap_migrate_details *mdetails);
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
@@ -269,6 +363,10 @@ int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
struct mm_struct *mm,
unsigned long timeslice_ms);
+void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim);
+
+int drm_pagemap_reinit(struct drm_pagemap *dpagemap);
+
#endif /* IS_ENABLED(CONFIG_ZONE_DEVICE) */
#endif
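[Editor's note: drm_pagemap is now reference counted; the _unless_zero
variant covers lookup paths racing teardown. A hedged sketch of the pattern,
with 'candidate' standing in for whatever lookup produced the pointer.]

struct drm_pagemap *dpagemap;

dpagemap = drm_pagemap_get_unless_zero(candidate);
if (!dpagemap)
	return -ENOENT;		/* raced with the last put */

/* ... safe to use dpagemap->pagemap, dpagemap->drm here ... */

drm_pagemap_put(dpagemap);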
diff --git a/include/drm/drm_pagemap_util.h b/include/drm/drm_pagemap_util.h
new file mode 100644
index 000000000000..19169b42b891
--- /dev/null
+++ b/include/drm/drm_pagemap_util.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2025 Intel Corporation
+ */
+
+#ifndef _DRM_PAGEMAP_UTIL_H_
+#define _DRM_PAGEMAP_UTIL_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct drm_device;
+struct drm_pagemap;
+struct drm_pagemap_cache;
+struct drm_pagemap_owner;
+struct drm_pagemap_shrinker;
+
+/**
+ * struct drm_pagemap_peer - Structure representing a fast interconnect peer
+ * @list: Pointer to a &struct drm_pagemap_owner_list used to keep track of peers
+ * @link: List link for @list's list of peers.
+ * @owner: Pointer to a &struct drm_pagemap_owner, common for a set of peers having
+ * fast interconnects.
+ * @private: Pointer private to the struct embedding this struct.
+ */
+struct drm_pagemap_peer {
+ struct drm_pagemap_owner_list *list;
+ struct list_head link;
+ struct drm_pagemap_owner *owner;
+ void *private;
+};
+
+/**
+ * struct drm_pagemap_owner_list - Keeping track of peers and owners
+ * @peer: List of peers.
+ *
+ * The owner list defines the scope where we identify peers having fast interconnects
+ * and a common owner. Typically a driver has a single global owner list to
+ * keep track of common owners for the driver's pagemaps.
+ */
+struct drm_pagemap_owner_list {
+ /** @lock: Mutex protecting the @peers list. */
+ struct mutex lock;
+ /** @peers: List of peers. */
+ struct list_head peers;
+};
+
+/*
+ * Convenience macro to define an owner list.
+ * Typically the owner list is statically declared
+ * driver-wide.
+ */
+#define DRM_PAGEMAP_OWNER_LIST_DEFINE(_name) \
+ struct drm_pagemap_owner_list _name = { \
+ .lock = __MUTEX_INITIALIZER((_name).lock), \
+ .peers = LIST_HEAD_INIT((_name).peers) }
+
+void drm_pagemap_shrinker_add(struct drm_pagemap *dpagemap);
+
+int drm_pagemap_cache_lock_lookup(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_unlock_lookup(struct drm_pagemap_cache *cache);
+
+struct drm_pagemap_shrinker *drm_pagemap_shrinker_create_devm(struct drm_device *drm);
+
+struct drm_pagemap_cache *drm_pagemap_cache_create_devm(struct drm_pagemap_shrinker *shrinker);
+
+struct drm_pagemap *drm_pagemap_get_from_cache(struct drm_pagemap_cache *cache);
+
+void drm_pagemap_cache_set_pagemap(struct drm_pagemap_cache *cache, struct drm_pagemap *dpagemap);
+
+struct drm_pagemap *drm_pagemap_get_from_cache_if_active(struct drm_pagemap_cache *cache);
+
+#ifdef CONFIG_PROVE_LOCKING
+
+void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap);
+
+#else
+
+static inline void drm_pagemap_shrinker_might_lock(struct drm_pagemap *dpagemap)
+{
+}
+
+#endif /* CONFIG_PROVE_LOCKING */
+
+void drm_pagemap_release_owner(struct drm_pagemap_peer *peer);
+
+int drm_pagemap_acquire_owner(struct drm_pagemap_peer *peer,
+ struct drm_pagemap_owner_list *owner_list,
+ bool (*has_interconnect)(struct drm_pagemap_peer *peer1,
+ struct drm_pagemap_peer *peer2));
+#endif
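[Editor's note: peers sharing a fast interconnect end up with a common
drm_pagemap_owner; the driver supplies the connectivity predicate. A hedged
sketch; foo_* names are hypothetical and the private-pointer comparison is a
placeholder for a real fabric-id check.]

static DRM_PAGEMAP_OWNER_LIST_DEFINE(foo_owner_list);

static bool foo_has_interconnect(struct drm_pagemap_peer *peer1,
				 struct drm_pagemap_peer *peer2)
{
	/* hypothetical: compare fabric ids stashed in peer->private */
	return peer1->private == peer2->private;
}

/* at pagemap init: */
err = drm_pagemap_acquire_owner(&foo->peer, &foo_owner_list,
				foo_has_interconnect);

/* at teardown: */
drm_pagemap_release_owner(&foo->peer);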
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 082f29156b3e..aa49b5a42bb5 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -284,6 +284,7 @@ int drm_property_replace_blob_from_id(struct drm_device *dev,
uint64_t blob_id,
ssize_t expected_size,
ssize_t expected_elem_size,
+ ssize_t max_size,
bool *replaced);
int drm_property_replace_global_blob(struct drm_device *dev,
struct drm_property_blob **replace,
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index ffa564d79638..2fcef9c0f5b1 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -302,8 +302,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe);
bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
int drm_crtc_vblank_get(struct drm_crtc *crtc);
void drm_crtc_vblank_put(struct drm_crtc *crtc);
-void drm_wait_one_vblank(struct drm_device *dev, unsigned int pipe);
-void drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
+int drm_crtc_wait_one_vblank(struct drm_crtc *crtc);
void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on_config(struct drm_crtc *crtc,
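[Editor's note: drm_crtc_wait_one_vblank() now returns an error code and the
pipe-based drm_wait_one_vblank() is gone; a sketch of the updated call site,
with the failure reason in the comment being an assumption.]

int ret;

ret = drm_crtc_wait_one_vblank(crtc);
if (ret)
	drm_warn(crtc->dev, "vblank wait failed: %d\n", ret);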
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index fb88301b3c45..78e07c2507c7 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -645,6 +645,7 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, int errno);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
+bool drm_sched_is_stopped(struct drm_gpu_scheduler *sched);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
@@ -674,6 +675,7 @@ bool drm_sched_job_has_dependency(struct drm_sched_job *job,
struct dma_fence *fence);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+bool drm_sched_job_is_signaled(struct drm_sched_job *job);
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
int threshold)
@@ -698,4 +700,54 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
unsigned int num_sched_list);
+/**
+ * struct drm_sched_pending_job_iter - DRM scheduler pending job iterator state
+ * @sched: DRM scheduler associated with pending job iterator
+ */
+struct drm_sched_pending_job_iter {
+ struct drm_gpu_scheduler *sched;
+};
+
+/* Drivers should never call this directly */
+static inline struct drm_sched_pending_job_iter
+__drm_sched_pending_job_iter_begin(struct drm_gpu_scheduler *sched)
+{
+ struct drm_sched_pending_job_iter iter = {
+ .sched = sched,
+ };
+
+ WARN_ON(!drm_sched_is_stopped(sched));
+ return iter;
+}
+
+/* Drivers should never call this directly */
+static inline void
+__drm_sched_pending_job_iter_end(const struct drm_sched_pending_job_iter iter)
+{
+ WARN_ON(!drm_sched_is_stopped(iter.sched));
+}
+
+DEFINE_CLASS(drm_sched_pending_job_iter, struct drm_sched_pending_job_iter,
+ __drm_sched_pending_job_iter_end(_T),
+ __drm_sched_pending_job_iter_begin(__sched),
+ struct drm_gpu_scheduler *__sched);
+static inline void *
+class_drm_sched_pending_job_iter_lock_ptr(class_drm_sched_pending_job_iter_t *_T)
+{ return _T; }
+#define class_drm_sched_pending_job_iter_is_conditional false
+
+/**
+ * drm_sched_for_each_pending_job() - Iterator for each pending job in scheduler
+ * @__job: Current pending job being iterated over
+ * @__sched: DRM scheduler to iterate over pending jobs
+ * @__entity: DRM scheduler entity to filter jobs, NULL indicates no filter
+ *
+ * Iterator for each pending job in the scheduler, filtering on an entity,
+ * and enforcing that the scheduler is fully stopped
+ */
+#define drm_sched_for_each_pending_job(__job, __sched, __entity) \
+ scoped_guard(drm_sched_pending_job_iter, (__sched)) \
+ list_for_each_entry((__job), &(__sched)->pending_list, list) \
+ for_each_if(!(__entity) || (__job)->entity == (__entity))
+
#endif
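[Editor's note: the scoped-guard iterator WARNs via drm_sched_is_stopped()
if the pending list is walked while the scheduler is running. A hedged sketch
of a driver reset path; foo_requeue_job() is a hypothetical helper.]

struct drm_sched_job *job;

drm_sched_stop(sched, NULL);

drm_sched_for_each_pending_job(job, sched, entity) {
	if (drm_sched_job_is_signaled(job))
		continue;
	foo_requeue_job(job);	/* hypothetical driver helper */
}

drm_sched_start(sched, 0);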
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
index 26bedc360044..ce946859a3a9 100644
--- a/include/drm/intel/display_parent_interface.h
+++ b/include/drm/intel/display_parent_interface.h
@@ -6,9 +6,55 @@
#include <linux/types.h>
+struct dma_fence;
+struct drm_crtc;
struct drm_device;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_plane_state;
+struct drm_scanout_buffer;
+struct i915_vma;
+struct intel_hdcp_gsc_context;
+struct intel_initial_plane_config;
+struct intel_panic;
+struct intel_stolen_node;
struct ref_tracker;
+/* Keep struct definitions sorted */
+
+struct intel_display_hdcp_interface {
+ ssize_t (*gsc_msg_send)(struct intel_hdcp_gsc_context *gsc_context,
+ void *msg_in, size_t msg_in_len,
+ void *msg_out, size_t msg_out_len);
+ bool (*gsc_check_status)(struct drm_device *drm);
+ struct intel_hdcp_gsc_context *(*gsc_context_alloc)(struct drm_device *drm);
+ void (*gsc_context_free)(struct intel_hdcp_gsc_context *gsc_context);
+};
+
+struct intel_display_initial_plane_interface {
+ void (*vblank_wait)(struct drm_crtc *crtc);
+ struct drm_gem_object *(*alloc_obj)(struct drm_device *drm, struct intel_initial_plane_config *plane_config);
+ int (*setup)(struct drm_plane_state *plane_state, struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer *fb, struct i915_vma *vma);
+ void (*config_fini)(struct intel_initial_plane_config *plane_configs);
+};
+
+struct intel_display_irq_interface {
+ bool (*enabled)(struct drm_device *drm);
+ void (*synchronize)(struct drm_device *drm);
+};
+
+struct intel_display_panic_interface {
+ struct intel_panic *(*alloc)(void);
+ int (*setup)(struct intel_panic *panic, struct drm_scanout_buffer *sb);
+ void (*finish)(struct intel_panic *panic);
+};
+
+struct intel_display_pc8_interface {
+ void (*block)(struct drm_device *drm);
+ void (*unblock)(struct drm_device *drm);
+};
+
struct intel_display_rpm_interface {
struct ref_tracker *(*get)(const struct drm_device *drm);
struct ref_tracker *(*get_raw)(const struct drm_device *drm);
@@ -25,6 +71,28 @@ struct intel_display_rpm_interface {
void (*assert_unblock)(const struct drm_device *drm);
};
+struct intel_display_rps_interface {
+ void (*boost_if_not_started)(struct dma_fence *fence);
+ void (*mark_interactive)(struct drm_device *drm, bool interactive);
+ void (*ilk_irq_handler)(struct drm_device *drm);
+};
+
+struct intel_display_stolen_interface {
+ int (*insert_node_in_range)(struct intel_stolen_node *node, u64 size,
+ unsigned int align, u64 start, u64 end);
+ int (*insert_node)(struct intel_stolen_node *node, u64 size, unsigned int align); /* Optional */
+ void (*remove_node)(struct intel_stolen_node *node);
+ bool (*initialized)(struct drm_device *drm);
+ bool (*node_allocated)(const struct intel_stolen_node *node);
+ u64 (*node_offset)(const struct intel_stolen_node *node);
+ u64 (*area_address)(struct drm_device *drm); /* Optional */
+ u64 (*area_size)(struct drm_device *drm); /* Optional */
+ u64 (*node_address)(const struct intel_stolen_node *node);
+ u64 (*node_size)(const struct intel_stolen_node *node);
+ struct intel_stolen_node *(*node_alloc)(struct drm_device *drm);
+ void (*node_free)(const struct intel_stolen_node *node);
+};
+
/**
* struct intel_display_parent_interface - services parent driver provides to display
*
@@ -38,8 +106,44 @@ struct intel_display_rpm_interface {
* check the optional pointers.
*/
struct intel_display_parent_interface {
+ /** @hdcp: HDCP GSC interface */
+ const struct intel_display_hdcp_interface *hdcp;
+
+ /** @initial_plane: Initial plane interface */
+ const struct intel_display_initial_plane_interface *initial_plane;
+
+ /** @irq: IRQ interface */
+ const struct intel_display_irq_interface *irq;
+
+ /** @panic: Panic interface */
+ const struct intel_display_panic_interface *panic;
+
+ /** @pc8: PC8 interface. Optional. */
+ const struct intel_display_pc8_interface *pc8;
+
/** @rpm: Runtime PM functions */
const struct intel_display_rpm_interface *rpm;
+
+ /** @rps: RPS interface. Optional. */
+ const struct intel_display_rps_interface *rps;
+
+ /** @stolen: Stolen memory. */
+ const struct intel_display_stolen_interface *stolen;
+
+ /* Generic independent functions */
+ struct {
+ /** @fence_priority_display: Set display priority. Optional. */
+ void (*fence_priority_display)(struct dma_fence *fence);
+
+ /** @has_auxccs: Are AuxCCS formats supported by the parent. Optional. */
+ bool (*has_auxccs)(struct drm_device *drm);
+
+ /** @has_fenced_regions: Support legacy fencing? Optional. */
+ bool (*has_fenced_regions)(struct drm_device *drm);
+
+ /** @vgpu_active: Is vGPU active? Optional. */
+ bool (*vgpu_active)(struct drm_device *drm);
+ };
};
#endif
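
A minimal caller sketch (not part of the patch; the wrapper name is invented): since several of the interfaces above are documented as optional, display code is expected to NULL-check the pointer before calling through it:

static void display_pc8_block(struct drm_device *drm,
			      const struct intel_display_parent_interface *parent)
{
	/* pc8 is optional; parents without PC8 support leave it NULL */
	if (parent->pc8)
		parent->pc8->block(drm);
}
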
diff --git a/include/drm/intel/intel_lb_mei_interface.h b/include/drm/intel/intel_lb_mei_interface.h
index d65be2cba2ab..0850738a30fc 100644
--- a/include/drm/intel/intel_lb_mei_interface.h
+++ b/include/drm/intel/intel_lb_mei_interface.h
@@ -53,7 +53,8 @@ enum intel_lb_status {
*/
struct intel_lb_component_ops {
/**
- * push_payload - Sends a payload to the authentication firmware
+ * @push_payload: Sends a payload to the authentication firmware
+ *
* @dev: Device struct corresponding to the mei device
* @type: Payload type (see &enum intel_lb_type)
* @flags: Payload flags bitmap (e.g. %INTEL_LB_FLAGS_IS_PERSISTENT)
diff --git a/include/linux/console.h b/include/linux/console.h
index f882833bedf0..83882c615408 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -79,12 +79,6 @@ enum vc_intensity;
* characters. (optional)
* @con_invert_region: invert a region of length @count on @vc starting at @p.
* (optional)
- * @con_debug_enter: prepare the console for the debugger. This includes, but
- * is not limited to, unblanking the console, loading an
- * appropriate palette, and allowing debugger generated output.
- * (optional)
- * @con_debug_leave: restore the console to its pre-debug state as closely as
- * possible. (optional)
*/
struct consw {
struct module *owner;
@@ -123,8 +117,6 @@ struct consw {
enum vc_intensity intensity,
bool blink, bool underline, bool reverse, bool italic);
void (*con_invert_region)(struct vc_data *vc, u16 *p, int count);
- void (*con_debug_enter)(struct vc_data *vc);
- void (*con_debug_leave)(struct vc_data *vc);
};
extern const struct consw *conswitchp;
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 0bc492090237..91f4939db89b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -429,18 +429,6 @@ struct dma_buf {
__poll_t active;
} cb_in, cb_out;
-#ifdef CONFIG_DMABUF_SYSFS_STATS
- /**
- * @sysfs_entry:
- *
- * For exposing information about this buffer in sysfs. See also
- * `DMA-BUF statistics`_ for the uapi this enables.
- */
- struct dma_buf_sysfs_entry {
- struct kobject kobj;
- struct dma_buf *dmabuf;
- } *sysfs_entry;
-#endif
};
/**
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 64639e104110..d4c92fd35092 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -364,11 +364,12 @@ static inline void dma_fence_end_signalling(bool cookie) {}
static inline void __dma_fence_might_wait(void) {}
#endif
-int dma_fence_signal(struct dma_fence *fence);
-int dma_fence_signal_locked(struct dma_fence *fence);
-int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
-int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
- ktime_t timestamp);
+void dma_fence_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal(struct dma_fence *fence);
+bool dma_fence_check_and_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_locked(struct dma_fence *fence);
+void dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp);
+void dma_fence_signal_timestamp_locked(struct dma_fence *fence, ktime_t timestamp);
signed long dma_fence_default_wait(struct dma_fence *fence,
bool intr, signed long timeout);
int dma_fence_add_callback(struct dma_fence *fence,
@@ -401,6 +402,26 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence);
const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+/**
+ * dma_fence_test_signaled_flag - Only check whether a fence is signaled yet.
+ * @fence: the fence to check
+ *
+ * This function just checks whether @fence is signaled, without interacting
+ * with the fence in any way. The user must, therefore, ensure through other
+ * means that fences get signaled eventually.
+ *
+ * This function uses test_bit(), which is thread-safe. Naturally, this function
+ * should be used opportunistically; a fence could get signaled at any moment
+ * after the check is done.
+ *
+ * Return: true if signaled, false otherwise.
+ */
+static inline bool
+dma_fence_test_signaled_flag(struct dma_fence *fence)
+{
+ return test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags);
+}
+
/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
@@ -418,7 +439,7 @@ const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
static inline bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
@@ -448,7 +469,7 @@ dma_fence_is_signaled_locked(struct dma_fence *fence)
static inline bool
dma_fence_is_signaled(struct dma_fence *fence)
{
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ if (dma_fence_test_signaled_flag(fence))
return true;
if (fence->ops->signaled && fence->ops->signaled(fence)) {
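
Note the signature change above: plain dma_fence_signal() no longer returns a value, and callers that cared about the prior state move to the new check-and-signal variants. A minimal sketch, assuming dma_fence_check_and_signal() returns true when the fence had already been signaled (inferred from the test_and_set-style naming, not confirmed here):

#include <linux/dma-fence.h>

static void job_complete(struct dma_fence *fence)
{
	/* Lock-free peek; the fence may still signal right after this */
	if (dma_fence_test_signaled_flag(fence))
		return;

	/* Assumed semantics: true means another path signaled it first */
	if (dma_fence_check_and_signal(fence))
		return;

	/* First signaler: one-shot completion work would go here */
}
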
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 27d15f60950a..648328a64b27 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -46,4 +46,6 @@ const char *dma_heap_get_name(struct dma_heap *heap);
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+extern bool mem_accounting;
+
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 05cc251035da..65fb70382675 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -304,10 +304,6 @@ struct fb_ops {
/* teardown any resources to do with this framebuffer */
void (*fb_destroy)(struct fb_info *info);
-
- /* called at KDB enter and leave time to prepare the console */
- int (*fb_debug_enter)(struct fb_info *info);
- int (*fb_debug_leave)(struct fb_info *info);
};
#ifdef CONFIG_FB_TILEBLITTING
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 9fa9c30a34e6..5e7a63143a4a 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -380,7 +380,7 @@ struct host1x_driver {
struct list_head list;
int (*probe)(struct host1x_device *device);
- int (*remove)(struct host1x_device *device);
+ void (*remove)(struct host1x_device *device);
void (*shutdown)(struct host1x_device *device);
};
diff --git a/include/trace/events/dma_buf.h b/include/trace/events/dma_buf.h
new file mode 100644
index 000000000000..3bb88d05bcc8
--- /dev/null
+++ b/include/trace/events/dma_buf.h
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dma_buf
+
+#if !defined(_TRACE_DMA_BUF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_DMA_BUF_H
+
+#include <linux/dma-buf.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(dma_buf,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf),
+
+ TP_STRUCT__entry(
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino)
+);
+
+DECLARE_EVENT_CLASS(dma_buf_attach_dev,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev),
+
+ TP_STRUCT__entry(
+ __string( dev_name, dev_name(dev))
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ __field( struct dma_buf_attachment *, attach)
+ __field( bool, is_dynamic)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name);
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->is_dynamic = is_dynamic;
+ __entry->attach = attach;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu attachment:%p is_dynamic=%d dev_name=%s",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->attach,
+ __entry->is_dynamic,
+ __get_str(dev_name))
+);
+
+DECLARE_EVENT_CLASS(dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd),
+
+ TP_STRUCT__entry(
+ __string( exp_name, dmabuf->exp_name)
+ __field( size_t, size)
+ __field( ino_t, ino)
+ __field( int, fd)
+ ),
+
+ TP_fast_assign(
+ __assign_str(exp_name);
+ __entry->size = dmabuf->size;
+ __entry->ino = dmabuf->file->f_inode->i_ino;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("exp_name=%s size=%zu ino=%lu fd=%d",
+ __get_str(exp_name),
+ __entry->size,
+ __entry->ino,
+ __entry->fd)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_export,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap_internal,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_mmap,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf, dma_buf_put,
+
+ TP_PROTO(struct dma_buf *dmabuf),
+
+ TP_ARGS(dmabuf)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_dynamic_attach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT(dma_buf_attach_dev, dma_buf_detach,
+
+ TP_PROTO(struct dma_buf *dmabuf, struct dma_buf_attachment *attach,
+ bool is_dynamic, struct device *dev),
+
+ TP_ARGS(dmabuf, attach, is_dynamic, dev)
+);
+
+DEFINE_EVENT_CONDITION(dma_buf_fd, dma_buf_fd,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd),
+
+ TP_CONDITION(fd >= 0)
+);
+
+DEFINE_EVENT(dma_buf_fd, dma_buf_get,
+
+ TP_PROTO(struct dma_buf *dmabuf, int fd),
+
+ TP_ARGS(dmabuf, fd)
+);
+
+#endif /* _TRACE_DMA_BUF_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
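
For orientation (placement is illustrative; the real hooks land in drivers/dma-buf/dma-buf.c): the four single-argument events share the dma_buf class body above, and the fd event's TP_CONDITION suppresses error paths. A call site would look roughly like:

#include <linux/dma-buf.h>

#define CREATE_TRACE_POINTS		/* in exactly one .c file */
#include <trace/events/dma_buf.h>

static int export_fd_traced(struct dma_buf *dmabuf, int flags)
{
	int fd = dma_buf_fd(dmabuf, flags);

	trace_dma_buf_fd(dmabuf, fd);	/* TP_CONDITION drops fd < 0 paths */
	return fd;
}
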
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index f80aa4c9d88f..1d34daa0ebcd 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -105,8 +105,6 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
* signalling user mode queues.
- *
- * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing).
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -115,15 +113,13 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
-#define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
AMDGPU_GEM_DOMAIN_OA | \
- AMDGPU_GEM_DOMAIN_DOORBELL | \
- AMDGPU_GEM_DOMAIN_MMIO_REMAP)
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
@@ -883,7 +879,7 @@ struct drm_amdgpu_gem_list_handles_entry {
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
-/* partially resident texture */
+/* unmapped page of partially resident textures */
#define AMDGPU_VM_PAGE_PRT (1 << 4)
/* MTYPE flags use bit 5 to 8 */
#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
@@ -1427,6 +1423,7 @@ struct drm_amdgpu_info_vbios {
#define AMDGPU_VRAM_TYPE_LPDDR4 11
#define AMDGPU_VRAM_TYPE_LPDDR5 12
#define AMDGPU_VRAM_TYPE_HBM3E 13
+#define AMDGPU_VRAM_TYPE_HBM4 14
struct drm_amdgpu_info_device {
/** PCI Device ID */
@@ -1629,9 +1626,25 @@ struct drm_amdgpu_info_uq_metadata_gfx {
__u32 csa_alignment;
};
+struct drm_amdgpu_info_uq_metadata_compute {
+ /* EOP size for gfx11 */
+ __u32 eop_size;
+ /* EOP base virtual alignment for gfx11 */
+ __u32 eop_alignment;
+};
+
+struct drm_amdgpu_info_uq_metadata_sdma {
+ /* context save area size for sdma6 */
+ __u32 csa_size;
+ /* context save area base virtual alignment for sdma6 */
+ __u32 csa_alignment;
+};
+
struct drm_amdgpu_info_uq_metadata {
union {
struct drm_amdgpu_info_uq_metadata_gfx gfx;
+ struct drm_amdgpu_info_uq_metadata_compute compute;
+ struct drm_amdgpu_info_uq_metadata_sdma sdma;
};
};
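
A hedged userspace sketch of consuming the widened union: which member is valid follows from the HW IP the metadata was queried for; the struct is assumed to have been filled by the corresponding DRM_AMDGPU_INFO query:

#include <stdio.h>
#include <drm/amdgpu_drm.h>

static void print_uq_metadata(const struct drm_amdgpu_info_uq_metadata *meta,
			      unsigned int hw_ip)
{
	if (hw_ip == AMDGPU_HW_IP_COMPUTE)
		printf("EOP: size %u, alignment %u\n",
		       meta->compute.eop_size, meta->compute.eop_alignment);
	else if (hw_ip == AMDGPU_HW_IP_DMA)
		printf("SDMA CSA: size %u, alignment %u\n",
		       meta->sdma.csa_size, meta->sdma.csa_alignment);
}
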
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index 62c917fd4f7b..9c44db2b3dcd 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -19,6 +19,14 @@ extern "C" {
#define AMDXDNA_INVALID_BO_HANDLE 0
#define AMDXDNA_INVALID_FENCE_HANDLE 0
+/*
+ * Define hardware context priority
+ */
+#define AMDXDNA_QOS_REALTIME_PRIORITY 0x100
+#define AMDXDNA_QOS_HIGH_PRIORITY 0x180
+#define AMDXDNA_QOS_NORMAL_PRIORITY 0x200
+#define AMDXDNA_QOS_LOW_PRIORITY 0x280
+
enum amdxdna_device_type {
AMDXDNA_DEV_TYPE_UNKNOWN = -1,
AMDXDNA_DEV_TYPE_KMQ,
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 1956431bb391..50d5337f35ef 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -24,6 +24,8 @@ extern "C" {
#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_PANFROST_JM_CTX_CREATE 0x0a
#define DRM_PANFROST_JM_CTX_DESTROY 0x0b
+#define DRM_PANFROST_SYNC_BO 0x0c
+#define DRM_PANFROST_QUERY_BO_INFO 0x0d
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -35,6 +37,8 @@ extern "C" {
#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
#define DRM_IOCTL_PANFROST_JM_CTX_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_CREATE, struct drm_panfrost_jm_ctx_create)
#define DRM_IOCTL_PANFROST_JM_CTX_DESTROY DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_JM_CTX_DESTROY, struct drm_panfrost_jm_ctx_destroy)
+#define DRM_IOCTL_PANFROST_SYNC_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SYNC_BO, struct drm_panfrost_sync_bo)
+#define DRM_IOCTL_PANFROST_QUERY_BO_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_QUERY_BO_INFO, struct drm_panfrost_query_bo_info)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -120,9 +124,12 @@ struct drm_panfrost_wait_bo {
__s64 timeout_ns;
};
-/* Valid flags to pass to drm_panfrost_create_bo */
+/* Valid flags to pass to drm_panfrost_create_bo.
+ * PANFROST_BO_WB_MMAP can't be set if PANFROST_BO_HEAP is.
+ */
#define PANFROST_BO_NOEXEC 1
#define PANFROST_BO_HEAP 2
+#define PANFROST_BO_WB_MMAP 4
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
@@ -228,6 +235,13 @@ enum drm_panfrost_param {
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP,
DRM_PANFROST_PARAM_SYSTEM_TIMESTAMP_FREQUENCY,
DRM_PANFROST_PARAM_ALLOWED_JM_CTX_PRIORITIES,
+ DRM_PANFROST_PARAM_SELECTED_COHERENCY,
+};
+
+enum drm_panfrost_gpu_coherency {
+ DRM_PANFROST_GPU_COHERENCY_ACE_LITE = 0,
+ DRM_PANFROST_GPU_COHERENCY_ACE = 1,
+ DRM_PANFROST_GPU_COHERENCY_NONE = 31,
};
struct drm_panfrost_get_param {
@@ -301,6 +315,66 @@ struct drm_panfrost_set_label_bo {
__u64 label;
};
+/* Valid values for the @type field of drm_panfrost_bo_sync_op */
+#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH 0
+#define PANFROST_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE 1
+
+/**
+ * struct drm_panfrost_bo_sync_op - BO map sync op
+ */
+struct drm_panfrost_bo_sync_op {
+ /** @handle: Handle of the buffer object to sync. */
+ __u32 handle;
+
+ /** @type: Type of sync operation. */
+ __u32 type;
+
+ /**
+ * @offset: Offset into the BO at which the sync range starts.
+ *
+ * This will be rounded down to the nearest cache line as needed.
+ */
+ __u32 offset;
+
+ /**
+ * @size: Size of the range to sync
+ *
+ * @size + @offset will be rounded up to the nearest cache line as
+ * needed.
+ */
+ __u32 size;
+};
+
+/**
+ * struct drm_panfrost_sync_bo - ioctl argument for syncing BO maps
+ */
+struct drm_panfrost_sync_bo {
+ /** @ops: Array of struct drm_panfrost_bo_sync_op */
+ __u64 ops;
+
+ /** @op_count: Number of BO sync ops */
+ __u32 op_count;
+
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+/** BO comes from a different subsystem. */
+#define DRM_PANFROST_BO_IS_IMPORTED (1 << 0)
+
+struct drm_panfrost_query_bo_info {
+ /** @handle: Handle of the object being queried. */
+ __u32 handle;
+
+ /** @extra_flags: Extra flags that are not coming from the BO_CREATE ioctl(). */
+ __u32 extra_flags;
+
+ /** @create_flags: Flags passed at creation time. */
+ __u32 create_flags;
+
+ /** @pad: Will be zero on return. */
+ __u32 pad;
+};
+
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
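
A minimal userspace sketch of the new SYNC_BO ioctl (bo_handle/bo_size assumed from an earlier BO_CREATE; uapi include paths vary by libdrm install): flush CPU caches over a write-back mapping before the GPU reads it:

#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>			/* drmIoctl() */
#include <drm/panfrost_drm.h>

static int panfrost_flush_bo(int fd, uint32_t bo_handle, uint32_t bo_size)
{
	struct drm_panfrost_bo_sync_op op = {
		.handle = bo_handle,
		.type   = PANFROST_BO_SYNC_CPU_CACHE_FLUSH,
		.offset = 0,
		.size   = bo_size,	/* kernel rounds to cache lines */
	};
	struct drm_panfrost_sync_bo args = {
		.ops      = (uint64_t)(uintptr_t)&op,
		.op_count = 1,
	};

	return drmIoctl(fd, DRM_IOCTL_PANFROST_SYNC_BO, &args);
}
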
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index 467d365ed7ba..b401ac585d6a 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -144,6 +144,16 @@ enum drm_panthor_ioctl_id {
* pgoff_t size.
*/
DRM_PANTHOR_SET_USER_MMIO_OFFSET,
+
+ /** @DRM_PANTHOR_BO_SYNC: Sync BO data to/from the device */
+ DRM_PANTHOR_BO_SYNC,
+
+ /**
+ * @DRM_PANTHOR_BO_QUERY_INFO: Query information about a BO.
+ *
+ * This is useful for imported BOs.
+ */
+ DRM_PANTHOR_BO_QUERY_INFO,
};
/**
@@ -246,6 +256,26 @@ enum drm_panthor_dev_query_type {
};
/**
+ * enum drm_panthor_gpu_coherency: Type of GPU coherency
+ */
+enum drm_panthor_gpu_coherency {
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_ACE_LITE: ACE Lite coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_ACE_LITE = 0,
+
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_ACE: ACE coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_ACE = 1,
+
+ /**
+ * @DRM_PANTHOR_GPU_COHERENCY_NONE: No coherency.
+ */
+ DRM_PANTHOR_GPU_COHERENCY_NONE = 31,
+};
+
+/**
* struct drm_panthor_gpu_info - GPU information
*
* Structure grouping all queryable information relating to the GPU.
@@ -301,7 +331,16 @@ struct drm_panthor_gpu_info {
*/
__u32 thread_max_barrier_size;
- /** @coherency_features: Coherency features. */
+ /**
+ * @coherency_features: Coherency features.
+ *
+ * Combination of drm_panthor_gpu_coherency flags.
+ *
+ * Note that this only reflects the coherency protocols supported by the
+ * GPU; the actual coherency in place depends on the SoC
+ * integration and is reflected by
+ * drm_panthor_gpu_info::selected_coherency.
+ */
__u32 coherency_features;
/** @texture_features: Texture features. */
@@ -310,8 +349,12 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
- /** @pad0: MBZ. */
- __u32 pad0;
+ /**
+ * @selected_coherency: Coherency selected for this device.
+ *
+ * One of drm_panthor_gpu_coherency.
+ */
+ __u32 selected_coherency;
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -638,6 +681,15 @@ struct drm_panthor_vm_get_state {
enum drm_panthor_bo_flags {
/** @DRM_PANTHOR_BO_NO_MMAP: The buffer object will never be CPU-mapped in userspace. */
DRM_PANTHOR_BO_NO_MMAP = (1 << 0),
+
+ /**
+ * @DRM_PANTHOR_BO_WB_MMAP: Force "Write-Back Cacheable" CPU mapping.
+ *
+ * CPU-map the buffer object in userspace with the "Write-Back
+ * Cacheable" cacheability attribute. Without this flag, the mapping
+ * uses the "Non-Cacheable" attribute when the GPU is not IO-coherent.
+ */
+ DRM_PANTHOR_BO_WB_MMAP = (1 << 1),
};
/**
@@ -1041,6 +1093,101 @@ struct drm_panthor_set_user_mmio_offset {
};
/**
+ * enum drm_panthor_bo_sync_op_type - BO sync type
+ */
+enum drm_panthor_bo_sync_op_type {
+ /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH: Flush CPU caches. */
+ DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH = 0,
+
+ /** @DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE: Flush and invalidate CPU caches. */
+ DRM_PANTHOR_BO_SYNC_CPU_CACHE_FLUSH_AND_INVALIDATE = 1,
+};
+
+/**
+ * struct drm_panthor_bo_sync_op - BO map sync op
+ */
+struct drm_panthor_bo_sync_op {
+ /** @handle: Handle of the buffer object to sync. */
+ __u32 handle;
+
+ /** @type: Type of operation. */
+ __u32 type;
+
+ /**
+ * @offset: Offset into the BO at which the sync range starts.
+ *
+ * This will be rounded down to the nearest cache line as needed.
+ */
+ __u64 offset;
+
+ /**
+ * @size: Size of the range to sync
+ *
+ * @size + @offset will be rounded up to the nearest cache line as
+ * needed.
+ */
+ __u64 size;
+};
+
+/**
+ * struct drm_panthor_bo_sync - BO map sync request
+ */
+struct drm_panthor_bo_sync {
+ /**
+ * @ops: Array of struct drm_panthor_bo_sync_op sync operations.
+ */
+ struct drm_panthor_obj_array ops;
+};
+
+/**
+ * enum drm_panthor_bo_extra_flags - Set of flags returned on a BO_QUERY_INFO request
+ *
+ * These are flags reflecting BO properties that do not directly come from the
+ * flags passed at creation time, or information on BOs that were imported from
+ * other drivers.
+ */
+enum drm_panthor_bo_extra_flags {
+ /**
+ * @DRM_PANTHOR_BO_IS_IMPORTED: BO has been imported from an external driver.
+ *
+ * Note that imported dma-buf handles are not flagged as imported if they
+ * were exported by panthor; only buffers coming from other drivers
+ * (dma heaps, other GPUs, display controllers, V4L, ...) are.
+ *
+ * It's also important to note that all imported BOs are mapped cached and can't
+ * be considered IO-coherent even if the GPU is. This means they require explicit
+ * syncs that must go through the DRM_PANTHOR_BO_SYNC ioctl (userland cache
+ * maintenance is not allowed in that case, because extra operations might be
+ * needed to make changes visible to the CPU/device, like buffer migration when the
+ * exporter is a GPU with its own VRAM).
+ */
+ DRM_PANTHOR_BO_IS_IMPORTED = (1 << 0),
+};
+
+/**
+ * struct drm_panthor_bo_query_info - Query BO info
+ */
+struct drm_panthor_bo_query_info {
+ /** @handle: Handle of the buffer object to query flags on. */
+ __u32 handle;
+
+ /**
+ * @extra_flags: Combination of enum drm_panthor_bo_extra_flags flags.
+ */
+ __u32 extra_flags;
+
+ /**
+ * @create_flags: Flags passed at creation time.
+ *
+ * Combination of enum drm_panthor_bo_flags flags.
+ * Will be zero if the buffer comes from a different driver.
+ */
+ __u32 create_flags;
+
+ /** @pad: Will be zero on return. */
+ __u32 pad;
+};
+
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1086,6 +1233,10 @@ enum {
DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
+ DRM_IOCTL_PANTHOR_BO_SYNC =
+ DRM_IOCTL_PANTHOR(WR, BO_SYNC, bo_sync),
+ DRM_IOCTL_PANTHOR_BO_QUERY_INFO =
+ DRM_IOCTL_PANTHOR(WR, BO_QUERY_INFO, bo_query_info),
};
#if defined(__cplusplus)
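
The panthor counterpart, sketched under the same assumptions (fd and a handle that may come from a PRIME import): query provenance first, then route cache maintenance through the ioctl for imported BOs:

#include <stdbool.h>
#include <stdint.h>
#include <xf86drm.h>
#include <drm/panthor_drm.h>

static bool panthor_bo_is_imported(int fd, uint32_t bo_handle)
{
	struct drm_panthor_bo_query_info info = { .handle = bo_handle };

	if (drmIoctl(fd, DRM_IOCTL_PANTHOR_BO_QUERY_INFO, &info))
		return false;	/* assumed: treat query failure as not-imported */

	/* Imported BOs are mapped cacheable and never IO-coherent, so CPU
	 * caches must be maintained through DRM_PANTHOR_BO_SYNC. */
	return info.extra_flags & DRM_PANTHOR_BO_IS_IMPORTED;
}
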
diff --git a/include/uapi/drm/rocket_accel.h b/include/uapi/drm/rocket_accel.h
index 14b2e12b7c49..d0685e372b79 100644
--- a/include/uapi/drm/rocket_accel.h
+++ b/include/uapi/drm/rocket_accel.h
@@ -26,20 +26,27 @@ extern "C" {
*
*/
struct drm_rocket_create_bo {
- /** Input: Size of the requested BO. */
+ /**
+ * @size: Input: Size of the requested BO.
+ */
__u32 size;
- /** Output: GEM handle for the BO. */
+ /**
+ * @handle: Output: GEM handle for the BO.
+ */
__u32 handle;
/**
- * Output: DMA address for the BO in the NPU address space. This address
- * is private to the DRM fd and is valid for the lifetime of the GEM
- * handle.
+ * @dma_address: Output: DMA address for the BO in the NPU address
+ * space. This address is private to the DRM fd and is valid for
+ * the lifetime of the GEM handle.
*/
__u64 dma_address;
- /** Output: Offset into the drm node to use for subsequent mmap call. */
+ /**
+ * @offset: Output: Offset into the drm node to use for subsequent
+ * mmap call.
+ */
__u64 offset;
};
@@ -50,13 +57,19 @@ struct drm_rocket_create_bo {
* synchronization.
*/
struct drm_rocket_prep_bo {
- /** Input: GEM handle of the buffer object. */
+ /**
+ * @handle: Input: GEM handle of the buffer object.
+ */
__u32 handle;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u32 reserved;
- /** Input: Amount of time to wait for NPU jobs. */
+ /**
+ * @timeout_ns: Input: Amount of time to wait for NPU jobs.
+ */
__s64 timeout_ns;
};
@@ -66,10 +79,14 @@ struct drm_rocket_prep_bo {
* Synchronize caches for NPU access.
*/
struct drm_rocket_fini_bo {
- /** Input: GEM handle of the buffer object. */
+ /**
+ * @handle: Input: GEM handle of the buffer object.
+ */
__u32 handle;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u32 reserved;
};
@@ -79,10 +96,15 @@ struct drm_rocket_fini_bo {
* A task is the smallest unit of work that can be run on the NPU.
*/
struct drm_rocket_task {
- /** Input: DMA address to NPU mapping of register command buffer */
+ /**
+ * @regcmd: Input: DMA address to NPU mapping of register command buffer
+ */
__u32 regcmd;
- /** Input: Number of commands in the register command buffer */
+ /**
+ * @regcmd_count: Input: Number of commands in the register command
+ * buffer
+ */
__u32 regcmd_count;
};
@@ -94,25 +116,44 @@ struct drm_rocket_task {
* sequentially on the same core, to benefit from memory residency in SRAM.
*/
struct drm_rocket_job {
- /** Input: Pointer to an array of struct drm_rocket_task. */
+ /**
+ * @tasks: Input: Pointer to an array of struct drm_rocket_task.
+ */
__u64 tasks;
- /** Input: Pointer to a u32 array of the BOs that are read by the job. */
+ /**
+ * @in_bo_handles: Input: Pointer to a u32 array of the BOs that
+ * are read by the job.
+ */
__u64 in_bo_handles;
- /** Input: Pointer to a u32 array of the BOs that are written to by the job. */
+ /**
+ * @out_bo_handles: Input: Pointer to a u32 array of the BOs that
+ * are written to by the job.
+ */
__u64 out_bo_handles;
- /** Input: Number of tasks passed in. */
+ /**
+ * @task_count: Input: Number of tasks passed in.
+ */
__u32 task_count;
- /** Input: Size in bytes of the structs in the @tasks field. */
+ /**
+ * @task_struct_size: Input: Size in bytes of the structs in the
+ * @tasks field.
+ */
__u32 task_struct_size;
- /** Input: Number of input BO handles passed in (size is that times 4). */
+ /**
+ * @in_bo_handle_count: Input: Number of input BO handles passed in
+ * (size is that times 4).
+ */
__u32 in_bo_handle_count;
- /** Input: Number of output BO handles passed in (size is that times 4). */
+ /**
+ * @out_bo_handle_count: Input: Number of output BO handles passed in
+ * (size is that times 4).
+ */
__u32 out_bo_handle_count;
};
@@ -122,16 +163,25 @@ struct drm_rocket_job {
* The kernel will schedule the execution of these jobs in dependency order.
*/
struct drm_rocket_submit {
- /** Input: Pointer to an array of struct drm_rocket_job. */
+ /**
+ * @jobs: Input: Pointer to an array of struct drm_rocket_job.
+ */
__u64 jobs;
- /** Input: Number of jobs passed in. */
+ /**
+ * @job_count: Input: Number of jobs passed in.
+ */
__u32 job_count;
- /** Input: Size in bytes of the structs in the @jobs field. */
+ /**
+ * @job_struct_size: Input: Size in bytes of the structs in the
+ * @jobs field.
+ */
__u32 job_struct_size;
- /** Reserved, must be zero. */
+ /**
+ * @reserved: Reserved, must be zero.
+ */
__u64 reserved;
};
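
One design note on the structs above: @task_struct_size and @job_struct_size let both arrays grow new tail fields without breaking old binaries. A hedged submit sketch (DMA address and count assumed; BO dependency arrays omitted for brevity):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/rocket_accel.h>

static int rocket_submit_one(int fd, uint64_t regcmd_dma, uint32_t n_cmds)
{
	struct drm_rocket_task task = {
		.regcmd       = regcmd_dma,	/* assumed: NPU-visible commands */
		.regcmd_count = n_cmds,
	};
	struct drm_rocket_job job = {
		.tasks            = (uint64_t)(uintptr_t)&task,
		.task_count       = 1,
		.task_struct_size = sizeof(task),  /* old userspace stays compatible */
	};
	struct drm_rocket_submit submit = {
		.jobs            = (uint64_t)(uintptr_t)&job,
		.job_count       = 1,
		.job_struct_size = sizeof(job),
	};

	return drmIoctl(fd, DRM_IOCTL_ROCKET_SUBMIT, &submit);
}
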
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index f64dc0eff0e6..077e66a682e2 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -106,6 +106,7 @@ extern "C" {
#define DRM_XE_OBSERVATION 0x0b
#define DRM_XE_MADVISE 0x0c
#define DRM_XE_VM_QUERY_MEM_RANGE_ATTRS 0x0d
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY 0x0e
/* Must be kept compact -- no holes */
@@ -123,6 +124,7 @@ extern "C" {
#define DRM_IOCTL_XE_OBSERVATION DRM_IOW(DRM_COMMAND_BASE + DRM_XE_OBSERVATION, struct drm_xe_observation_param)
#define DRM_IOCTL_XE_MADVISE DRM_IOW(DRM_COMMAND_BASE + DRM_XE_MADVISE, struct drm_xe_madvise)
#define DRM_IOCTL_XE_VM_QUERY_MEM_RANGE_ATTRS DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_QUERY_MEM_RANGE_ATTRS, struct drm_xe_vm_query_mem_range_attr)
+#define DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_SET_PROPERTY, struct drm_xe_exec_queue_set_property)
/**
* DOC: Xe IOCTL Extensions
@@ -210,8 +212,12 @@ struct drm_xe_ext_set_property {
/** @pad: MBZ */
__u32 pad;
- /** @value: property value */
- __u64 value;
+ union {
+ /** @value: property value */
+ __u64 value;
+ /** @ptr: pointer to user value */
+ __u64 ptr;
+ };
/** @reserved: Reserved */
__u64 reserved[2];
@@ -403,6 +409,9 @@ struct drm_xe_query_mem_regions {
* has low latency hint support
* - %DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR - Flag is set if the
* device has CPU address mirroring support
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT - Flag is set if the
+ * device supports the userspace hint %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION.
+ * This is exposed only on Xe2+.
* - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
* required by this device, typically SZ_4K or SZ_64K
* - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
@@ -421,6 +430,7 @@ struct drm_xe_query_config {
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_LOW_LATENCY (1 << 1)
#define DRM_XE_QUERY_CONFIG_FLAG_HAS_CPU_ADDR_MIRROR (1 << 2)
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_NO_COMPRESSION_HINT (1 << 3)
#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
#define DRM_XE_QUERY_CONFIG_VA_BITS 3
#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
@@ -791,6 +801,17 @@ struct drm_xe_device_query {
* need to use VRAM for display surfaces, therefore the kernel requires
* setting this flag for such objects, otherwise an error is thrown on
* small-bar systems.
+ * - %DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION - Allows userspace to
+ * hint that compression (CCS) should be disabled for the buffer being
+ * created. This can avoid unnecessary memory operations and CCS state
+ * management.
+ * On pre-Xe2 platforms, this flag is currently rejected as compression
+ * control is not supported via PAT index. On Xe2+ platforms, compression
+ * is controlled via PAT entries. If this flag is set, the driver will reject
+ * any VM bind that requests a PAT index enabling compression for this BO.
+ * Note: On dGPU platforms, there is currently no change in behavior with
+ * this flag, but future improvements may leverage it. The current benefit is
+ * primarily applicable to iGPU platforms.
*
* @cpu_caching supports the following values:
* - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
@@ -837,6 +858,7 @@ struct drm_xe_gem_create {
#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+#define DRM_XE_GEM_CREATE_FLAG_NO_COMPRESSION (1 << 3)
/**
* @flags: Flags, currently a mask of memory instances of where BO can
* be placed
@@ -1252,6 +1274,17 @@ struct drm_xe_vm_bind {
* Given that going into a power-saving state kills PXP HWDRM sessions,
* runtime PM will be blocked while queues of this type are alive.
* All PXP queues will be killed if a PXP invalidation event occurs.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP - Create a multi-queue group
+ * or add secondary queues to a multi-queue group.
+ * If the extension's 'value' field has %DRM_XE_MULTI_GROUP_CREATE flag set,
+ * then a new multi-queue group is created with this queue as the primary queue
+ * (Q0). Otherwise, the queue gets added to the multi-queue group whose primary
+ * queue's exec_queue_id is specified in the lower 32 bits of the 'value' field.
+ * All other bits of the extension's 'value' field must be set to 0 when
+ * adding either the primary or the secondary queues of the group.
+ * - %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY - Set the queue
+ * priority within the multi-queue group. Current valid priority values are 0–2
+ * (default is 1), with higher values indicating higher priority.
*
* The example below shows how to use @drm_xe_exec_queue_create to create
* a simple exec_queue (no parallel submission) of class
@@ -1292,6 +1325,10 @@ struct drm_xe_exec_queue_create {
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PXP_TYPE 2
+#define DRM_XE_EXEC_QUEUE_SET_HANG_REPLAY_STATE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_GROUP 4
+#define DRM_XE_MULTI_GROUP_CREATE (1ull << 63)
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY 5
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
@@ -1655,6 +1692,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
DRM_XE_OA_UNIT_TYPE_OAM_SAG,
+
+ /** @DRM_XE_OA_UNIT_TYPE_MERT: MERT OA unit */
+ DRM_XE_OA_UNIT_TYPE_MERT,
};
/**
@@ -1677,12 +1717,19 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
#define DRM_XE_OA_CAPS_OAM (1 << 4)
+#define DRM_XE_OA_CAPS_OA_UNIT_GT_ID (1 << 5)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
+ /** @gt_id: gt id for this OA unit */
+ __u16 gt_id;
+
+ /** @reserved1: MBZ */
+ __u16 reserved1[3];
+
/** @reserved: MBZ */
- __u64 reserved[4];
+ __u64 reserved[3];
/** @num_engines: number of engines in @eci array */
__u64 num_engines;
@@ -2072,7 +2119,13 @@ struct drm_xe_madvise {
struct {
#define DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE 0
#define DRM_XE_PREFERRED_LOC_DEFAULT_SYSTEM -1
- /** @preferred_mem_loc.devmem_fd: fd for preferred loc */
+ /**
+ * @preferred_mem_loc.devmem_fd:
+ * Device file-descriptor of the device where the
+ * preferred memory is located, or one of the
+ * above special values. Please also see
+ * @preferred_mem_loc.region_instance below.
+ */
__u32 devmem_fd;
#define DRM_XE_MIGRATE_ALL_PAGES 0
@@ -2080,8 +2133,14 @@ struct drm_xe_madvise {
/** @preferred_mem_loc.migration_policy: Page migration policy */
__u16 migration_policy;
- /** @preferred_mem_loc.pad : MBZ */
- __u16 pad;
+ /**
+ * @preferred_mem_loc.region_instance : Region instance.
+ * MBZ if @devmem_fd <= %DRM_XE_PREFERRED_LOC_DEFAULT_DEVICE.
+ * Otherwise should point to the desired device
+ * VRAM instance of the device indicated by
+ * @preferred_mem_loc.devmem_fd.
+ */
+ __u16 region_instance;
/** @preferred_mem_loc.reserved : Reserved */
__u64 reserved;
@@ -2274,6 +2333,30 @@ struct drm_xe_vm_query_mem_range_attr {
};
+/**
+ * struct drm_xe_exec_queue_set_property - exec queue set property
+ *
+ * Sets execution queue properties dynamically.
+ * Currently only %DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY
+ * property can be dynamically set.
+ */
+struct drm_xe_exec_queue_set_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
#if defined(__cplusplus)
}
#endif
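
A short sketch of the new set-property ioctl in use (q_id assumed to belong to an existing multi-queue group):

#include <stdint.h>
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int xe_set_mq_priority(int fd, uint32_t q_id)
{
	struct drm_xe_exec_queue_set_property args = {
		.exec_queue_id = q_id,
		.property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_MULTI_QUEUE_PRIORITY,
		.value    = 2,		/* valid range 0-2, default 1 */
	};

	return drmIoctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_SET_PROPERTY, &args);
}
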
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 5d1727a6d040..e72359370857 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -44,9 +44,13 @@
* - 1.16 - Add contiguous VRAM allocation flag
* - 1.17 - Add SDMA queue creation with target SDMA engine ID
* - 1.18 - Rename pad in set_memory_policy_args to misc_process_flag
 * - 1.19 - Add a new ioctl to create secondary kfd processes
+ * - 1.20 - Trap handler support for expert scheduling mode available
+ * - 1.21 - Debugger support to subscribe to LDS out-of-address exceptions
+ * - 1.22 - Add queue creation with metadata ring base address
*/
#define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 18
+#define KFD_IOCTL_MINOR_VERSION 22
struct kfd_ioctl_get_version_args {
__u32 major_version; /* from KFD */
@@ -84,7 +88,7 @@ struct kfd_ioctl_create_queue_args {
__u32 ctx_save_restore_size; /* to KFD */
__u32 ctl_stack_size; /* to KFD */
__u32 sdma_engine_id; /* to KFD */
- __u32 pad;
+ __u32 metadata_ring_size; /* to KFD */
};
struct kfd_ioctl_destroy_queue_args {
@@ -145,6 +149,8 @@ struct kfd_dbg_device_info_entry {
__u32 num_xcc;
__u32 capability;
__u32 debug_prop;
+ __u32 capability2;
+ __u32 pad;
};
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
@@ -945,6 +951,7 @@ enum kfd_dbg_trap_address_watch_mode {
enum kfd_dbg_trap_flags {
KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP = 1,
KFD_DBG_TRAP_FLAG_SINGLE_ALU_OP = 2,
+ KFD_DBG_TRAP_FLAG_LDS_OUT_OF_ADDR_RANGE = 4
};
/* Trap exceptions */
@@ -1671,7 +1678,10 @@ struct kfd_ioctl_dbg_trap_args {
#define AMDKFD_IOC_DBG_TRAP \
AMDKFD_IOWR(0x26, struct kfd_ioctl_dbg_trap_args)
+#define AMDKFD_IOC_CREATE_PROCESS \
+ AMDKFD_IO(0x27)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x27
+#define AMDKFD_COMMAND_END 0x28
#endif
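
Since AMDKFD_IOC_CREATE_PROCESS is declared with AMDKFD_IO() it carries no argument struct, so a hedged userspace sketch reduces to the bare ioctl (the "secondary process" semantics follow the 1.19 version-history note above):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int kfd_create_secondary_process(void)
{
	int kfd = open("/dev/kfd", O_RDWR | O_CLOEXEC);

	if (kfd < 0 || ioctl(kfd, AMDKFD_IOC_CREATE_PROCESS) < 0) {
		perror("AMDKFD_IOC_CREATE_PROCESS");	/* ENOTTY pre-1.19 */
		return -1;
	}
	return kfd;
}
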
diff --git a/include/uapi/linux/kfd_sysfs.h b/include/uapi/linux/kfd_sysfs.h
index 1125fe47959f..0b6ce2f3c887 100644
--- a/include/uapi/linux/kfd_sysfs.h
+++ b/include/uapi/linux/kfd_sysfs.h
@@ -64,7 +64,8 @@
#define HSA_CAP_RESERVED 0x000f8000
#define HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED 0x00000001
-#define HSA_CAP2_RESERVED 0xfffffffe
+#define HSA_CAP2_TRAP_DEBUG_LDS_OUT_OF_ADDR_RANGE_SUPPORTED 0x00000002
+#define HSA_CAP2_RESERVED 0xfffffffc
/* debug_prop bits in node properties */
#define HSA_DBG_WATCH_ADDR_MASK_LO_BIT_MASK 0x0000000f