Diffstat (limited to 'drivers/accel/ivpu')
25 files changed, 1303 insertions, 522 deletions
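The headline user-visible change in this series is the new userptr import path (ivpu_gem_userptr.o, the DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR capability and the IVPU_BO_CREATE_FROM_USERPTR ioctl). As a rough illustration only, the sketch below shows how userspace might probe the capability and wrap a page-aligned allocation in an NPU buffer object. It assumes the updated uapi header <drm/ivpu_accel.h> exports struct drm_ivpu_bo_create_from_userptr, a DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR wrapper and the new capability/flag defines, with the field names used by the kernel handler (user_ptr, size, flags, handle, vpu_addr); those uapi details are inferred from the driver side of the diff, not shown in this excerpt.

/*
 * Minimal userspace sketch (not part of the patch): probe the userptr
 * capability, then turn an existing page-aligned allocation into an ivpu BO.
 * The struct/ioctl names below are assumed to come from the updated
 * <drm/ivpu_accel.h>; only the driver-side handler is visible in this diff.
 */
#include <drm/ivpu_accel.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct drm_ivpu_param cap = {
		.param = DRM_IVPU_PARAM_CAPABILITIES,
		.index = DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR,
	};
	struct drm_ivpu_bo_create_from_userptr req;
	long page = sysconf(_SC_PAGESIZE);
	void *buf = NULL;
	int fd, ret;

	fd = open("/dev/accel/accel0", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &cap) || !cap.value) {
		fprintf(stderr, "userptr BOs not supported by this kernel/device\n");
		goto out;
	}

	/* The handler requires both the pointer and the size to be page aligned. */
	if (posix_memalign(&buf, page, 16 * page))
		goto out;

	memset(&req, 0, sizeof(req));
	req.user_ptr = (uintptr_t)buf;
	req.size = 16 * page;
	req.flags = DRM_IVPU_BO_READ_ONLY;	/* pages pinned without FOLL_WRITE */

	ret = ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE_FROM_USERPTR, &req);
	if (!ret)
		printf("handle %u mapped at NPU address 0x%llx\n",
		       req.handle, (unsigned long long)req.vpu_addr);

	free(buf);
out:
	close(fd);
	return 0;
}

Per the ioctl handler in ivpu_gem_userptr.c, requests with a zero or unaligned pointer/size are rejected, only the HIGH_MEM, DMA_MEM and READ_ONLY flags are accepted, and READ_ONLY also drops FOLL_WRITE from the pin_user_pages_fast() call.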
diff --git a/drivers/accel/ivpu/Makefile b/drivers/accel/ivpu/Makefile index 1029e0bab061..dbf76b8a5b4c 100644 --- a/drivers/accel/ivpu/Makefile +++ b/drivers/accel/ivpu/Makefile @@ -6,6 +6,7 @@ intel_vpu-y := \ ivpu_fw.o \ ivpu_fw_log.o \ ivpu_gem.o \ + ivpu_gem_userptr.o \ ivpu_hw.o \ ivpu_hw_btrs.o \ ivpu_hw_ip.o \ diff --git a/drivers/accel/ivpu/ivpu_debugfs.c b/drivers/accel/ivpu/ivpu_debugfs.c index cd24ccd20ba6..3bd85ee6c26b 100644 --- a/drivers/accel/ivpu/ivpu_debugfs.c +++ b/drivers/accel/ivpu/ivpu_debugfs.c @@ -398,35 +398,25 @@ static int dct_active_set(void *data, u64 active_percent) DEFINE_DEBUGFS_ATTRIBUTE(ivpu_dct_fops, dct_active_get, dct_active_set, "%llu\n"); +static void print_priority_band(struct seq_file *s, struct ivpu_hw_info *hw, + int band, const char *name) +{ + seq_printf(s, "%-9s: grace_period %9u process_grace_period %9u process_quantum %9u\n", + name, + hw->hws.grace_period[band], + hw->hws.process_grace_period[band], + hw->hws.process_quantum[band]); +} + static int priority_bands_show(struct seq_file *s, void *v) { struct ivpu_device *vdev = s->private; struct ivpu_hw_info *hw = vdev->hw; - for (int band = VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE; - band < VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT; band++) { - switch (band) { - case VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE: - seq_puts(s, "Idle: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL: - seq_puts(s, "Normal: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS: - seq_puts(s, "Focus: "); - break; - - case VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME: - seq_puts(s, "Realtime: "); - break; - } - - seq_printf(s, "grace_period %9u process_grace_period %9u process_quantum %9u\n", - hw->hws.grace_period[band], hw->hws.process_grace_period[band], - hw->hws.process_quantum[band]); - } + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE, "Idle"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL, "Normal"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS, "Focus"); + print_priority_band(s, hw, VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME, "Realtime"); return 0; } diff --git a/drivers/accel/ivpu/ivpu_drv.c b/drivers/accel/ivpu/ivpu_drv.c index 3289751b4757..3d6fccdefdd6 100644 --- a/drivers/accel/ivpu/ivpu_drv.c +++ b/drivers/accel/ivpu/ivpu_drv.c @@ -57,7 +57,7 @@ MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency"); int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO; module_param_named(sched_mode, ivpu_sched_mode, int, 0444); -MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler"); +MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 27XX - 50XX), 1 - Use HW scheduler"); bool ivpu_disable_mmu_cont_pages; module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444); @@ -134,6 +134,8 @@ bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability) return true; case DRM_IVPU_CAP_DMA_MEMORY_RANGE: return true; + case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR: + return true; case DRM_IVPU_CAP_MANAGE_CMDQ: return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW; default: @@ -200,6 +202,9 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f case DRM_IVPU_PARAM_CAPABILITIES: args->value = ivpu_is_capable(vdev, args->index); break; + case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE: + args->value = ivpu_fw_preempt_buf_size(vdev); + break; default: ret = -EINVAL; break; @@ -310,6 +315,7 
@@ static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0), DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0), + DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0), }; static int ivpu_wait_for_ready(struct ivpu_device *vdev) @@ -377,8 +383,7 @@ int ivpu_boot(struct ivpu_device *vdev) drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter)); drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); - /* Update boot params located at first 4KB of FW memory */ - ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem)); + ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp)); ret = ivpu_hw_boot_fw(vdev); if (ret) { @@ -450,6 +455,9 @@ int ivpu_shutdown(struct ivpu_device *vdev) static const struct file_operations ivpu_fops = { .owner = THIS_MODULE, DRM_ACCEL_FOPS, +#ifdef CONFIG_PROC_FS + .show_fdinfo = drm_show_fdinfo, +#endif }; static const struct drm_driver driver = { @@ -464,6 +472,9 @@ static const struct drm_driver driver = { .ioctls = ivpu_drm_ioctls, .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), .fops = &ivpu_fops, +#ifdef CONFIG_PROC_FS + .show_fdinfo = drm_show_memory_stats, +#endif .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -705,6 +716,7 @@ static struct pci_device_id ivpu_pci_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) }, { } }; MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); diff --git a/drivers/accel/ivpu/ivpu_drv.h b/drivers/accel/ivpu/ivpu_drv.h index 62ab1c654e63..5b34b6f50e69 100644 --- a/drivers/accel/ivpu/ivpu_drv.h +++ b/drivers/accel/ivpu/ivpu_drv.h @@ -27,6 +27,7 @@ #define PCI_DEVICE_ID_LNL 0x643e #define PCI_DEVICE_ID_PTL_P 0xb03e #define PCI_DEVICE_ID_WCL 0xfd3e +#define PCI_DEVICE_ID_NVL 0xd71d #define IVPU_HW_IP_37XX 37 #define IVPU_HW_IP_40XX 40 @@ -78,6 +79,7 @@ #define IVPU_DBG_KREF BIT(11) #define IVPU_DBG_RPM BIT(12) #define IVPU_DBG_MMU_MAP BIT(13) +#define IVPU_DBG_IOCTL BIT(14) #define ivpu_err(vdev, fmt, ...) 
\ drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__) @@ -245,6 +247,8 @@ static inline int ivpu_hw_ip_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: return IVPU_HW_IP_50XX; + case PCI_DEVICE_ID_NVL: + return IVPU_HW_IP_60XX; default: dump_stack(); ivpu_err(vdev, "Unknown NPU IP generation\n"); @@ -261,6 +265,7 @@ static inline int ivpu_hw_btrs_gen(struct ivpu_device *vdev) case PCI_DEVICE_ID_LNL: case PCI_DEVICE_ID_PTL_P: case PCI_DEVICE_ID_WCL: + case PCI_DEVICE_ID_NVL: return IVPU_HW_BTRS_LNL; default: dump_stack(); diff --git a/drivers/accel/ivpu/ivpu_fw.c b/drivers/accel/ivpu/ivpu_fw.c index 9db741695401..48386d2cddbb 100644 --- a/drivers/accel/ivpu/ivpu_fw.c +++ b/drivers/accel/ivpu/ivpu_fw.c @@ -17,15 +17,10 @@ #include "ivpu_ipc.h" #include "ivpu_pm.h" -#define FW_GLOBAL_MEM_START (2ull * SZ_1G) -#define FW_GLOBAL_MEM_END (3ull * SZ_1G) -#define FW_SHARED_MEM_SIZE SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */ -#define FW_SHARED_MEM_ALIGNMENT SZ_128K /* VPU MTRR limitation */ -#define FW_RUNTIME_MAX_SIZE SZ_512M #define FW_SHAVE_NN_MAX_SIZE SZ_2M -#define FW_RUNTIME_MIN_ADDR (FW_GLOBAL_MEM_START) -#define FW_RUNTIME_MAX_ADDR (FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE) #define FW_FILE_IMAGE_OFFSET (VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE) +#define FW_PREEMPT_BUF_MIN_SIZE SZ_4K +#define FW_PREEMPT_BUF_MAX_SIZE SZ_32M #define WATCHDOG_MSS_REDIRECT 32 #define WATCHDOG_NCE_REDIRECT 33 @@ -61,12 +56,14 @@ static struct { { IVPU_HW_IP_40XX, "intel/vpu/vpu_40xx_v0.0.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v1.bin" }, { IVPU_HW_IP_50XX, "intel/vpu/vpu_50xx_v0.0.bin" }, + { IVPU_HW_IP_60XX, "intel/vpu/vpu_60xx_v1.bin" }, }; /* Production fw_names from the table above */ MODULE_FIRMWARE("intel/vpu/vpu_37xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_40xx_v1.bin"); MODULE_FIRMWARE("intel/vpu/vpu_50xx_v1.bin"); +MODULE_FIRMWARE("intel/vpu/vpu_60xx_v1.bin"); static int ivpu_fw_request(struct ivpu_device *vdev) { @@ -131,9 +128,14 @@ ivpu_fw_check_api_ver_lt(struct ivpu_device *vdev, const struct vpu_firmware_hea return false; } -static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range_size) +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range) { - if (addr < range_start || addr + size > range_start + range_size) + u64 addr_end; + + if (!range || check_add_overflow(addr, size, &addr_end)) + return false; + + if (addr < range->start || addr_end > range->end) return false; return true; @@ -142,6 +144,12 @@ static bool is_within_range(u64 addr, size_t size, u64 range_start, size_t range static u32 ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX && + ivpu_sched_mode == VPU_SCHEDULING_MODE_OS) { + ivpu_warn(vdev, "OS sched mode is not supported, using HW mode\n"); + return VPU_SCHEDULING_MODE_HW; + } + if (ivpu_sched_mode != IVPU_SCHED_MODE_AUTO) return ivpu_sched_mode; @@ -151,11 +159,56 @@ ivpu_fw_sched_mode_select(struct ivpu_device *vdev, const struct vpu_firmware_he return VPU_SCHEDULING_MODE_HW; } +static void +ivpu_preemption_config_parse(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr) +{ + struct ivpu_fw_info *fw = vdev->fw; + u32 primary_preempt_buf_size, secondary_preempt_buf_size; + + if (fw_hdr->preemption_buffer_1_max_size) + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; + else + primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + + if 
(fw_hdr->preemption_buffer_2_max_size) + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; + else + secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; + + ivpu_dbg(vdev, FW_BOOT, "Preemption buffer size, primary: %u, secondary: %u\n", + primary_preempt_buf_size, secondary_preempt_buf_size); + + if (primary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE || + secondary_preempt_buf_size < FW_PREEMPT_BUF_MIN_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too small\n"); + return; + } + + if (primary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE || + secondary_preempt_buf_size > FW_PREEMPT_BUF_MAX_SIZE) { + ivpu_warn(vdev, "Preemption buffers size too big\n"); + return; + } + + if (fw->sched_mode != VPU_SCHEDULING_MODE_HW) + return; + + if (ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + return; + + vdev->fw->primary_preempt_buf_size = ALIGN(primary_preempt_buf_size, PAGE_SIZE); + vdev->fw->secondary_preempt_buf_size = ALIGN(secondary_preempt_buf_size, PAGE_SIZE); +} + static int ivpu_fw_parse(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data; - u64 runtime_addr, image_load_addr, runtime_size, image_size; + struct ivpu_addr_range fw_image_range; + u64 boot_params_addr, boot_params_size; + u64 fw_version_addr, fw_version_size; + u64 runtime_addr, runtime_size; + u64 image_load_addr, image_size; if (fw->file->size <= FW_FILE_IMAGE_OFFSET) { ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size); @@ -167,18 +220,37 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - runtime_addr = fw_hdr->boot_params_load_address; - runtime_size = fw_hdr->runtime_size; - image_load_addr = fw_hdr->image_load_address; - image_size = fw_hdr->image_size; + boot_params_addr = fw_hdr->boot_params_load_address; + boot_params_size = SZ_4K; - if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) { - ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr); + if (!ivpu_is_within_range(boot_params_addr, boot_params_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid boot params address: 0x%llx\n", boot_params_addr); return -EINVAL; } - if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) { - ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size); + fw_version_addr = fw_hdr->firmware_version_load_address; + fw_version_size = ALIGN(fw_hdr->firmware_version_size, SZ_4K); + + if (fw_version_size != SZ_4K) { + ivpu_err(vdev, "Invalid firmware version size: %u\n", + fw_hdr->firmware_version_size); + return -EINVAL; + } + + if (!ivpu_is_within_range(fw_version_addr, fw_version_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware version address: 0x%llx\n", fw_version_addr); + return -EINVAL; + } + + runtime_addr = fw_hdr->image_load_address; + runtime_size = fw_hdr->runtime_size - boot_params_size - fw_version_size; + + image_load_addr = fw_hdr->image_load_address; + image_size = fw_hdr->image_size; + + if (!ivpu_is_within_range(runtime_addr, runtime_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx and size %llu\n", + runtime_addr, runtime_size); return -EINVAL; } @@ -187,23 +259,25 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) return -EINVAL; } - if (image_load_addr < runtime_addr || - image_load_addr + image_size > runtime_addr + runtime_size) { - ivpu_err(vdev, "Invalid firmware load address size: 0x%llx and size %llu\n", + if 
(!ivpu_is_within_range(image_load_addr, image_size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid firmware load address: 0x%llx and size %llu\n", image_load_addr, image_size); return -EINVAL; } - if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) { - ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size); + if (ivpu_hw_range_init(vdev, &fw_image_range, image_load_addr, image_size)) return -EINVAL; - } - if (fw_hdr->entry_point < image_load_addr || - fw_hdr->entry_point >= image_load_addr + image_size) { + if (!ivpu_is_within_range(fw_hdr->entry_point, SZ_4K, &fw_image_range)) { ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point); return -EINVAL; } + + if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) { + ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size); + return -EINVAL; + } + ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n", fw_hdr->header_version, fw_hdr->image_format); @@ -217,6 +291,10 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) if (IVPU_FW_CHECK_API_COMPAT(vdev, fw_hdr, JSM, 3)) return -EINVAL; + fw->boot_params_addr = boot_params_addr; + fw->boot_params_size = boot_params_size; + fw->fw_version_addr = fw_version_addr; + fw->fw_version_size = fw_version_size; fw->runtime_addr = runtime_addr; fw->runtime_size = runtime_size; fw->image_load_offset = image_load_addr - runtime_addr; @@ -235,22 +313,13 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->sched_mode = ivpu_fw_sched_mode_select(vdev, fw_hdr); ivpu_info(vdev, "Scheduler mode: %s\n", fw->sched_mode ? "HW" : "OS"); - if (fw_hdr->preemption_buffer_1_max_size) - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_max_size; - else - fw->primary_preempt_buf_size = fw_hdr->preemption_buffer_1_size; + ivpu_preemption_config_parse(vdev, fw_hdr); + ivpu_dbg(vdev, FW_BOOT, "Mid-inference preemption %s supported\n", + ivpu_fw_preempt_buf_size(vdev) ? 
"is" : "is not"); - if (fw_hdr->preemption_buffer_2_max_size) - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_max_size; - else - fw->secondary_preempt_buf_size = fw_hdr->preemption_buffer_2_size; - ivpu_dbg(vdev, FW_BOOT, "Preemption buffer sizes: primary %u, secondary %u\n", - fw->primary_preempt_buf_size, fw->secondary_preempt_buf_size); - - if (fw_hdr->ro_section_start_address && !is_within_range(fw_hdr->ro_section_start_address, - fw_hdr->ro_section_size, - fw_hdr->image_load_address, - fw_hdr->image_size)) { + if (fw_hdr->ro_section_start_address && + !ivpu_is_within_range(fw_hdr->ro_section_start_address, fw_hdr->ro_section_size, + &fw_image_range)) { ivpu_err(vdev, "Invalid read-only section: start address 0x%llx, size %u\n", fw_hdr->ro_section_start_address, fw_hdr->ro_section_size); return -EINVAL; @@ -259,12 +328,18 @@ static int ivpu_fw_parse(struct ivpu_device *vdev) fw->read_only_addr = fw_hdr->ro_section_start_address; fw->read_only_size = fw_hdr->ro_section_size; - ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n", - fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size); - ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n", - fw->runtime_addr, image_load_addr, fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "Boot params: address 0x%llx, size %llu\n", + fw->boot_params_addr, fw->boot_params_size); + ivpu_dbg(vdev, FW_BOOT, "FW version: address 0x%llx, size %llu\n", + fw->fw_version_addr, fw->fw_version_size); + ivpu_dbg(vdev, FW_BOOT, "Runtime: address 0x%llx, size %u\n", + fw->runtime_addr, fw->runtime_size); + ivpu_dbg(vdev, FW_BOOT, "Image load offset: 0x%llx, size %u\n", + fw->image_load_offset, fw->image_size); ivpu_dbg(vdev, FW_BOOT, "Read-only section: address 0x%llx, size %u\n", fw->read_only_addr, fw->read_only_size); + ivpu_dbg(vdev, FW_BOOT, "FW entry point: 0x%llx\n", fw->entry_point); + ivpu_dbg(vdev, FW_BOOT, "SHAVE NN size: %u\n", fw->shave_nn_size); return 0; } @@ -291,39 +366,33 @@ ivpu_fw_init_wa(struct ivpu_device *vdev) IVPU_PRINT_WA(disable_d0i3_msg); } -static int ivpu_fw_update_global_range(struct ivpu_device *vdev) -{ - struct ivpu_fw_info *fw = vdev->fw; - u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT); - u64 size = FW_SHARED_MEM_SIZE; - - if (start + size > FW_GLOBAL_MEM_END) { - ivpu_err(vdev, "No space for shared region, start %lld, size %lld\n", start, size); - return -EINVAL; - } - - ivpu_hw_range_init(&vdev->hw->ranges.global, start, size); - return 0; -} - static int ivpu_fw_mem_init(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; - struct ivpu_addr_range fw_range; int log_verb_size; int ret; - ret = ivpu_fw_update_global_range(vdev); - if (ret) - return ret; + fw->mem_bp = ivpu_bo_create_runtime(vdev, fw->boot_params_addr, fw->boot_params_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_bp) { + ivpu_err(vdev, "Failed to create firmware boot params memory buffer\n"); + return -ENOMEM; + } - fw_range.start = fw->runtime_addr; - fw_range.end = fw->runtime_addr + fw->runtime_size; - fw->mem = ivpu_bo_create(vdev, &vdev->gctx, &fw_range, fw->runtime_size, - DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + fw->mem_fw_ver = ivpu_bo_create_runtime(vdev, fw->fw_version_addr, fw->fw_version_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); + if (!fw->mem_fw_ver) { + ivpu_err(vdev, "Failed to create firmware version memory buffer\n"); + ret = -ENOMEM; + goto err_free_bp; + } + + fw->mem = ivpu_bo_create_runtime(vdev, 
fw->runtime_addr, fw->runtime_size, + DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE); if (!fw->mem) { ivpu_err(vdev, "Failed to create firmware runtime memory buffer\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_free_fw_ver; } ret = ivpu_mmu_context_set_pages_ro(vdev, &vdev->gctx, fw->read_only_addr, @@ -372,6 +441,10 @@ err_free_log_crit: ivpu_bo_free(fw->mem_log_crit); err_free_fw_mem: ivpu_bo_free(fw->mem); +err_free_fw_ver: + ivpu_bo_free(fw->mem_fw_ver); +err_free_bp: + ivpu_bo_free(fw->mem_bp); return ret; } @@ -387,10 +460,14 @@ static void ivpu_fw_mem_fini(struct ivpu_device *vdev) ivpu_bo_free(fw->mem_log_verb); ivpu_bo_free(fw->mem_log_crit); ivpu_bo_free(fw->mem); + ivpu_bo_free(fw->mem_fw_ver); + ivpu_bo_free(fw->mem_bp); fw->mem_log_verb = NULL; fw->mem_log_crit = NULL; fw->mem = NULL; + fw->mem_fw_ver = NULL; + fw->mem_bp = NULL; } int ivpu_fw_init(struct ivpu_device *vdev) @@ -483,11 +560,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n", boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n", - boot_params->global_memory_allocator_base); - ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n", - boot_params->global_memory_allocator_size); - ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n", boot_params->shave_nn_fw_base); @@ -495,10 +567,6 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->watchdog_irq_mss); ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n", boot_params->watchdog_irq_nce); - ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n", - boot_params->host_to_vpu_irq); - ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n", - boot_params->job_done_irq); ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n", boot_params->host_version_id); @@ -546,6 +614,8 @@ static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_ boot_params->system_time_us); ivpu_dbg(vdev, FW_BOOT, "boot_params.power_profile = 0x%x\n", boot_params->power_profile); + ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_uses_ecc_mca_signal = 0x%x\n", + boot_params->vpu_uses_ecc_mca_signal); } void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params) @@ -572,6 +642,7 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params return; } + memset(boot_params, 0, sizeof(*boot_params)); vdev->pm->is_warmboot = false; boot_params->magic = VPU_BOOT_PARAMS_MAGIC; @@ -647,6 +718,8 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params boot_params->d0i3_entry_vpu_ts = 0; if (IVPU_WA(disable_d0i2)) boot_params->power_profile |= BIT(1); + boot_params->vpu_uses_ecc_mca_signal = + ivpu_hw_uses_ecc_mca_signal(vdev) ? 
VPU_BOOT_MCA_ECC_BOTH : 0; boot_params->system_time_us = ktime_to_us(ktime_get_real()); wmb(); /* Flush WC buffers after writing bootparams */ diff --git a/drivers/accel/ivpu/ivpu_fw.h b/drivers/accel/ivpu/ivpu_fw.h index 7081913fb0dd..00945892b55e 100644 --- a/drivers/accel/ivpu/ivpu_fw.h +++ b/drivers/accel/ivpu/ivpu_fw.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_FW_H__ @@ -19,10 +19,16 @@ struct ivpu_fw_info { const struct firmware *file; const char *name; char version[FW_VERSION_STR_SIZE]; + struct ivpu_bo *mem_bp; + struct ivpu_bo *mem_fw_ver; struct ivpu_bo *mem; struct ivpu_bo *mem_shave_nn; struct ivpu_bo *mem_log_crit; struct ivpu_bo *mem_log_verb; + u64 boot_params_addr; + u64 boot_params_size; + u64 fw_version_addr; + u64 fw_version_size; u64 runtime_addr; u32 runtime_size; u64 image_load_offset; @@ -42,6 +48,7 @@ struct ivpu_fw_info { u64 last_heartbeat; }; +bool ivpu_is_within_range(u64 addr, size_t size, struct ivpu_addr_range *range); int ivpu_fw_init(struct ivpu_device *vdev); void ivpu_fw_fini(struct ivpu_device *vdev); void ivpu_fw_load(struct ivpu_device *vdev); @@ -52,4 +59,9 @@ static inline bool ivpu_fw_is_cold_boot(struct ivpu_device *vdev) return vdev->fw->entry_point == vdev->fw->cold_boot_entry_point; } +static inline u32 ivpu_fw_preempt_buf_size(struct ivpu_device *vdev) +{ + return vdev->fw->primary_preempt_buf_size + vdev->fw->secondary_preempt_buf_size; +} + #endif /* __IVPU_FW_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c index 59cfcf3eaded..ece68f570b7e 100644 --- a/drivers/accel/ivpu/ivpu_gem.c +++ b/drivers/accel/ivpu/ivpu_gem.c @@ -15,6 +15,7 @@ #include <drm/drm_utils.h> #include "ivpu_drv.h" +#include "ivpu_fw.h" #include "ivpu_gem.h" #include "ivpu_hw.h" #include "ivpu_mmu.h" @@ -27,8 +28,8 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs; static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action) { ivpu_dbg(vdev, BO, - "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n", - action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx_id, + "%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n", + action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr, (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc, (bool)drm_gem_is_imported(&bo->base.base)); } @@ -43,22 +44,47 @@ static inline void ivpu_bo_unlock(struct ivpu_bo *bo) dma_resv_unlock(bo->base.base.resv); } +static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct ivpu_bo *bo) +{ + struct sg_table *sgt; + + drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach); + + ivpu_bo_lock(bo); + + sgt = bo->base.sgt; + if (!sgt) { + sgt = dma_buf_map_attachment(bo->base.base.import_attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + ivpu_err(vdev, "Failed to map BO in IOMMU: %ld\n", PTR_ERR(sgt)); + else + bo->base.sgt = sgt; + } + + ivpu_bo_unlock(bo); + + return sgt; +} + /* - * ivpu_bo_pin() - pin the backing physical pages and map them to VPU. + * ivpu_bo_bind() - pin the backing physical pages and map them to VPU. * * This function pins physical memory pages, then maps the physical pages * to IOMMU address space and finally updates the VPU MMU page tables * to allow the VPU to translate VPU address to IOMMU address. 
*/ -int __must_check ivpu_bo_pin(struct ivpu_bo *bo) +int __must_check ivpu_bo_bind(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); struct sg_table *sgt; int ret = 0; - ivpu_dbg_bo(vdev, bo, "pin"); + ivpu_dbg_bo(vdev, bo, "bind"); - sgt = drm_gem_shmem_get_pages_sgt(&bo->base); + if (bo->base.base.import_attach) + sgt = ivpu_bo_map_attachment(vdev, bo); + else + sgt = drm_gem_shmem_get_pages_sgt(&bo->base); if (IS_ERR(sgt)) { ret = PTR_ERR(sgt); ivpu_err(vdev, "Failed to map BO in IOMMU: %d\n", ret); @@ -70,7 +96,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo) if (!bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); ret = ivpu_mmu_context_map_sgt(vdev, bo->ctx, bo->vpu_addr, sgt, - ivpu_bo_is_snooped(bo)); + ivpu_bo_is_snooped(bo), ivpu_bo_is_read_only(bo)); if (ret) { ivpu_err(vdev, "Failed to map BO in MMU: %d\n", ret); goto unlock; @@ -99,9 +125,9 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx, ret = ivpu_mmu_context_insert_node(ctx, range, ivpu_bo_size(bo), &bo->mm_node); if (!ret) { bo->ctx = ctx; + bo->ctx_id = ctx->id; bo->vpu_addr = bo->mm_node.start; - } else { - ivpu_err(vdev, "Failed to add BO to context %u: %d\n", ctx->id, ret); + ivpu_dbg_bo(vdev, bo, "vaddr"); } ivpu_bo_unlock(bo); @@ -115,7 +141,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) { struct ivpu_device *vdev = ivpu_bo_to_vdev(bo); - lockdep_assert(dma_resv_held(bo->base.base.resv) || !kref_read(&bo->base.base.refcount)); + dma_resv_assert_held(bo->base.base.resv); if (bo->mmu_mapped) { drm_WARN_ON(&vdev->drm, !bo->ctx); @@ -130,13 +156,15 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo) bo->ctx = NULL; } - if (drm_gem_is_imported(&bo->base.base)) - return; - if (bo->base.sgt) { - dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); - sg_free_table(bo->base.sgt); - kfree(bo->base.sgt); + if (bo->base.base.import_attach) { + dma_buf_unmap_attachment(bo->base.base.import_attach, + bo->base.sgt, DMA_BIDIRECTIONAL); + } else { + dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0); + sg_free_table(bo->base.sgt); + kfree(bo->base.sgt); + } bo->base.sgt = NULL; } } @@ -182,10 +210,11 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { + struct ivpu_device *vdev = to_ivpu_device(dev); struct device *attach_dev = dev->dev; struct dma_buf_attachment *attach; - struct sg_table *sgt; struct drm_gem_object *obj; + struct ivpu_bo *bo; int ret; attach = dma_buf_attach(dma_buf, attach_dev); @@ -194,25 +223,25 @@ struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, get_dma_buf(dma_buf); - sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) { - ret = PTR_ERR(sgt); - goto fail_detach; - } - - obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt); + obj = drm_gem_shmem_prime_import_sg_table(dev, attach, NULL); if (IS_ERR(obj)) { ret = PTR_ERR(obj); - goto fail_unmap; + goto fail_detach; } obj->import_attach = attach; obj->resv = dma_buf->resv; + bo = to_ivpu_bo(obj); + + mutex_lock(&vdev->bo_list_lock); + list_add_tail(&bo->bo_list_node, &vdev->bo_list); + mutex_unlock(&vdev->bo_list_lock); + + ivpu_dbg(vdev, BO, "import: bo %8p size %9zu\n", bo, ivpu_bo_size(bo)); + return obj; -fail_unmap: - dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL); fail_detach: dma_buf_detach(dma_buf, attach); dma_buf_put(dma_buf); @@ -220,7 +249,7 @@ 
fail_detach: return ERR_PTR(ret); } -static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags, u32 ctx_id) +static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 flags) { struct drm_gem_shmem_object *shmem; struct ivpu_bo *bo; @@ -238,7 +267,6 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla return ERR_CAST(shmem); bo = to_ivpu_bo(&shmem->base); - bo->ctx_id = ctx_id; bo->base.map_wc = flags & DRM_IVPU_BO_WC; bo->flags = flags; @@ -246,7 +274,7 @@ static struct ivpu_bo *ivpu_bo_alloc(struct ivpu_device *vdev, u64 size, u32 fla list_add_tail(&bo->bo_list_node, &vdev->bo_list); mutex_unlock(&vdev->bo_list_lock); - ivpu_dbg_bo(vdev, bo, "alloc"); + ivpu_dbg(vdev, BO, " alloc: bo %8p size %9llu\n", bo, size); return bo; } @@ -259,8 +287,8 @@ static int ivpu_gem_bo_open(struct drm_gem_object *obj, struct drm_file *file) struct ivpu_addr_range *range; if (bo->ctx) { - ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n", - file_priv->ctx.id, bo->ctx->id); + ivpu_dbg(vdev, IOCTL, "Can't add BO %pe to ctx %u: already in ctx %u\n", + bo, file_priv->ctx.id, bo->ctx->id); return -EALREADY; } @@ -281,23 +309,41 @@ static void ivpu_gem_bo_free(struct drm_gem_object *obj) ivpu_dbg_bo(vdev, bo, "free"); + drm_WARN_ON(&vdev->drm, list_empty(&bo->bo_list_node)); + mutex_lock(&vdev->bo_list_lock); list_del(&bo->bo_list_node); - mutex_unlock(&vdev->bo_list_lock); drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base) && !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ)); drm_WARN_ON(&vdev->drm, ivpu_bo_size(bo) == 0); drm_WARN_ON(&vdev->drm, bo->base.vaddr); + ivpu_bo_lock(bo); ivpu_bo_unbind_locked(bo); + ivpu_bo_unlock(bo); + + mutex_unlock(&vdev->bo_list_lock); + drm_WARN_ON(&vdev->drm, bo->mmu_mapped); drm_WARN_ON(&vdev->drm, bo->ctx); drm_WARN_ON(obj->dev, refcount_read(&bo->base.pages_use_count) > 1); + drm_WARN_ON(obj->dev, bo->base.base.vma_node.vm_files.rb_node); drm_gem_shmem_free(&bo->base); } +static enum drm_gem_object_status ivpu_gem_status(struct drm_gem_object *obj) +{ + struct ivpu_bo *bo = to_ivpu_bo(obj); + enum drm_gem_object_status status = 0; + + if (ivpu_bo_is_resident(bo)) + status |= DRM_GEM_OBJECT_RESIDENT; + + return status; +} + static const struct drm_gem_object_funcs ivpu_gem_funcs = { .free = ivpu_gem_bo_free, .open = ivpu_gem_bo_open, @@ -308,6 +354,7 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs = { .vmap = drm_gem_shmem_object_vmap, .vunmap = drm_gem_shmem_object_vunmap, .mmap = drm_gem_shmem_object_mmap, + .status = ivpu_gem_status, .vm_ops = &drm_gem_shmem_vm_ops, }; @@ -320,25 +367,33 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi struct ivpu_bo *bo; int ret; - if (args->flags & ~DRM_IVPU_BO_FLAGS) + if (args->flags & ~DRM_IVPU_BO_FLAGS) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags 0x%x\n", args->flags); return -EINVAL; + } - if (size == 0) + if (size == 0) { + ivpu_dbg(vdev, IOCTL, "Invalid BO size %llu\n", args->size); return -EINVAL; + } - bo = ivpu_bo_alloc(vdev, size, args->flags, file_priv->ctx.id); + bo = ivpu_bo_alloc(vdev, size, args->flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (ctx %u size %llu flags 0x%x)", + ivpu_dbg(vdev, IOCTL, "Failed to allocate BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); return PTR_ERR(bo); } + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 0); + ret = drm_gem_handle_create(file, &bo->base.base, 
&args->handle); - if (ret) - ivpu_err(vdev, "Failed to create handle for BO: %pe (ctx %u size %llu flags 0x%x)", + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", bo, file_priv->ctx.id, args->size, args->flags); - else + } else { args->vpu_addr = bo->vpu_addr; + drm_WARN_ON(&vdev->drm, bo->base.base.handle_count != 1); + } drm_gem_object_put(&bo->base.base); @@ -360,18 +415,21 @@ ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(range->end)); drm_WARN_ON(&vdev->drm, !PAGE_ALIGNED(size)); - bo = ivpu_bo_alloc(vdev, size, flags, IVPU_GLOBAL_CONTEXT_MMU_SSID); + bo = ivpu_bo_alloc(vdev, size, flags); if (IS_ERR(bo)) { - ivpu_err(vdev, "Failed to allocate BO: %pe (vpu_addr 0x%llx size %llu flags 0x%x)", + ivpu_err(vdev, "Failed to allocate BO: %pe vpu_addr 0x%llx size %llu flags 0x%x\n", bo, range->start, size, flags); return NULL; } ret = ivpu_bo_alloc_vpu_addr(bo, ctx, range); - if (ret) + if (ret) { + ivpu_err(vdev, "Failed to allocate NPU address for BO: %pe ctx %u size %llu: %d\n", + bo, ctx->id, size, ret); goto err_put; + } - ret = ivpu_bo_pin(bo); + ret = ivpu_bo_bind(bo); if (ret) goto err_put; @@ -391,6 +449,21 @@ err_put: return NULL; } +struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags) +{ + struct ivpu_addr_range range; + + if (!ivpu_is_within_range(addr, size, &vdev->hw->ranges.runtime)) { + ivpu_err(vdev, "Invalid runtime BO address 0x%llx size %llu\n", addr, size); + return NULL; + } + + if (ivpu_hw_range_init(vdev, &range, addr, size)) + return NULL; + + return ivpu_bo_create(vdev, &vdev->gctx, &range, size, flags); +} + struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags) { return ivpu_bo_create(vdev, &vdev->gctx, &vdev->hw->ranges.global, size, flags); diff --git a/drivers/accel/ivpu/ivpu_gem.h b/drivers/accel/ivpu/ivpu_gem.h index aa8ff14f7aae..0c3350f22b55 100644 --- a/drivers/accel/ivpu/ivpu_gem.h +++ b/drivers/accel/ivpu/ivpu_gem.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_GEM_H__ #define __IVPU_GEM_H__ @@ -24,19 +24,22 @@ struct ivpu_bo { bool mmu_mapped; }; -int ivpu_bo_pin(struct ivpu_bo *bo); +int ivpu_bo_bind(struct ivpu_bo *bo); void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx); struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size); struct drm_gem_object *ivpu_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf); struct ivpu_bo *ivpu_bo_create(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, struct ivpu_addr_range *range, u64 size, u32 flags); +struct ivpu_bo *ivpu_bo_create_runtime(struct ivpu_device *vdev, u64 addr, u64 size, u32 flags); struct ivpu_bo *ivpu_bo_create_global(struct ivpu_device *vdev, u64 size, u32 flags); void ivpu_bo_free(struct ivpu_bo *bo); int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, + struct drm_file *file); void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p); void ivpu_bo_list_print(struct drm_device *dev); @@ -74,6 +77,16 @@ static inline 
bool ivpu_bo_is_snooped(struct ivpu_bo *bo) return ivpu_bo_cache_mode(bo) == DRM_IVPU_BO_CACHED; } +static inline bool ivpu_bo_is_read_only(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_READ_ONLY; +} + +static inline bool ivpu_bo_is_resident(struct ivpu_bo *bo) +{ + return !!bo->base.pages; +} + static inline void *ivpu_to_cpu_addr(struct ivpu_bo *bo, u32 vpu_addr) { if (vpu_addr < bo->vpu_addr) @@ -96,4 +109,9 @@ static inline u32 cpu_to_vpu_addr(struct ivpu_bo *bo, void *cpu_addr) return bo->vpu_addr + (cpu_addr - ivpu_bo_vaddr(bo)); } +static inline bool ivpu_bo_is_mappable(struct ivpu_bo *bo) +{ + return bo->flags & DRM_IVPU_BO_MAPPABLE; +} + #endif /* __IVPU_GEM_H__ */ diff --git a/drivers/accel/ivpu/ivpu_gem_userptr.c b/drivers/accel/ivpu/ivpu_gem_userptr.c new file mode 100644 index 000000000000..25ba606164c0 --- /dev/null +++ b/drivers/accel/ivpu/ivpu_gem_userptr.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020-2025 Intel Corporation + */ + +#include <linux/dma-buf.h> +#include <linux/err.h> +#include <linux/highmem.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/capability.h> + +#include <drm/drm_device.h> +#include <drm/drm_file.h> +#include <drm/drm_gem.h> + +#include "ivpu_drv.h" +#include "ivpu_gem.h" + +static struct sg_table * +ivpu_gem_userptr_dmabuf_map(struct dma_buf_attachment *attachment, + enum dma_data_direction direction) +{ + struct sg_table *sgt = attachment->dmabuf->priv; + int ret; + + ret = dma_map_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); + if (ret) + return ERR_PTR(ret); + + return sgt; +} + +static void ivpu_gem_userptr_dmabuf_unmap(struct dma_buf_attachment *attachment, + struct sg_table *sgt, + enum dma_data_direction direction) +{ + dma_unmap_sgtable(attachment->dev, sgt, direction, DMA_ATTR_SKIP_CPU_SYNC); +} + +static void ivpu_gem_userptr_dmabuf_release(struct dma_buf *dma_buf) +{ + struct sg_table *sgt = dma_buf->priv; + struct sg_page_iter page_iter; + struct page *page; + + for_each_sgtable_page(sgt, &page_iter, 0) { + page = sg_page_iter_page(&page_iter); + unpin_user_page(page); + } + + sg_free_table(sgt); + kfree(sgt); +} + +static const struct dma_buf_ops ivpu_gem_userptr_dmabuf_ops = { + .map_dma_buf = ivpu_gem_userptr_dmabuf_map, + .unmap_dma_buf = ivpu_gem_userptr_dmabuf_unmap, + .release = ivpu_gem_userptr_dmabuf_release, +}; + +static struct dma_buf * +ivpu_create_userptr_dmabuf(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf_export_info exp_info = {}; + struct dma_buf *dma_buf; + struct sg_table *sgt; + struct page **pages; + unsigned long nr_pages = size >> PAGE_SHIFT; + unsigned int gup_flags = FOLL_LONGTERM; + int ret, i, pinned; + + /* Add FOLL_WRITE only if the BO is not read-only */ + if (!(flags & DRM_IVPU_BO_READ_ONLY)) + gup_flags |= FOLL_WRITE; + + pages = kvmalloc_array(nr_pages, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); + + pinned = pin_user_pages_fast((unsigned long)user_ptr, nr_pages, gup_flags, pages); + if (pinned < 0) { + ret = pinned; + ivpu_dbg(vdev, IOCTL, "Failed to pin user pages: %d\n", ret); + goto free_pages_array; + } + + if (pinned != nr_pages) { + ivpu_dbg(vdev, IOCTL, "Pinned %d pages, expected %lu\n", pinned, nr_pages); + ret = -EFAULT; + goto unpin_pages; + } + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + ret = -ENOMEM; + goto unpin_pages; + } + + ret = 
sg_alloc_table_from_pages(sgt, pages, nr_pages, 0, size, GFP_KERNEL); + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create sg table: %d\n", ret); + goto free_sgt; + } + + exp_info.exp_name = "ivpu_userptr_dmabuf"; + exp_info.owner = THIS_MODULE; + exp_info.ops = &ivpu_gem_userptr_dmabuf_ops; + exp_info.size = size; + exp_info.flags = O_RDWR | O_CLOEXEC; + exp_info.priv = sgt; + + dma_buf = dma_buf_export(&exp_info); + if (IS_ERR(dma_buf)) { + ret = PTR_ERR(dma_buf); + ivpu_dbg(vdev, IOCTL, "Failed to export userptr dma-buf: %d\n", ret); + goto free_sg_table; + } + + kvfree(pages); + return dma_buf; + +free_sg_table: + sg_free_table(sgt); +free_sgt: + kfree(sgt); +unpin_pages: + for (i = 0; i < pinned; i++) + unpin_user_page(pages[i]); +free_pages_array: + kvfree(pages); + return ERR_PTR(ret); +} + +static struct ivpu_bo * +ivpu_bo_create_from_userptr(struct ivpu_device *vdev, void __user *user_ptr, + size_t size, uint32_t flags) +{ + struct dma_buf *dma_buf; + struct drm_gem_object *obj; + struct ivpu_bo *bo; + + dma_buf = ivpu_create_userptr_dmabuf(vdev, user_ptr, size, flags); + if (IS_ERR(dma_buf)) + return ERR_CAST(dma_buf); + + obj = ivpu_gem_prime_import(&vdev->drm, dma_buf); + if (IS_ERR(obj)) { + dma_buf_put(dma_buf); + return ERR_CAST(obj); + } + + dma_buf_put(dma_buf); + + bo = to_ivpu_bo(obj); + bo->flags = flags; + + return bo; +} + +int ivpu_bo_create_from_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct drm_ivpu_bo_create_from_userptr *args = data; + struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = to_ivpu_device(dev); + void __user *user_ptr = u64_to_user_ptr(args->user_ptr); + struct ivpu_bo *bo; + int ret; + + if (args->flags & ~(DRM_IVPU_BO_HIGH_MEM | DRM_IVPU_BO_DMA_MEM | DRM_IVPU_BO_READ_ONLY)) { + ivpu_dbg(vdev, IOCTL, "Invalid BO flags: 0x%x\n", args->flags); + return -EINVAL; + } + + if (!args->user_ptr || !args->size) { + ivpu_dbg(vdev, IOCTL, "Userptr or size are zero: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!PAGE_ALIGNED(args->user_ptr) || !PAGE_ALIGNED(args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr or size not page aligned: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EINVAL; + } + + if (!access_ok(user_ptr, args->size)) { + ivpu_dbg(vdev, IOCTL, "Userptr is not accessible: ptr %llx size %llu\n", + args->user_ptr, args->size); + return -EFAULT; + } + + bo = ivpu_bo_create_from_userptr(vdev, user_ptr, args->size, args->flags); + if (IS_ERR(bo)) + return PTR_ERR(bo); + + ret = drm_gem_handle_create(file, &bo->base.base, &args->handle); + if (ret) { + ivpu_dbg(vdev, IOCTL, "Failed to create handle for BO: %pe ctx %u size %llu flags 0x%x\n", + bo, file_priv->ctx.id, args->size, args->flags); + } else { + ivpu_dbg(vdev, BO, "Created userptr BO: handle=%u vpu_addr=0x%llx size=%llu flags=0x%x\n", + args->handle, bo->vpu_addr, args->size, bo->flags); + args->vpu_addr = bo->vpu_addr; + } + + drm_gem_object_put(&bo->base.base); + + return ret; +} diff --git a/drivers/accel/ivpu/ivpu_hw.c b/drivers/accel/ivpu/ivpu_hw.c index 08dcc31b56f4..d69cd0d93569 100644 --- a/drivers/accel/ivpu/ivpu_hw.c +++ b/drivers/accel/ivpu/ivpu_hw.c @@ -8,6 +8,8 @@ #include "ivpu_hw_btrs.h" #include "ivpu_hw_ip.h" +#include <asm/msr-index.h> +#include <asm/msr.h> #include <linux/dmi.h> #include <linux/fault-inject.h> #include <linux/pm_runtime.h> @@ -20,6 +22,10 @@ module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444); MODULE_PARM_DESC(fail_hw, 
"<interval>,<probability>,<space>,<times>"); #endif +#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */ + +#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff + static char *platform_to_str(u32 platform) { switch (platform) { @@ -147,19 +153,39 @@ static void priority_bands_init(struct ivpu_device *vdev) vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000; } +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size) +{ + u64 end; + + if (!range || check_add_overflow(start, size, &end)) { + ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size); + return -EINVAL; + } + + range->start = start; + range->end = end; + + return 0; +} + static void memory_ranges_init(struct ivpu_device *vdev) { if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G); } else { - ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M); - ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G); - ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G); + ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G); vdev->hw->ranges.dma = vdev->hw->ranges.user; } + + drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start, + FW_SHARED_MEM_ALIGNMENT)); } static int wp_enable(struct ivpu_device *vdev) @@ -373,3 +399,22 @@ irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr) pm_runtime_mark_last_busy(vdev->drm.dev); return IRQ_HANDLED; } + +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev) +{ + unsigned long long msr_integrity_caps; + int ret; + + if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX) + return false; + + ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps); + if (ret) { + ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d", ret); + return false; + } + + ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps); + + return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK; +} diff --git a/drivers/accel/ivpu/ivpu_hw.h b/drivers/accel/ivpu/ivpu_hw.h index d79668fe1609..b6d0f0d0dccc 100644 --- a/drivers/accel/ivpu/ivpu_hw.h +++ b/drivers/accel/ivpu/ivpu_hw.h @@ -21,6 +21,7 @@ struct ivpu_hw_info { bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq); } irq; struct { + struct ivpu_addr_range runtime; struct ivpu_addr_range global; struct ivpu_addr_range user; struct ivpu_addr_range shave; @@ -51,6 +52,8 @@ struct ivpu_hw_info { }; int ivpu_hw_init(struct ivpu_device *vdev); +int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, + u64 size); int ivpu_hw_power_up(struct ivpu_device *vdev); int ivpu_hw_power_down(struct ivpu_device *vdev); int ivpu_hw_reset(struct 
ivpu_device *vdev); @@ -60,6 +63,7 @@ void ivpu_irq_handlers_init(struct ivpu_device *vdev); void ivpu_hw_irq_enable(struct ivpu_device *vdev); void ivpu_hw_irq_disable(struct ivpu_device *vdev); irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr); +bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev); static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq) { @@ -71,12 +75,6 @@ static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq) return vdev->hw->irq.ip_irq_handler(vdev, irq); } -static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size) -{ - range->start = start; - range->end = start + size; -} - static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range) { return range->end - range->start; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.c b/drivers/accel/ivpu/ivpu_hw_btrs.c index afdb3b2aa72a..06e65c592618 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.c +++ b/drivers/accel/ivpu/ivpu_hw_btrs.c @@ -321,6 +321,14 @@ static int wait_for_pll_lock(struct ivpu_device *vdev, bool enable) return REGB_POLL_FLD(VPU_HW_BTRS_MTL_PLL_STATUS, LOCK, exp_val, PLL_TIMEOUT_US); } +static int wait_for_cdyn_deassert(struct ivpu_device *vdev) +{ + if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL) + return 0; + + return REGB_POLL_FLD(VPU_HW_BTRS_LNL_CDYN, CDYN, 0, PLL_TIMEOUT_US); +} + int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) { struct wp_request wp; @@ -354,6 +362,14 @@ int ivpu_hw_btrs_wp_drive(struct ivpu_device *vdev, bool enable) return ret; } + if (!enable) { + ret = wait_for_cdyn_deassert(vdev); + if (ret) { + ivpu_err(vdev, "Timed out waiting for CDYN deassert\n"); + return ret; + } + } + return 0; } @@ -673,7 +689,7 @@ bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq) if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, SURV_ERR, status)) { ivpu_dbg(vdev, IRQ, "Survivability IRQ\n"); - queue_work(system_wq, &vdev->irq_dct_work); + queue_work(system_percpu_wq, &vdev->irq_dct_work); } if (REG_TEST_FLD(VPU_HW_BTRS_LNL_INTERRUPT_STAT, FREQ_CHANGE, status)) { @@ -752,7 +768,7 @@ int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable) } } -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent) +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent) { u32 val = 0; u32 cmd = enable ? 
DCT_ENABLE : DCT_DISABLE; diff --git a/drivers/accel/ivpu/ivpu_hw_btrs.h b/drivers/accel/ivpu/ivpu_hw_btrs.h index 032c384ac3d4..c4c10e22f30f 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs.h @@ -36,7 +36,7 @@ u32 ivpu_hw_btrs_dpu_freq_get(struct ivpu_device *vdev); bool ivpu_hw_btrs_irq_handler_mtl(struct ivpu_device *vdev, int irq); bool ivpu_hw_btrs_irq_handler_lnl(struct ivpu_device *vdev, int irq); int ivpu_hw_btrs_dct_get_request(struct ivpu_device *vdev, bool *enable); -void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u32 active_percent); +void ivpu_hw_btrs_dct_set_status(struct ivpu_device *vdev, bool enable, u8 active_percent); u32 ivpu_hw_btrs_telemetry_offset_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_size_get(struct ivpu_device *vdev); u32 ivpu_hw_btrs_telemetry_enable_get(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h index fff2ef2cada6..a81a9ba540fa 100644 --- a/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h +++ b/drivers/accel/ivpu/ivpu_hw_btrs_lnl_reg.h @@ -74,6 +74,9 @@ #define VPU_HW_BTRS_LNL_PLL_FREQ 0x00000148u #define VPU_HW_BTRS_LNL_PLL_FREQ_RATIO_MASK GENMASK(15, 0) +#define VPU_HW_BTRS_LNL_CDYN 0x0000014cu +#define VPU_HW_BTRS_LNL_CDYN_CDYN_MASK GENMASK(15, 0) + #define VPU_HW_BTRS_LNL_TILE_FUSE 0x00000150u #define VPU_HW_BTRS_LNL_TILE_FUSE_VALID_MASK BIT_MASK(0) #define VPU_HW_BTRS_LNL_TILE_FUSE_CONFIG_MASK GENMASK(6, 1) diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c index 2bf9882ab52e..06aa1e7dc50b 100644 --- a/drivers/accel/ivpu/ivpu_hw_ip.c +++ b/drivers/accel/ivpu/ivpu_hw_ip.c @@ -691,6 +691,13 @@ static void pwr_island_delay_set(struct ivpu_device *vdev) status = high ? 46 : 3; break; + case PCI_DEVICE_ID_NVL: + post = high ? 198 : 17; + post1 = 0; + post2 = high ? 
198 : 17; + status = 0; + break; + default: dump_stack(); ivpu_err(vdev, "Unknown device ID\n"); @@ -889,6 +896,9 @@ static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable) static int soc_cpu_enable(struct ivpu_device *vdev) { + if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_60XX) + return 0; + return soc_cpu_drive_40xx(vdev, true); } diff --git a/drivers/accel/ivpu/ivpu_ipc.c b/drivers/accel/ivpu/ivpu_ipc.c index 5f00809d448a..1f13bf95b2b3 100644 --- a/drivers/accel/ivpu/ivpu_ipc.c +++ b/drivers/accel/ivpu/ivpu_ipc.c @@ -459,7 +459,7 @@ void ivpu_ipc_irq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->irq_ipc_work); + queue_work(system_percpu_wq, &vdev->irq_ipc_work); } void ivpu_ipc_irq_work_fn(struct work_struct *work) diff --git a/drivers/accel/ivpu/ivpu_job.c b/drivers/accel/ivpu/ivpu_job.c index 060f1fc031d3..4f8564e2878a 100644 --- a/drivers/accel/ivpu/ivpu_job.c +++ b/drivers/accel/ivpu/ivpu_job.c @@ -34,22 +34,20 @@ static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq) static int ivpu_preemption_buffers_create(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - u64 primary_size = ALIGN(vdev->fw->primary_preempt_buf_size, PAGE_SIZE); - u64 secondary_size = ALIGN(vdev->fw->secondary_preempt_buf_size, PAGE_SIZE); - - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW || - ivpu_test_mode & IVPU_TEST_MODE_MIP_DISABLE) + if (ivpu_fw_preempt_buf_size(vdev) == 0) return 0; cmdq->primary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.user, - primary_size, DRM_IVPU_BO_WC); + vdev->fw->primary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->primary_preempt_buf) { ivpu_err(vdev, "Failed to create primary preemption buffer\n"); return -ENOMEM; } cmdq->secondary_preempt_buf = ivpu_bo_create(vdev, &file_priv->ctx, &vdev->hw->ranges.dma, - secondary_size, DRM_IVPU_BO_WC); + vdev->fw->secondary_preempt_buf_size, + DRM_IVPU_BO_WC); if (!cmdq->secondary_preempt_buf) { ivpu_err(vdev, "Failed to create secondary preemption buffer\n"); goto err_free_primary; @@ -66,20 +64,39 @@ err_free_primary: static void ivpu_preemption_buffers_free(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq) { - if (vdev->fw->sched_mode != VPU_SCHEDULING_MODE_HW) - return; - if (cmdq->primary_preempt_buf) ivpu_bo_free(cmdq->primary_preempt_buf); if (cmdq->secondary_preempt_buf) ivpu_bo_free(cmdq->secondary_preempt_buf); } +static int ivpu_preemption_job_init(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv, + struct ivpu_cmdq *cmdq, struct ivpu_job *job) +{ + int ret; + + /* Use preemption buffer provided by the user space */ + if (job->primary_preempt_buf) + return 0; + + if (!cmdq->primary_preempt_buf) { + /* Allocate per command queue preemption buffers */ + ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); + if (ret) + return ret; + } + + /* Use preemption buffers allocated by the kernel */ + job->primary_preempt_buf = cmdq->primary_preempt_buf; + job->secondary_preempt_buf = cmdq->secondary_preempt_buf; + + return 0; +} + static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_cmdq *cmdq; - int ret; cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL); if (!cmdq) @@ -89,10 +106,6 @@ static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv) if (!cmdq->mem) goto err_free_cmdq; - ret = ivpu_preemption_buffers_create(vdev, file_priv, cmdq); - if (ret) - ivpu_warn(vdev, "Failed to 
allocate preemption buffers, preemption limited\n"); - return cmdq; err_free_cmdq: @@ -219,11 +232,13 @@ static int ivpu_register_db(struct ivpu_file_priv *file_priv, struct ivpu_cmdq * ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id, cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem)); - if (!ret) + if (!ret) { ivpu_dbg(vdev, JOB, "DB %d registered to cmdq %d ctx %d priority %d\n", cmdq->db_id, cmdq->id, file_priv->ctx.id, cmdq->priority); - else + } else { xa_erase(&vdev->db_xa, cmdq->db_id); + cmdq->db_id = 0; + } return ret; } @@ -333,7 +348,7 @@ static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u32 cmdq = xa_load(&file_priv->cmdq_xa, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to find command queue with ID: %u\n", cmdq_id); + ivpu_dbg(vdev, IOCTL, "Failed to find command queue with ID: %u\n", cmdq_id); return NULL; } @@ -427,17 +442,14 @@ static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION)) entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK; - if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) { - if (cmdq->primary_preempt_buf) { - entry->primary_preempt_buf_addr = cmdq->primary_preempt_buf->vpu_addr; - entry->primary_preempt_buf_size = ivpu_bo_size(cmdq->primary_preempt_buf); - } + if (job->primary_preempt_buf) { + entry->primary_preempt_buf_addr = job->primary_preempt_buf->vpu_addr; + entry->primary_preempt_buf_size = ivpu_bo_size(job->primary_preempt_buf); + } - if (cmdq->secondary_preempt_buf) { - entry->secondary_preempt_buf_addr = cmdq->secondary_preempt_buf->vpu_addr; - entry->secondary_preempt_buf_size = - ivpu_bo_size(cmdq->secondary_preempt_buf); - } + if (job->secondary_preempt_buf) { + entry->secondary_preempt_buf_addr = job->secondary_preempt_buf->vpu_addr; + entry->secondary_preempt_buf_size = ivpu_bo_size(job->secondary_preempt_buf); } wmb(); /* Ensure that tail is updated after filling entry */ @@ -522,7 +534,7 @@ ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count) job->bo_count = bo_count; job->done_fence = ivpu_fence_create(vdev); if (!job->done_fence) { - ivpu_warn_ratelimited(vdev, "Failed to create a fence\n"); + ivpu_err(vdev, "Failed to create a fence\n"); goto err_free_job; } @@ -552,21 +564,26 @@ static struct ivpu_job *ivpu_job_remove_from_submitted_jobs(struct ivpu_device * return job; } -static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status) +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status) { - struct ivpu_job *job; - lockdep_assert_held(&vdev->submitted_jobs_lock); - job = xa_load(&vdev->submitted_jobs_xa, job_id); - if (!job) - return -ENOENT; + switch (job_status) { + case VPU_JSM_STATUS_PROCESSING_ERR: + case VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN ... 
VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX: + { + struct ivpu_job *job = xa_load(&vdev->submitted_jobs_xa, job_id); - if (job_status == VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW) { + if (!job) + return false; + + /* Trigger an engine reset */ guard(mutex)(&job->file_priv->lock); + job->job_status = job_status; + if (job->file_priv->has_mmu_faults) - return 0; + return false; /* * Mark context as faulty and defer destruction of the job to jobs abort thread @@ -574,23 +591,43 @@ static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 * status and ensure both are handled in the same way */ job->file_priv->has_mmu_faults = true; - queue_work(system_wq, &vdev->context_abort_work); - return 0; + queue_work(system_percpu_wq, &vdev->context_abort_work); + return true; } + default: + /* Complete job with error status, engine reset not required */ + break; + } + + return false; +} - job = ivpu_job_remove_from_submitted_jobs(vdev, job_id); +static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status) +{ + struct ivpu_job *job; + + lockdep_assert_held(&vdev->submitted_jobs_lock); + + job = xa_load(&vdev->submitted_jobs_xa, job_id); if (!job) return -ENOENT; - if (job->file_priv->has_mmu_faults) - job_status = DRM_IVPU_JOB_STATUS_ABORTED; + ivpu_job_remove_from_submitted_jobs(vdev, job_id); - job->bos[CMD_BUF_IDX]->job_status = job_status; + if (job->job_status == VPU_JSM_STATUS_SUCCESS) { + if (job->file_priv->has_mmu_faults) + job->job_status = DRM_IVPU_JOB_STATUS_ABORTED; + else + job->job_status = job_status; + } + + job->bos[CMD_BUF_IDX]->job_status = job->job_status; dma_fence_signal(job->done_fence); trace_job("done", job); ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d cmdq_id %u engine %d status 0x%x\n", - job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, job_status); + job->job_id, job->file_priv->ctx.id, job->cmdq_id, job->engine_idx, + job->job_status); ivpu_job_destroy(job); ivpu_stop_job_timeout_detection(vdev); @@ -650,7 +687,6 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) else cmdq = ivpu_cmdq_acquire(file_priv, cmdq_id); if (!cmdq) { - ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d\n", file_priv->ctx.id); ret = -EINVAL; goto err_unlock; } @@ -661,6 +697,13 @@ static int ivpu_job_submit(struct ivpu_job *job, u8 priority, u32 cmdq_id) goto err_unlock; } + ret = ivpu_preemption_job_init(vdev, file_priv, cmdq, job); + if (ret) { + ivpu_err(vdev, "Failed to initialize preemption buffers for job %d: %d\n", + job->job_id, ret); + goto err_unlock; + } + job->cmdq_id = cmdq->id; is_first_job = xa_empty(&vdev->submitted_jobs_xa); @@ -714,7 +757,7 @@ err_unlock: static int ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, - u32 buf_count, u32 commands_offset) + u32 buf_count, u32 commands_offset, u32 preempt_buffer_index) { struct ivpu_file_priv *file_priv = job->file_priv; struct ivpu_device *vdev = file_priv->vdev; @@ -727,40 +770,58 @@ ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 for (i = 0; i < buf_count; i++) { struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]); - if (!obj) + if (!obj) { + ivpu_dbg(vdev, IOCTL, "Failed to lookup GEM object with handle %u\n", + buf_handles[i]); return -ENOENT; + } job->bos[i] = to_ivpu_bo(obj); - ret = ivpu_bo_pin(job->bos[i]); + ret = ivpu_bo_bind(job->bos[i]); if (ret) return ret; } bo = job->bos[CMD_BUF_IDX]; if 
(!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) { - ivpu_warn(vdev, "Buffer is already in use\n"); + ivpu_dbg(vdev, IOCTL, "Buffer is already in use by another job\n"); return -EBUSY; } if (commands_offset >= ivpu_bo_size(bo)) { - ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset); + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u for buffer size %zu\n", + commands_offset, ivpu_bo_size(bo)); return -EINVAL; } job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset; + if (preempt_buffer_index) { + struct ivpu_bo *preempt_bo = job->bos[preempt_buffer_index]; + + if (ivpu_bo_size(preempt_bo) < ivpu_fw_preempt_buf_size(vdev)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer is too small\n"); + return -EINVAL; + } + if (ivpu_bo_is_mappable(preempt_bo)) { + ivpu_dbg(vdev, IOCTL, "Preemption buffer cannot be mappable\n"); + return -EINVAL; + } + job->primary_preempt_buf = preempt_bo; + } + ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx); if (ret) { - ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to lock reservations: %d\n", ret); return ret; } for (i = 0; i < buf_count; i++) { ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1); if (ret) { - ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret); + ivpu_warn_ratelimited(vdev, "Failed to reserve fences: %d\n", ret); goto unlock_reservations; } } @@ -780,7 +841,7 @@ unlock_reservations: static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, u32 cmdq_id, u32 buffer_count, u32 engine, void __user *buffers_ptr, u32 cmds_offset, - u8 priority) + u32 preempt_buffer_index, u8 priority) { struct ivpu_device *vdev = file_priv->vdev; struct ivpu_job *job; @@ -807,16 +868,14 @@ static int ivpu_submit(struct drm_file *file, struct ivpu_file_priv *file_priv, job = ivpu_job_create(file_priv, engine, buffer_count); if (!job) { - ivpu_err(vdev, "Failed to create job\n"); ret = -ENOMEM; goto err_exit_dev; } - ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset); - if (ret) { - ivpu_err(vdev, "Failed to prepare job: %d\n", ret); + ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, buffer_count, cmds_offset, + preempt_buffer_index); + if (ret) goto err_destroy_job; - } down_read(&vdev->pm->reset_lock); ret = ivpu_job_submit(job, priority, cmdq_id); @@ -842,58 +901,91 @@ err_free_handles: int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_submit *args = data; u8 priority; - if (args->engine != DRM_IVPU_ENGINE_COMPUTE) + if (args->engine != DRM_IVPU_ENGINE_COMPUTE) { + ivpu_dbg(vdev, IOCTL, "Invalid engine %d\n", args->engine); return -EINVAL; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (!file_priv->ctx.id) + if 
(!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } priority = ivpu_job_to_jsm_priority(args->priority); return ivpu_submit(file, file_priv, 0, args->buffer_count, args->engine, - (void __user *)args->buffers_ptr, args->commands_offset, priority); + (void __user *)args->buffers_ptr, args->commands_offset, 0, priority); } int ivpu_cmdq_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct ivpu_file_priv *file_priv = file->driver_priv; + struct ivpu_device *vdev = file_priv->vdev; struct drm_ivpu_cmdq_submit *args = data; - if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(file_priv->vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) + if (args->cmdq_id < IVPU_CMDQ_MIN_ID || args->cmdq_id > IVPU_CMDQ_MAX_ID) { + ivpu_dbg(vdev, IOCTL, "Invalid command queue ID %u\n", args->cmdq_id); return -EINVAL; + } - if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) + if (args->buffer_count == 0 || args->buffer_count > JOB_MAX_BUFFER_COUNT) { + ivpu_dbg(vdev, IOCTL, "Invalid buffer count %u\n", args->buffer_count); return -EINVAL; + } - if (!IS_ALIGNED(args->commands_offset, 8)) + if (args->preempt_buffer_index >= args->buffer_count) { + ivpu_dbg(vdev, IOCTL, "Invalid preemption buffer index %u\n", + args->preempt_buffer_index); return -EINVAL; + } - if (!file_priv->ctx.id) + if (!IS_ALIGNED(args->commands_offset, 8)) { + ivpu_dbg(vdev, IOCTL, "Invalid commands offset %u\n", args->commands_offset); return -EINVAL; + } - if (file_priv->has_mmu_faults) + if (!file_priv->ctx.id) { + ivpu_dbg(vdev, IOCTL, "Context not initialized\n"); + return -EINVAL; + } + + if (file_priv->has_mmu_faults) { + ivpu_dbg(vdev, IOCTL, "Context %u has MMU faults\n", file_priv->ctx.id); return -EBADFD; + } return ivpu_submit(file, file_priv, args->cmdq_id, args->buffer_count, VPU_ENGINE_COMPUTE, - (void __user *)args->buffers_ptr, args->commands_offset, 0); + (void __user *)args->buffers_ptr, args->commands_offset, + args->preempt_buffer_index, 0); } int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file) @@ -904,11 +996,15 @@ int ivpu_cmdq_create_ioctl(struct drm_device *dev, void *data, struct drm_file * struct ivpu_cmdq *cmdq; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } - if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) + if (args->priority > DRM_IVPU_JOB_PRIORITY_REALTIME) { + ivpu_dbg(vdev, IOCTL, "Invalid priority %d\n", args->priority); return -EINVAL; + } ret = ivpu_rpm_get(vdev); if (ret < 0) @@ -936,8 +1032,10 @@ int ivpu_cmdq_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file u32 cmdq_id = 0; int ret; - if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) + if (!ivpu_is_capable(vdev, DRM_IVPU_CAP_MANAGE_CMDQ)) { + ivpu_dbg(vdev, IOCTL, "Command queue management not supported\n"); return -ENODEV; + } ret = ivpu_rpm_get(vdev); if (ret < 0) @@ -984,7 +1082,9 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr, payload = (struct 
vpu_ipc_msg_payload_job_done *)&jsm_msg->payload; mutex_lock(&vdev->submitted_jobs_lock); - ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); + if (!ivpu_job_handle_engine_error(vdev, payload->job_id, payload->job_status)) + /* No engine error, complete the job normally */ + ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status); mutex_unlock(&vdev->submitted_jobs_lock); } @@ -1012,7 +1112,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) if (ivpu_jsm_reset_engine(vdev, 0)) - return; + goto runtime_put; mutex_lock(&vdev->context_list_lock); xa_for_each(&vdev->context_xa, ctx_id, file_priv) { @@ -1036,7 +1136,7 @@ void ivpu_context_abort_work_fn(struct work_struct *work) goto runtime_put; if (ivpu_jsm_hws_resume_engine(vdev, 0)) - return; + goto runtime_put; /* * In hardware scheduling mode NPU already has stopped processing jobs * and won't send us any further notifications, thus we have to free job related resources @@ -1049,6 +1149,5 @@ void ivpu_context_abort_work_fn(struct work_struct *work) mutex_unlock(&vdev->submitted_jobs_lock); runtime_put: - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } diff --git a/drivers/accel/ivpu/ivpu_job.h b/drivers/accel/ivpu/ivpu_job.h index 2e301c2eea7b..3ab61e6a5616 100644 --- a/drivers/accel/ivpu/ivpu_job.h +++ b/drivers/accel/ivpu/ivpu_job.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2020-2024 Intel Corporation + * Copyright (C) 2020-2025 Intel Corporation */ #ifndef __IVPU_JOB_H__ @@ -15,12 +15,17 @@ struct ivpu_device; struct ivpu_file_priv; /** - * struct ivpu_cmdq - Object representing device queue used to send jobs. - * @jobq: Pointer to job queue memory shared with the device - * @mem: Memory allocated for the job queue, shared with device - * @entry_count Number of job entries in the queue - * @db_id: Doorbell assigned to this job queue - * @db_registered: True if doorbell is registered in device + * struct ivpu_cmdq - Represents a command queue for submitting jobs to the VPU. + * Tracks queue memory, preemption buffers, and metadata for job management. + * @jobq: Pointer to job queue memory shared with the device + * @primary_preempt_buf: Primary preemption buffer for this queue (optional) + * @secondary_preempt_buf: Secondary preemption buffer for this queue (optional) + * @mem: Memory allocated for the job queue, shared with device + * @entry_count: Number of job entries in the queue + * @id: Unique command queue ID + * @db_id: Doorbell ID assigned to this job queue + * @priority: Priority level of the command queue + * @is_legacy: True if this is a legacy command queue */ struct ivpu_cmdq { struct vpu_job_queue *jobq; @@ -35,16 +40,22 @@ struct ivpu_cmdq { }; /** - * struct ivpu_job - KMD object that represents batchbuffer / DMA buffer. - * Each batch / DMA buffer is a job to be submitted and executed by the VPU FW. - * This is a unit of execution, and be tracked by the job_id for - * any status reporting from VPU FW through IPC JOB RET/DONE message. 
- * @file_priv: The client that submitted this job - * @job_id: Job ID for KMD tracking and job status reporting from VPU FW - * @status: Status of the Job from IPC JOB RET/DONE message - * @batch_buffer: CPU vaddr points to the batch buffer memory allocated for the job - * @submit_status_offset: Offset within batch buffer where job completion handler - will update the job status + * struct ivpu_job - Representing a batch or DMA buffer submitted to the VPU. + * Each job is a unit of execution, tracked by job_id for status reporting from VPU FW. + * The structure holds all resources and metadata needed for job submission, execution, + * and completion handling. + * @vdev: Pointer to the VPU device + * @file_priv: The client context that submitted this job + * @done_fence: Fence signaled when job completes + * @cmd_buf_vpu_addr: VPU address of the command buffer for this job + * @cmdq_id: Command queue ID used for submission + * @job_id: Unique job ID for tracking and status reporting + * @engine_idx: Engine index for job execution + * @job_status: Status reported by firmware for this job + * @primary_preempt_buf: Primary preemption buffer for job + * @secondary_preempt_buf: Secondary preemption buffer for job (optional) + * @bo_count: Number of buffer objects associated with this job + * @bos: Array of buffer objects used by the job (batch buffer is at index 0) */ struct ivpu_job { struct ivpu_device *vdev; @@ -54,6 +65,9 @@ struct ivpu_job { u32 cmdq_id; u32 job_id; u32 engine_idx; + u32 job_status; + struct ivpu_bo *primary_preempt_buf; + struct ivpu_bo *secondary_preempt_buf; size_t bo_count; struct ivpu_bo *bos[] __counted_by(bo_count); }; @@ -71,6 +85,7 @@ void ivpu_cmdq_abort_all_jobs(struct ivpu_device *vdev, u32 ctx_id, u32 cmdq_id) void ivpu_job_done_consumer_init(struct ivpu_device *vdev); void ivpu_job_done_consumer_fini(struct ivpu_device *vdev); +bool ivpu_job_handle_engine_error(struct ivpu_device *vdev, u32 job_id, u32 job_status); void ivpu_context_abort_work_fn(struct work_struct *work); void ivpu_jobs_abort_all(struct ivpu_device *vdev); diff --git a/drivers/accel/ivpu/ivpu_mmu.c b/drivers/accel/ivpu/ivpu_mmu.c index 5ea010568faa..e1baf6b64935 100644 --- a/drivers/accel/ivpu/ivpu_mmu.c +++ b/drivers/accel/ivpu/ivpu_mmu.c @@ -970,7 +970,7 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev) } } - queue_work(system_wq, &vdev->context_abort_work); + queue_work(system_percpu_wq, &vdev->context_abort_work); } void ivpu_mmu_evtq_dump(struct ivpu_device *vdev) diff --git a/drivers/accel/ivpu/ivpu_mmu_context.c b/drivers/accel/ivpu/ivpu_mmu_context.c index f0267efa55aa..87ad593ef47d 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.c +++ b/drivers/accel/ivpu/ivpu_mmu_context.c @@ -430,7 +430,7 @@ static void ivpu_mmu_context_unmap_pages(struct ivpu_mmu_context *ctx, u64 vpu_a int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent) + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only) { size_t start_vpu_addr = vpu_addr; struct scatterlist *sg; @@ -450,6 +450,8 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, prot = IVPU_MMU_ENTRY_MAPPED; if (llc_coherent) prot |= IVPU_MMU_ENTRY_FLAG_LLC_COHERENT; + if (read_only) + prot |= IVPU_MMU_ENTRY_FLAG_RO; mutex_lock(&ctx->lock); @@ -527,7 +529,8 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id); if (ret) - 
ivpu_warn(vdev, "Failed to invalidate TLB for ctx %u: %d\n", ctx->id, ret); + ivpu_warn_ratelimited(vdev, "Failed to invalidate TLB for ctx %u: %d\n", + ctx->id, ret); } int @@ -568,7 +571,7 @@ void ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ct mutex_init(&ctx->lock); if (!context_id) { - start = vdev->hw->ranges.global.start; + start = vdev->hw->ranges.runtime.start; end = vdev->hw->ranges.shave.end; } else { start = min_t(u64, vdev->hw->ranges.user.start, vdev->hw->ranges.shave.start); diff --git a/drivers/accel/ivpu/ivpu_mmu_context.h b/drivers/accel/ivpu/ivpu_mmu_context.h index f255310968cf..663a11a9db11 100644 --- a/drivers/accel/ivpu/ivpu_mmu_context.h +++ b/drivers/accel/ivpu/ivpu_mmu_context.h @@ -42,7 +42,7 @@ int ivpu_mmu_context_insert_node(struct ivpu_mmu_context *ctx, const struct ivpu void ivpu_mmu_context_remove_node(struct ivpu_mmu_context *ctx, struct drm_mm_node *node); int ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, - u64 vpu_addr, struct sg_table *sgt, bool llc_coherent); + u64 vpu_addr, struct sg_table *sgt, bool llc_coherent, bool read_only); void ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u64 vpu_addr, struct sg_table *sgt); int ivpu_mmu_context_set_pages_ro(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, diff --git a/drivers/accel/ivpu/ivpu_ms.c b/drivers/accel/ivpu/ivpu_ms.c index 2a043baf10ca..1d9c1cb17924 100644 --- a/drivers/accel/ivpu/ivpu_ms.c +++ b/drivers/accel/ivpu/ivpu_ms.c @@ -8,6 +8,7 @@ #include "ivpu_drv.h" #include "ivpu_gem.h" +#include "ivpu_hw.h" #include "ivpu_jsm_msg.h" #include "ivpu_ms.h" #include "ivpu_pm.h" @@ -37,8 +38,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil struct drm_ivpu_metric_streamer_start *args = data; struct ivpu_device *vdev = file_priv->vdev; struct ivpu_ms_instance *ms; - u64 single_buff_size; u32 sample_size; + u64 buf_size; int ret; if (!args->metric_group_mask || !args->read_period_samples || @@ -52,7 +53,8 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil mutex_lock(&file_priv->ms_lock); if (get_instance_by_mask(file_priv, args->metric_group_mask)) { - ivpu_err(vdev, "Instance already exists (mask %#llx)\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance already exists (mask %#llx)\n", + args->metric_group_mask); ret = -EALREADY; goto unlock; } @@ -69,12 +71,18 @@ int ivpu_ms_start_ioctl(struct drm_device *dev, void *data, struct drm_file *fil if (ret) goto err_free_ms; - single_buff_size = sample_size * - ((u64)args->read_period_samples * MS_READ_PERIOD_MULTIPLIER); - ms->bo = ivpu_bo_create_global(vdev, PAGE_ALIGN(single_buff_size * MS_NUM_BUFFERS), - DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); + buf_size = PAGE_ALIGN((u64)args->read_period_samples * sample_size * + MS_READ_PERIOD_MULTIPLIER * MS_NUM_BUFFERS); + if (buf_size > ivpu_hw_range_size(&vdev->hw->ranges.global)) { + ivpu_dbg(vdev, IOCTL, "Requested MS buffer size %llu exceeds range size %llu\n", + buf_size, ivpu_hw_range_size(&vdev->hw->ranges.global)); + ret = -EINVAL; + goto err_free_ms; + } + + ms->bo = ivpu_bo_create_global(vdev, buf_size, DRM_IVPU_BO_CACHED | DRM_IVPU_BO_MAPPABLE); if (!ms->bo) { - ivpu_err(vdev, "Failed to allocate MS buffer (size %llu)\n", single_buff_size); + ivpu_dbg(vdev, IOCTL, "Failed to allocate MS buffer (size %llu)\n", buf_size); ret = -ENOMEM; goto err_free_ms; } @@ -175,7 +183,8 @@ int ivpu_ms_get_data_ioctl(struct drm_device 
*dev, void *data, struct drm_file * ms = get_instance_by_mask(file_priv, args->metric_group_mask); if (!ms) { - ivpu_err(vdev, "Instance doesn't exist for mask: %#llx\n", args->metric_group_mask); + ivpu_dbg(vdev, IOCTL, "Instance doesn't exist for mask: %#llx\n", + args->metric_group_mask); ret = -EINVAL; goto unlock; } diff --git a/drivers/accel/ivpu/ivpu_pm.c b/drivers/accel/ivpu/ivpu_pm.c index 475ddc94f1cf..480c075d87f6 100644 --- a/drivers/accel/ivpu/ivpu_pm.c +++ b/drivers/accel/ivpu/ivpu_pm.c @@ -54,7 +54,7 @@ static void ivpu_pm_prepare_cold_boot(struct ivpu_device *vdev) static void ivpu_pm_prepare_warm_boot(struct ivpu_device *vdev) { struct ivpu_fw_info *fw = vdev->fw; - struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem); + struct vpu_boot_params *bp = ivpu_bo_vaddr(fw->mem_bp); if (!bp->save_restore_ret_address) { ivpu_pm_prepare_cold_boot(vdev); @@ -186,7 +186,7 @@ void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason) if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) { ivpu_hw_diagnose_failure(vdev); ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */ - queue_work(system_unbound_wq, &vdev->pm->recovery_work); + queue_work(system_dfl_wq, &vdev->pm->recovery_work); } } @@ -226,7 +226,8 @@ void ivpu_start_job_timeout_detection(struct ivpu_device *vdev) unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr; /* No-op if already queued */ - queue_delayed_work(system_wq, &vdev->pm->job_timeout_work, msecs_to_jiffies(timeout_ms)); + queue_delayed_work(system_percpu_wq, &vdev->pm->job_timeout_work, + msecs_to_jiffies(timeout_ms)); } void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev) @@ -359,7 +360,6 @@ int ivpu_rpm_get(struct ivpu_device *vdev) void ivpu_rpm_put(struct ivpu_device *vdev) { - pm_runtime_mark_last_busy(vdev->drm.dev); pm_runtime_put_autosuspend(vdev->drm.dev); } @@ -428,7 +428,6 @@ void ivpu_pm_enable(struct ivpu_device *vdev) struct device *dev = vdev->drm.dev; pm_runtime_allow(dev); - pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } @@ -502,6 +501,11 @@ void ivpu_pm_irq_dct_work_fn(struct work_struct *work) else ret = ivpu_pm_dct_disable(vdev); - if (!ret) - ivpu_hw_btrs_dct_set_status(vdev, enable, vdev->pm->dct_active_percent); + if (!ret) { + /* Convert percent to U1.7 format */ + u8 val = DIV_ROUND_CLOSEST(vdev->pm->dct_active_percent * 128, 100); + + ivpu_hw_btrs_dct_set_status(vdev, enable, val); + } + } diff --git a/drivers/accel/ivpu/ivpu_sysfs.c b/drivers/accel/ivpu/ivpu_sysfs.c index 268ab7744a8b..d250a10caca9 100644 --- a/drivers/accel/ivpu/ivpu_sysfs.c +++ b/drivers/accel/ivpu/ivpu_sysfs.c @@ -63,7 +63,8 @@ npu_memory_utilization_show(struct device *dev, struct device_attribute *attr, c mutex_lock(&vdev->bo_list_lock); list_for_each_entry(bo, &vdev->bo_list, bo_list_node) - total_npu_memory += bo->base.base.size; + if (ivpu_bo_is_resident(bo)) + total_npu_memory += ivpu_bo_size(bo); mutex_unlock(&vdev->bo_list_lock); return sysfs_emit(buf, "%lld\n", total_npu_memory); diff --git a/drivers/accel/ivpu/vpu_jsm_api.h b/drivers/accel/ivpu/vpu_jsm_api.h index 4b6b2b3d2583..bca6a44dc041 100644 --- a/drivers/accel/ivpu/vpu_jsm_api.h +++ b/drivers/accel/ivpu/vpu_jsm_api.h @@ -1,15 +1,16 @@ /* SPDX-License-Identifier: MIT */ /* - * Copyright (c) 2020-2024, Intel Corporation. + * Copyright (c) 2020-2025, Intel Corporation. 
+ */ + +/** + * @addtogroup Jsm + * @{ */ /** * @file * @brief JSM shared definitions - * - * @ingroup Jsm - * @brief JSM shared definitions - * @{ */ #ifndef VPU_JSM_API_H #define VPU_JSM_API_H @@ -22,7 +23,7 @@ /* * Minor version changes when API backward compatibility is preserved. */ -#define VPU_JSM_API_VER_MINOR 29 +#define VPU_JSM_API_VER_MINOR 33 /* * API header changed (field names, documentation, formatting) but API itself has not been changed @@ -71,9 +72,15 @@ #define VPU_JSM_STATUS_MVNCI_OUT_OF_RESOURCES 0xAU #define VPU_JSM_STATUS_MVNCI_NOT_IMPLEMENTED 0xBU #define VPU_JSM_STATUS_MVNCI_INTERNAL_ERROR 0xCU -/* Job status returned when the job was preempted mid-inference */ +/* @deprecated (use VPU_JSM_STATUS_PREEMPTED_MID_COMMAND instead) */ #define VPU_JSM_STATUS_PREEMPTED_MID_INFERENCE 0xDU +/* Job status returned when the job was preempted mid-command */ +#define VPU_JSM_STATUS_PREEMPTED_MID_COMMAND 0xDU +/* Range of status codes that require engine reset */ +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MIN 0xEU #define VPU_JSM_STATUS_MVNCI_CONTEXT_VIOLATION_HW 0xEU +#define VPU_JSM_STATUS_MVNCI_PREEMPTION_TIMED_OUT 0xFU +#define VPU_JSM_STATUS_ENGINE_RESET_REQUIRED_MAX 0x1FU /* * Host <-> VPU IPC channels. @@ -134,11 +141,21 @@ enum { * 2. Native fence queues are only supported on VPU 40xx onwards. */ VPU_JOB_QUEUE_FLAGS_USE_NATIVE_FENCE_MASK = (1 << 1U), - /* * Enable turbo mode for testing NPU performance; not recommended for regular usage. */ - VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U) + VPU_JOB_QUEUE_FLAGS_TURBO_MODE = (1 << 2U), + /* + * Queue error detection mode flag + * For 'interactive' queues (this bit not set), the FW will identify queues that have not + * completed a job inside the TDR timeout as in error as part of engine reset sequence. + * For 'non-interactive' queues (this bit set), the FW will identify queues that have not + * progressed the heartbeat inside the non-interactive no-progress timeout as in error as + * part of engine reset sequence. Additionally, there is an upper limit applied to these + * queues: even if they progress the heartbeat, if they run longer than non-interactive + * timeout, then the FW will also identify them as in error. + */ + VPU_JOB_QUEUE_FLAGS_NON_INTERACTIVE = (1 << 3U) }; /* @@ -209,7 +226,7 @@ enum { */ #define VPU_INLINE_CMD_TYPE_FENCE_SIGNAL 0x2 -/* +/** * Job scheduling priority bands for both hardware scheduling and OS scheduling. */ enum vpu_job_scheduling_priority_band { @@ -220,16 +237,16 @@ enum vpu_job_scheduling_priority_band { VPU_JOB_SCHEDULING_PRIORITY_BAND_COUNT = 4, }; -/* +/** * Job format. * Jobs defines the actual workloads to be executed by a given engine. */ struct vpu_job_queue_entry { - /**< Address of VPU commands batch buffer */ + /** Address of VPU commands batch buffer */ u64 batch_buf_addr; - /**< Job ID */ + /** Job ID */ u32 job_id; - /**< Flags bit field, see VPU_JOB_FLAGS_* above */ + /** Flags bit field, see VPU_JOB_FLAGS_* above */ u32 flags; /** * Doorbell ring timestamp taken by KMD from SoC's global system clock, in @@ -237,20 +254,20 @@ struct vpu_job_queue_entry { * to match other profiling timestamps. 
*/ u64 doorbell_timestamp; - /**< Extra id for job tracking, used only in the firmware perf traces */ + /** Extra id for job tracking, used only in the firmware perf traces */ u64 host_tracking_id; - /**< Address of the primary preemption buffer to use for this job */ + /** Address of the primary preemption buffer to use for this job */ u64 primary_preempt_buf_addr; - /**< Size of the primary preemption buffer to use for this job */ + /** Size of the primary preemption buffer to use for this job */ u32 primary_preempt_buf_size; - /**< Size of secondary preemption buffer to use for this job */ + /** Size of secondary preemption buffer to use for this job */ u32 secondary_preempt_buf_size; - /**< Address of secondary preemption buffer to use for this job */ + /** Address of secondary preemption buffer to use for this job */ u64 secondary_preempt_buf_addr; u64 reserved_0; }; -/* +/** * Inline command format. * Inline commands are the commands executed at scheduler level (typically, * synchronization directives). Inline command and job objects must be of @@ -258,34 +275,36 @@ struct vpu_job_queue_entry { */ struct vpu_inline_cmd { u64 reserved_0; - /* Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ + /** Inline command type, see VPU_INLINE_CMD_TYPE_* defines. */ u32 type; - /* Flags bit field, see VPU_JOB_FLAGS_* above. */ + /** Flags bit field, see VPU_JOB_FLAGS_* above. */ u32 flags; - /* Inline command payload. Depends on inline command type. */ - union { - /* Fence (wait and signal) commands' payload. */ - struct { - /* Fence object handle. */ + /** Inline command payload. Depends on inline command type. */ + union payload { + /** Fence (wait and signal) commands' payload. */ + struct fence { + /** Fence object handle. */ u64 fence_handle; - /* User VA of the current fence value. */ + /** User VA of the current fence value. */ u64 current_value_va; - /* User VA of the monitored fence value (read-only). */ + /** User VA of the monitored fence value (read-only). */ u64 monitored_value_va; - /* Value to wait for or write in fence location. */ + /** Value to wait for or write in fence location. */ u64 value; - /* User VA of the log buffer in which to add log entry on completion. */ + /** User VA of the log buffer in which to add log entry on completion. */ u64 log_buffer_va; - /* NPU private data. */ + /** NPU private data. */ u64 npu_private_data; } fence; - /* Other commands do not have a payload. */ - /* Payload definition for future inline commands can be inserted here. */ + /** + * Other commands do not have a payload: + * Payload definition for future inline commands can be inserted here. + */ u64 reserved_1[6]; } payload; }; -/* +/** * Job queue slots can be populated either with job objects or inline command objects. */ union vpu_jobq_slot { @@ -293,7 +312,7 @@ union vpu_jobq_slot { struct vpu_inline_cmd inline_cmd; }; -/* +/** * Job queue control registers. */ struct vpu_job_queue_header { @@ -301,18 +320,18 @@ struct vpu_job_queue_header { u32 head; u32 tail; u32 flags; - /* Set to 1 to indicate priority_band field is valid */ + /** Set to 1 to indicate priority_band field is valid */ u32 priority_band_valid; - /* + /** * Priority for the work of this job queue, valid only if the HWS is NOT used - * and the `priority_band_valid` is set to 1. It is applied only during - * the VPU_JSM_MSG_REGISTER_DB message processing. - * The device firmware might use the `priority_band` to optimize the power + * and the @ref priority_band_valid is set to 1. 
It is applied only during + * the @ref VPU_JSM_MSG_REGISTER_DB message processing. + * The device firmware might use the priority_band to optimize the power * management logic, but it will not affect the order of jobs. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority, limited to 0..31 range */ + /** Inside realtime band assigns a further priority, limited to 0..31 range */ u32 realtime_priority_level; u32 reserved_0[9]; }; @@ -337,16 +356,16 @@ enum vpu_trace_entity_type { VPU_TRACE_ENTITY_TYPE_HW_COMPONENT = 2, }; -/* +/** * HWS specific log buffer header details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_header { - /* Written by VPU after adding a log entry. Initialised by host to 0. */ + /** Written by VPU after adding a log entry. Initialised by host to 0. */ u32 first_free_entry_index; - /* Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ + /** Incremented by VPU every time the VPU writes the 0th entry; initialised by host to 0. */ u32 wraparound_count; - /* + /** * This is the number of buffers that can be stored in the log buffer provided by the host. * It is written by host before passing buffer to VPU. VPU should consider it read-only. */ @@ -354,14 +373,14 @@ struct vpu_hws_log_buffer_header { u64 reserved[2]; }; -/* +/** * HWS specific log buffer entry details. * Total size is 32 bytes. */ struct vpu_hws_log_buffer_entry { - /* VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ + /** VPU timestamp must be an invariant timer tick (not impacted by DVFS) */ u64 vpu_timestamp; - /* + /** * Operation type: * 0 - context state change * 1 - queue new work @@ -371,7 +390,7 @@ struct vpu_hws_log_buffer_entry { */ u32 operation_type; u32 reserved; - /* Operation data depends on operation type */ + /** Operation data depends on operation type */ u64 operation_data[2]; }; @@ -381,51 +400,54 @@ enum vpu_hws_native_fence_log_type { VPU_HWS_NATIVE_FENCE_LOG_TYPE_SIGNALS = 2 }; -/* HWS native fence log buffer header. */ +/** HWS native fence log buffer header. */ struct vpu_hws_native_fence_log_header { union { struct { - /* Index of the first free entry in buffer. */ + /** Index of the first free entry in buffer. */ u32 first_free_entry_idx; - /* Incremented each time NPU wraps around the buffer to write next entry. */ + /** + * Incremented whenever the NPU wraps around the buffer and writes + * to the first entry again. + */ u32 wraparound_count; }; - /* Field allowing atomic update of both fields above. */ + /** Field allowing atomic update of both fields above. */ u64 atomic_wraparound_and_entry_idx; }; - /* Log buffer type, see enum vpu_hws_native_fence_log_type. */ + /** Log buffer type, see enum vpu_hws_native_fence_log_type. */ u64 type; - /* Allocated number of entries in the log buffer. */ + /** Allocated number of entries in the log buffer. */ u64 entry_nb; u64 reserved[2]; }; -/* Native fence log operation types. */ +/** Native fence log operation types. */ enum vpu_hws_native_fence_log_op { VPU_HWS_NATIVE_FENCE_LOG_OP_SIGNAL_EXECUTED = 0, VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED = 1 }; -/* HWS native fence log entry. */ +/** HWS native fence log entry. */ struct vpu_hws_native_fence_log_entry { - /* Newly signaled/unblocked fence value. */ + /** Newly signaled/unblocked fence value. */ u64 fence_value; - /* Native fence object handle to which this operation belongs. 
*/ + /** Native fence object handle to which this operation belongs. */ u64 fence_handle; - /* Operation type, see enum vpu_hws_native_fence_log_op. */ + /** Operation type, see enum vpu_hws_native_fence_log_op. */ u64 op_type; u64 reserved_0; - /* + /** * VPU_HWS_NATIVE_FENCE_LOG_OP_WAIT_UNBLOCKED only: Timestamp at which fence * wait was started (in NPU SysTime). */ u64 fence_wait_start_ts; u64 reserved_1; - /* Timestamp at which fence operation was completed (in NPU SysTime). */ + /** Timestamp at which fence operation was completed (in NPU SysTime). */ u64 fence_end_ts; }; -/* Native fence log buffer. */ +/** Native fence log buffer. */ struct vpu_hws_native_fence_log_buffer { struct vpu_hws_native_fence_log_header header; struct vpu_hws_native_fence_log_entry entry[]; @@ -435,10 +457,17 @@ struct vpu_hws_native_fence_log_buffer { * Host <-> VPU IPC messages types. */ enum vpu_ipc_msg_type { + /** Unsupported command */ VPU_JSM_MSG_UNKNOWN = 0xFFFFFFFF, - /* IPC Host -> Device, Async commands */ + /** IPC Host -> Device, base id for async commands */ VPU_JSM_MSG_ASYNC_CMD = 0x1100, + /** + * Reset engine. The NPU cancels all the jobs currently executing on the target + * engine making the engine become idle and then does a HW reset, before returning + * to the host. + * @see struct vpu_ipc_msg_payload_engine_reset + */ VPU_JSM_MSG_ENGINE_RESET = VPU_JSM_MSG_ASYNC_CMD, /** * Preempt engine. The NPU stops (preempts) all the jobs currently @@ -448,10 +477,24 @@ enum vpu_ipc_msg_type { * the target engine, but it stops processing them (until the queue doorbell * is rung again); the host is responsible to reset the job queue, either * after preemption or when resubmitting jobs to the queue. + * @see vpu_ipc_msg_payload_engine_preempt */ VPU_JSM_MSG_ENGINE_PREEMPT = 0x1101, + /** + * OS scheduling doorbell register command + * @see vpu_ipc_msg_payload_register_db + */ VPU_JSM_MSG_REGISTER_DB = 0x1102, + /** + * OS scheduling doorbell unregister command + * @see vpu_ipc_msg_payload_unregister_db + */ VPU_JSM_MSG_UNREGISTER_DB = 0x1103, + /** + * Query engine heartbeat. Heartbeat is expected to increase monotonically + * and increase while work is being progressed by NPU. + * @see vpu_ipc_msg_payload_query_engine_hb + */ VPU_JSM_MSG_QUERY_ENGINE_HB = 0x1104, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT = 0x1105, VPU_JSM_MSG_GET_POWER_LEVEL = 0x1106, @@ -477,6 +520,7 @@ enum vpu_ipc_msg_type { * aborted and removed from internal scheduling queues. All doorbells assigned * to the host_ssid are unregistered and any internal FW resources belonging to * the host_ssid are released. 
+ * @see vpu_ipc_msg_payload_ssid_release */ VPU_JSM_MSG_SSID_RELEASE = 0x110e, /** @@ -504,43 +548,78 @@ enum vpu_ipc_msg_type { * @see vpu_jsm_metric_streamer_start */ VPU_JSM_MSG_METRIC_STREAMER_INFO = 0x1112, - /** Control command: Priority band setup */ + /** + * Control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP = 0x1113, - /** Control command: Create command queue */ + /** + * Control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq + */ VPU_JSM_MSG_CREATE_CMD_QUEUE = 0x1114, - /** Control command: Destroy command queue */ + /** + * Control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE = 0x1115, - /** Control command: Set context scheduling properties */ + /** + * Control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES = 0x1116, - /* + /** * Register a doorbell to notify VPU of new work. The doorbell may later be * deallocated or reassigned to another context. + * @see vpu_jsm_hws_register_db */ VPU_JSM_MSG_HWS_REGISTER_DB = 0x1117, - /** Control command: Log buffer setting */ + /** + * Control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG = 0x1118, - /* Control command: Suspend command queue. */ + /** + * Control command: Suspend command queue. + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ = 0x1119, - /* Control command: Resume command queue */ + /** + * Control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ = 0x111a, - /* Control command: Resume engine after reset */ + /** + * Control command: Resume engine after reset + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_ENGINE_RESUME = 0x111b, - /* Control command: Enable survivability/DCT mode */ + /** + * Control command: Enable survivability/DCT mode + * @see vpu_ipc_msg_payload_pwr_dct_control + */ VPU_JSM_MSG_DCT_ENABLE = 0x111c, - /* Control command: Disable survivability/DCT mode */ + /** + * Control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE = 0x111d, /** * Dump VPU state. To be used for debug purposes only. - * NOTE: Please introduce new ASYNC commands before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC commands before this one. */ VPU_JSM_MSG_STATE_DUMP = 0x11FF, - /* IPC Host -> Device, General commands */ + /** IPC Host -> Device, base id for general commands */ VPU_JSM_MSG_GENERAL_CMD = 0x1200, + /** Unsupported command */ VPU_JSM_MSG_BLOB_DEINIT_DEPRECATED = VPU_JSM_MSG_GENERAL_CMD, /** * Control dyndbg behavior by executing a dyndbg command; equivalent to - * Linux command: `echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control`. 
+ * Linux command: + * @verbatim echo '<dyndbg_cmd>' > <debugfs>/dynamic_debug/control @endverbatim + * @see vpu_ipc_msg_payload_dyndbg_control */ VPU_JSM_MSG_DYNDBG_CONTROL = 0x1201, /** @@ -548,17 +627,35 @@ enum vpu_ipc_msg_type { */ VPU_JSM_MSG_PWR_D0I3_ENTER = 0x1202, - /* IPC Device -> Host, Job completion */ + /** + * IPC Device -> Host, Job completion + * @see struct vpu_ipc_msg_payload_job_done + */ VPU_JSM_MSG_JOB_DONE = 0x2100, - /* IPC Device -> Host, Fence signalled */ + /** + * IPC Device -> Host, Fence signalled + * @see vpu_ipc_msg_payload_native_fence_signalled + */ VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED = 0x2101, /* IPC Device -> Host, Async command completion */ VPU_JSM_MSG_ASYNC_CMD_DONE = 0x2200, + /** + * IPC Device -> Host, engine reset complete + * @see vpu_ipc_msg_payload_engine_reset_done + */ VPU_JSM_MSG_ENGINE_RESET_DONE = VPU_JSM_MSG_ASYNC_CMD_DONE, + /** + * Preempt complete message + * @see vpu_ipc_msg_payload_engine_preempt_done + */ VPU_JSM_MSG_ENGINE_PREEMPT_DONE = 0x2201, VPU_JSM_MSG_REGISTER_DB_DONE = 0x2202, VPU_JSM_MSG_UNREGISTER_DB_DONE = 0x2203, + /** + * Response to query engine heartbeat. + * @see vpu_ipc_msg_payload_query_engine_hb_done + */ VPU_JSM_MSG_QUERY_ENGINE_HB_DONE = 0x2204, VPU_JSM_MSG_GET_POWER_LEVEL_COUNT_DONE = 0x2205, VPU_JSM_MSG_GET_POWER_LEVEL_DONE = 0x2206, @@ -575,7 +672,10 @@ enum vpu_ipc_msg_type { VPU_JSM_MSG_TRACE_GET_CAPABILITY_RSP = 0x220c, /** Response to VPU_JSM_MSG_TRACE_GET_NAME. */ VPU_JSM_MSG_TRACE_GET_NAME_RSP = 0x220d, - /** Response to VPU_JSM_MSG_SSID_RELEASE. */ + /** + * Response to VPU_JSM_MSG_SSID_RELEASE. + * @see vpu_ipc_msg_payload_ssid_release + */ VPU_JSM_MSG_SSID_RELEASE_DONE = 0x220e, /** * Response to VPU_JSM_MSG_METRIC_STREAMER_START. @@ -605,37 +705,71 @@ enum vpu_ipc_msg_type { /** * Asynchronous event sent from the VPU to the host either when the current * metric buffer is full or when the VPU has collected a multiple of - * @notify_sample_count samples as indicated through the start command - * (VPU_JSM_MSG_METRIC_STREAMER_START). Returns information about collected - * metric data. + * @ref vpu_jsm_metric_streamer_start::notify_sample_count samples as indicated + * through the start command (VPU_JSM_MSG_METRIC_STREAMER_START). Returns + * information about collected metric data. 
* @see vpu_jsm_metric_streamer_done */ VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION = 0x2213, - /** Response to control command: Priority band setup */ + /** + * Response to control command: Priority band setup + * @see vpu_ipc_msg_payload_hws_priority_band_setup + */ VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP_RSP = 0x2214, - /** Response to control command: Create command queue */ + /** + * Response to control command: Create command queue + * @see vpu_ipc_msg_payload_hws_create_cmdq_rsp + */ VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP = 0x2215, - /** Response to control command: Destroy command queue */ + /** + * Response to control command: Destroy command queue + * @see vpu_ipc_msg_payload_hws_destroy_cmdq + */ VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP = 0x2216, - /** Response to control command: Set context scheduling properties */ + /** + * Response to control command: Set context scheduling properties + * @see vpu_ipc_msg_payload_hws_set_context_sched_properties + */ VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP = 0x2217, - /** Response to control command: Log buffer setting */ + /** + * Response to control command: Log buffer setting + * @see vpu_ipc_msg_payload_hws_set_scheduling_log + */ VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG_RSP = 0x2218, - /* IPC Device -> Host, HWS notify index entry of log buffer written */ + /** + * IPC Device -> Host, HWS notify index entry of log buffer written + * @see vpu_ipc_msg_payload_hws_scheduling_log_notification + */ VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION = 0x2219, - /* IPC Device -> Host, HWS completion of a context suspend request */ + /** + * IPC Device -> Host, HWS completion of a context suspend request + * @see vpu_ipc_msg_payload_hws_suspend_cmdq + */ VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE = 0x221a, - /* Response to control command: Resume command queue */ + /** + * Response to control command: Resume command queue + * @see vpu_ipc_msg_payload_hws_resume_cmdq + */ VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP = 0x221b, - /* Response to control command: Resume engine command response */ + /** + * Response to control command: Resume engine command response + * @see vpu_ipc_msg_payload_hws_resume_engine + */ VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE = 0x221c, - /* Response to control command: Enable survivability/DCT mode */ + /** + * Response to control command: Enable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_ENABLE_DONE = 0x221d, - /* Response to control command: Disable survivability/DCT mode */ + /** + * Response to control command: Disable survivability/DCT mode + * This command has no payload + */ VPU_JSM_MSG_DCT_DISABLE_DONE = 0x221e, /** * Response to state dump control command. - * NOTE: Please introduce new ASYNC responses before this one. * + * This command has no payload. + * NOTE: Please introduce new ASYNC responses before this one. */ VPU_JSM_MSG_STATE_DUMP_RSP = 0x22FF, @@ -653,57 +787,66 @@ enum vpu_ipc_msg_type { enum vpu_ipc_msg_status { VPU_JSM_MSG_FREE, VPU_JSM_MSG_ALLOCATED }; -/* - * Host <-> LRT IPC message payload definitions +/** + * Engine reset request payload + * @see VPU_JSM_MSG_ENGINE_RESET */ struct vpu_ipc_msg_payload_engine_reset { - /* Engine to be reset. */ + /** Engine to be reset. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Engine preemption request struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT + */ struct vpu_ipc_msg_payload_engine_preempt { - /* Engine to be preempted. */ + /** Engine to be preempted. */ u32 engine_idx; - /* ID of the preemption request. 
*/ + /** ID of the preemption request. */ u32 preempt_id; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for only OS scheduling. * @see VPU_JSM_MSG_REGISTER_DB */ struct vpu_ipc_msg_payload_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Virtual address in Global GTT pointing to the start of job queue. */ + /** Virtual address in Global GTT pointing to the start of job queue. */ u64 jobq_base; - /* Size of the job queue in bytes. */ + /** Size of the job queue in bytes. */ u32 jobq_size; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; }; /** - * @brief Unregister doorbell command structure. + * Unregister doorbell command structure. * Request structure to unregister a doorbell for both HW and OS scheduling. * @see VPU_JSM_MSG_UNREGISTER_DB */ struct vpu_ipc_msg_payload_unregister_db { - /* Index of the doorbell to unregister. */ + /** Index of the doorbell to unregister. */ u32 db_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; +/** + * Heartbeat request structure + * @see VPU_JSM_MSG_QUERY_ENGINE_HB + */ struct vpu_ipc_msg_payload_query_engine_hb { - /* Engine to return heartbeat value. */ + /** Engine to return heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -723,10 +866,14 @@ struct vpu_ipc_msg_payload_power_level { u32 reserved_0; }; +/** + * Structure for requesting ssid release + * @see VPU_JSM_MSG_SSID_RELEASE + */ struct vpu_ipc_msg_payload_ssid_release { - /* Host sub-stream ID for the context to be released. */ + /** Host sub-stream ID for the context to be released. */ u32 host_ssid; - /* Reserved */ + /** Reserved */ u32 reserved_0; }; @@ -752,7 +899,7 @@ struct vpu_jsm_metric_streamer_start { u64 sampling_rate; /** * If > 0 the VPU will send a VPU_JSM_MSG_METRIC_STREAMER_NOTIFICATION message - * after every @notify_sample_count samples is collected or dropped by the VPU. + * after every @ref notify_sample_count samples is collected or dropped by the VPU. * If set to UINT_MAX the VPU will only generate a notification when the metric * buffer is full. If set to 0 the VPU will never generate a notification. */ @@ -762,9 +909,9 @@ struct vpu_jsm_metric_streamer_start { * Address and size of the buffer where the VPU will write metric data. The * VPU writes all counters from enabled metric groups one after another. If * there is no space left to write data at the next sample period the VPU - * will switch to the next buffer (@see next_buffer_addr) and will optionally - * send a notification to the host driver if @notify_sample_count is non-zero. - * If @next_buffer_addr is NULL the VPU will stop collecting metric data. + * will switch to the next buffer (@ref next_buffer_addr) and will optionally + * send a notification to the host driver if @ref notify_sample_count is non-zero. + * If @ref next_buffer_addr is NULL the VPU will stop collecting metric data. */ u64 buffer_addr; u64 buffer_size; @@ -827,63 +974,80 @@ struct vpu_jsm_metric_streamer_update { u64 next_buffer_size; }; +/** + * Device -> host job completion message. + * @see VPU_JSM_MSG_JOB_DONE + */ struct vpu_ipc_msg_payload_job_done { - /* Engine to which the job was submitted. */ + /** Engine to which the job was submitted. 
*/ u32 engine_idx; - /* Index of the doorbell to which the job was submitted */ + /** Index of the doorbell to which the job was submitted */ u32 db_idx; - /* ID of the completed job */ + /** ID of the completed job */ u32 job_id; - /* Status of the completed job */ + /** Status of the completed job */ u32 job_status; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* +/** * Notification message upon native fence signalling. * @see VPU_JSM_MSG_NATIVE_FENCE_SIGNALLED */ struct vpu_ipc_msg_payload_native_fence_signalled { - /* Engine ID. */ + /** Engine ID. */ u32 engine_idx; - /* Host SSID. */ + /** Host SSID. */ u32 host_ssid; - /* CMDQ ID */ + /** CMDQ ID */ u64 cmdq_id; - /* Fence object handle. */ + /** Fence object handle. */ u64 fence_handle; }; +/** + * vpu_ipc_msg_payload_engine_reset_done will contain an array of this structure + * which contains which queues caused reset if FW was able to detect any error. + * @see vpu_ipc_msg_payload_engine_reset_done + */ struct vpu_jsm_engine_reset_context { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* See VPU_ENGINE_RESET_CONTEXT_* defines */ + /** See VPU_ENGINE_RESET_CONTEXT_* defines */ u64 flags; }; +/** + * Engine reset response. + * @see VPU_JSM_MSG_ENGINE_RESET_DONE + */ struct vpu_ipc_msg_payload_engine_reset_done { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Number of impacted contexts */ + /** Number of impacted contexts */ u32 num_impacted_contexts; - /* Array of impacted command queue ids and their flags */ + /** Array of impacted command queue ids and their flags */ struct vpu_jsm_engine_reset_context impacted_contexts[VPU_MAX_ENGINE_RESET_IMPACTED_CONTEXTS]; }; +/** + * Preemption response struct + * @see VPU_JSM_MSG_ENGINE_PREEMPT_DONE + */ struct vpu_ipc_msg_payload_engine_preempt_done { - /* Engine preempted. */ + /** Engine preempted. */ u32 engine_idx; - /* ID of the preemption request. */ + /** ID of the preemption request. */ u32 preempt_id; }; @@ -912,12 +1076,16 @@ struct vpu_ipc_msg_payload_unregister_db_done { u32 reserved_0; }; +/** + * Structure for heartbeat response + * @see VPU_JSM_MSG_QUERY_ENGINE_HB_DONE + */ struct vpu_ipc_msg_payload_query_engine_hb_done { - /* Engine returning heartbeat value. */ + /** Engine returning heartbeat value. */ u32 engine_idx; - /* Reserved */ + /** Reserved */ u32 reserved_0; - /* Heartbeat value. */ + /** Heartbeat value. */ u64 heartbeat; }; @@ -937,7 +1105,10 @@ struct vpu_ipc_msg_payload_get_power_level_count_done { u8 power_limit[16]; }; -/* HWS priority band setup request / response */ +/** + * HWS priority band setup request / response + * @see VPU_JSM_MSG_SET_PRIORITY_BAND_SETUP + */ struct vpu_ipc_msg_payload_hws_priority_band_setup { /* * Grace period in 100ns units when preempting another priority band for @@ -964,15 +1135,23 @@ struct vpu_ipc_msg_payload_hws_priority_band_setup { * TDR timeout value in milliseconds. Default value of 0 meaning no timeout. */ u32 tdr_timeout; + /* Non-interactive queue timeout for no progress of heartbeat in milliseconds. + * Default value of 0 meaning no timeout. + */ + u32 non_interactive_no_progress_timeout; + /* + * Non-interactive queue upper limit timeout value in milliseconds. Default + * value of 0 meaning no timeout. 
+ */ + u32 non_interactive_timeout; }; -/* +/** * @brief HWS create command queue request. * Host will create a command queue via this command. * Note: Cmdq group is a handle of an object which * may contain one or more command queues. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq { /* Process id */ @@ -993,66 +1172,73 @@ struct vpu_ipc_msg_payload_hws_create_cmdq { u32 reserved_0; }; -/* - * @brief HWS create command queue response. - * @see VPU_JSM_MSG_CREATE_CMD_QUEUE +/** + * HWS create command queue response. * @see VPU_JSM_MSG_CREATE_CMD_QUEUE_RSP */ struct vpu_ipc_msg_payload_hws_create_cmdq_rsp { - /* Process id */ + /** Process id */ u64 process_id; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Engine for which queue is being created */ + /** Engine for which queue is being created */ u32 engine_idx; - /* Command queue group */ + /** Command queue group */ u64 cmdq_group; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS destroy command queue request / response */ +/** + * HWS destroy command queue request / response + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE + * @see VPU_JSM_MSG_DESTROY_CMD_QUEUE_RSP + */ struct vpu_ipc_msg_payload_hws_destroy_cmdq { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; }; -/* HWS set context scheduling properties request / response */ +/** + * HWS set context scheduling properties request / response + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES + * @see VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES_RSP + */ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; - /* Command queue id */ + /** Command queue id */ u64 cmdq_id; - /* + /** * Priority band to assign to work of this context. * Available priority bands: @see enum vpu_job_scheduling_priority_band */ u32 priority_band; - /* Inside realtime band assigns a further priority */ + /** Inside realtime band assigns a further priority */ u32 realtime_priority_level; - /* Priority relative to other contexts in the same process */ + /** Priority relative to other contexts in the same process */ s32 in_process_priority; - /* Zero padding / Reserved */ + /** Zero padding / Reserved */ u32 reserved_1; - /* + /** * Context quantum relative to other contexts of same priority in the same process * Minimum value supported by NPU is 1ms (10000 in 100ns units). */ u64 context_quantum; - /* Grace period when preempting context of the same priority within the same process */ + /** Grace period when preempting context of the same priority within the same process */ u64 grace_period_same_priority; - /* Grace period when preempting context of a lower priority within the same process */ + /** Grace period when preempting context of a lower priority within the same process */ u64 grace_period_lower_priority; }; -/* - * @brief Register doorbell command structure. +/** + * Register doorbell command structure. * This structure supports doorbell registration for both HW and OS scheduling. * Note: Queue base and size are added here so that the same structure can be used for * OS scheduling and HW scheduling. 
For OS scheduling, cmdq_id will be ignored @@ -1061,27 +1247,27 @@ struct vpu_ipc_msg_payload_hws_set_context_sched_properties { * @see VPU_JSM_MSG_HWS_REGISTER_DB */ struct vpu_jsm_hws_register_db { - /* Index of the doorbell to register. */ + /** Index of the doorbell to register. */ u32 db_id; - /* Host sub-stream ID for the context assigned to the doorbell. */ + /** Host sub-stream ID for the context assigned to the doorbell. */ u32 host_ssid; - /* ID of the command queue associated with the doorbell. */ + /** ID of the command queue associated with the doorbell. */ u64 cmdq_id; - /* Virtual address pointing to the start of command queue. */ + /** Virtual address pointing to the start of command queue. */ u64 cmdq_base; - /* Size of the command queue in bytes. */ + /** Size of the command queue in bytes. */ u64 cmdq_size; }; -/* - * @brief Structure to set another buffer to be used for scheduling-related logging. +/** + * Structure to set another buffer to be used for scheduling-related logging. * The size of the logging buffer and the number of entries is defined as part of the * buffer itself as described next. * The log buffer received from the host is made up of; - * - header: 32 bytes in size, as shown in 'struct vpu_hws_log_buffer_header'. + * - header: 32 bytes in size, as shown in @ref vpu_hws_log_buffer_header. * The header contains the number of log entries in the buffer. * - log entry: 0 to n-1, each log entry is 32 bytes in size, as shown in - * 'struct vpu_hws_log_buffer_entry'. + * @ref vpu_hws_log_buffer_entry. * The entry contains the VPU timestamp, operation type and data. * The host should provide the notify index value of log buffer to VPU. This is a * value defined within the log buffer and when written to will generate the @@ -1095,30 +1281,30 @@ struct vpu_jsm_hws_register_db { * @see VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION */ struct vpu_ipc_msg_payload_hws_set_scheduling_log { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Host SSID */ + /** Host SSID */ u32 host_ssid; - /* + /** * VPU log buffer virtual address. * Set to 0 to disable logging for this engine. */ u64 vpu_log_buffer_va; - /* + /** * Notify index of log buffer. VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION * is generated when an event log is written to this index. */ u64 notify_index; - /* + /** * Field is now deprecated, will be removed when KMD is updated to support removal */ u32 enable_extra_events; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief The scheduling log notification is generated by VPU when it writes +/** + * The scheduling log notification is generated by VPU when it writes * an event into the log buffer at the notify_index. VPU notifies host with * VPU_JSM_MSG_HWS_SCHEDULING_LOG_NOTIFICATION. This is an asynchronous * message from VPU to host. @@ -1126,14 +1312,14 @@ struct vpu_ipc_msg_payload_hws_set_scheduling_log { * @see VPU_JSM_MSG_HWS_SET_SCHEDULING_LOG */ struct vpu_ipc_msg_payload_hws_scheduling_log_notification { - /* Engine ordinal */ + /** Engine ordinal */ u32 engine_idx; - /* Zero Padding */ + /** Zero Padding */ u32 reserved_0; }; -/* - * @brief HWS suspend command queue request and done structure. +/** + * HWS suspend command queue request and done structure. 
  * Host will request the suspend of contexts and VPU will;
  * - Suspend all work on this context
  * - Preempt any running work
@@ -1152,21 +1338,21 @@ struct vpu_ipc_msg_payload_hws_scheduling_log_notification {
  * @see VPU_JSM_MSG_HWS_SUSPEND_CMDQ_DONE
  */
 struct vpu_ipc_msg_payload_hws_suspend_cmdq {
-	/* Host SSID */
+	/** Host SSID */
 	u32 host_ssid;
-	/* Zero Padding */
+	/** Zero Padding */
 	u32 reserved_0;
-	/* Command queue id */
+	/** Command queue id */
 	u64 cmdq_id;
-	/*
+	/**
	 * Suspend fence value - reported by the VPU suspend context
	 * completed once suspend is complete.
	 */
 	u64 suspend_fence_value;
 };
 
-/*
- * @brief HWS Resume command queue request / response structure.
+/**
+ * HWS Resume command queue request / response structure.
  * Host will request the resume of a context;
  * - VPU will resume all work on this context
  * - Scheduler will allow this context to be scheduled
@@ -1174,25 +1360,25 @@ struct vpu_ipc_msg_payload_hws_suspend_cmdq {
  * @see VPU_JSM_MSG_HWS_RESUME_CMDQ_RSP
  */
 struct vpu_ipc_msg_payload_hws_resume_cmdq {
-	/* Host SSID */
+	/** Host SSID */
 	u32 host_ssid;
-	/* Zero Padding */
+	/** Zero Padding */
 	u32 reserved_0;
-	/* Command queue id */
+	/** Command queue id */
 	u64 cmdq_id;
 };
 
-/*
- * @brief HWS Resume engine request / response structure.
- * After a HWS engine reset, all scheduling is stopped on VPU until a engine resume.
+/**
+ * HWS Resume engine request / response structure.
+ * After a HWS engine reset, all scheduling is stopped on VPU until an engine resume.
  * Host shall send this command to resume scheduling of any valid queue.
- * @see VPU_JSM_MSG_HWS_RESUME_ENGINE
+ * @see VPU_JSM_MSG_HWS_ENGINE_RESUME
  * @see VPU_JSM_MSG_HWS_RESUME_ENGINE_DONE
  */
 struct vpu_ipc_msg_payload_hws_resume_engine {
-	/* Engine to be resumed */
+	/** Engine to be resumed */
 	u32 engine_idx;
-	/* Reserved */
+	/** Reserved */
 	u32 reserved_0;
 };
 
@@ -1326,7 +1512,7 @@ struct vpu_jsm_metric_streamer_done {
 /**
  * Metric group description placed in the metric buffer after successful completion
  * of the VPU_JSM_MSG_METRIC_STREAMER_INFO command. This is followed by one or more
- * @vpu_jsm_metric_counter_descriptor records.
+ * @ref vpu_jsm_metric_counter_descriptor records.
  * @see VPU_JSM_MSG_METRIC_STREAMER_INFO
  */
 struct vpu_jsm_metric_group_descriptor {
@@ -1413,29 +1599,24 @@ struct vpu_jsm_metric_counter_descriptor {
 };
 
 /**
- * Payload for VPU_JSM_MSG_DYNDBG_CONTROL requests.
+ * Payload for @ref VPU_JSM_MSG_DYNDBG_CONTROL requests.
  *
- * VPU_JSM_MSG_DYNDBG_CONTROL are used to control the VPU FW Dynamic Debug
- * feature, which allows developers to selectively enable / disable MVLOG_DEBUG
- * messages. This is equivalent to the Dynamic Debug functionality provided by
- * Linux
- * (https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html)
- * The host can control Dynamic Debug behavior by sending dyndbg commands, which
- * have the same syntax as Linux
- * dyndbg commands.
+ * VPU_JSM_MSG_DYNDBG_CONTROL requests are used to control the VPU FW dynamic debug
+ * feature, which allows developers to selectively enable/disable code to obtain
+ * additional FW information. This is equivalent to the dynamic debug functionality
+ * provided by Linux. The host can control dynamic debug behavior by sending dyndbg
+ * commands, using the same syntax as for Linux dynamic debug commands.
  *
- * NOTE: in order for MVLOG_DEBUG messages to be actually printed, the host
- * still has to set the logging level to MVLOG_DEBUG, using the
- * VPU_JSM_MSG_TRACE_SET_CONFIG command.
+ * @see https://www.kernel.org/doc/html/latest/admin-guide/dynamic-debug-howto.html.
  *
- * The host can see the current dynamic debug configuration by executing a
- * special 'show' command. The dyndbg configuration will be printed to the
- * configured logging destination using MVLOG_INFO logging level.
+ * NOTE:
+ * As the dynamic debug feature uses MVLOG messages to provide information, the host
+ * must first set the logging level to MVLOG_DEBUG, using the @ref VPU_JSM_MSG_TRACE_SET_CONFIG
+ * command.
  */
 struct vpu_ipc_msg_payload_dyndbg_control {
 	/**
-	 * Dyndbg command (same format as Linux dyndbg); must be a NULL-terminated
-	 * string.
+	 * Dyndbg command to be executed.
	 */
 	char dyndbg_cmd[VPU_DYNDBG_CMD_MAX_LEN];
 };
@@ -1456,7 +1637,7 @@ struct vpu_ipc_msg_payload_pwr_d0i3_enter {
 };
 
 /**
- * Payload for VPU_JSM_MSG_DCT_ENABLE message.
+ * Payload for @ref VPU_JSM_MSG_DCT_ENABLE message.
  *
  * Default values for DCT active/inactive times are 5.3ms and 30ms respectively,
  * corresponding to a 85% duty cycle. This payload allows the host to tune these
@@ -1513,28 +1694,28 @@ union vpu_ipc_msg_payload {
 	struct vpu_ipc_msg_payload_pwr_dct_control pwr_dct_control;
 };
 
-/*
- * Host <-> LRT IPC message base structure.
+/**
+ * Host <-> NPU IPC message base structure.
  *
  * NOTE: All instances of this object must be aligned on a 64B boundary
  * to allow proper handling of VPU cache operations.
  */
 struct vpu_jsm_msg {
-	/* Reserved */
+	/** Reserved */
 	u64 reserved_0;
-	/* Message type, see vpu_ipc_msg_type enum. */
+	/** Message type, see @ref vpu_ipc_msg_type. */
 	u32 type;
-	/* Buffer status, see vpu_ipc_msg_status enum. */
+	/** Buffer status, see @ref vpu_ipc_msg_status. */
 	u32 status;
-	/*
+	/**
	 * Request ID, provided by the host in a request message and passed
	 * back by VPU in the response message.
	 */
 	u32 request_id;
-	/* Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
+	/** Request return code set by the VPU, see VPU_JSM_STATUS_* defines. */
 	u32 result;
 	u64 reserved_1;
-	/* Message payload depending on message type, see vpu_ipc_msg_payload union. */
+	/** Message payload depending on message type, see vpu_ipc_msg_payload union. */
 	union vpu_ipc_msg_payload payload;
 };
 
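For illustration only, and not part of the patch above: a minimal sketch of how a host driver might fill a vpu_jsm_msg carrying VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES. The payload field names come from the header as modified above; the union member name hws_set_context_sched_properties, the request_id value and the example timing values are assumptions, and per the note on struct vpu_jsm_msg the message storage must be 64-byte aligned.

/* Illustrative sketch only - not part of the patch. Assumes the modified
 * vpu_jsm_api.h is on the include path and that the payload union exposes
 * an hws_set_context_sched_properties member (assumption).
 */
#include <linux/string.h>
#include "vpu_jsm_api.h"

static void example_fill_sched_properties(struct vpu_jsm_msg *msg, u32 host_ssid,
					  u64 cmdq_id, u32 priority_band)
{
	struct vpu_ipc_msg_payload_hws_set_context_sched_properties *prop =
		&msg->payload.hws_set_context_sched_properties;

	memset(msg, 0, sizeof(*msg));	/* also zeroes the reserved/padding fields */
	msg->type = VPU_JSM_MSG_SET_CONTEXT_SCHED_PROPERTIES;
	msg->request_id = 1;		/* placeholder; a real driver tracks request IDs */

	prop->host_ssid = host_ssid;
	prop->cmdq_id = cmdq_id;
	prop->priority_band = priority_band;	/* see enum vpu_job_scheduling_priority_band */
	prop->realtime_priority_level = 0;
	prop->in_process_priority = 0;
	prop->context_quantum = 20000;		/* 2 ms in 100 ns units; minimum is 10000 (1 ms) */
	prop->grace_period_same_priority = 10000;
	prop->grace_period_lower_priority = 10000;
}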
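A second illustrative sketch, also not part of the patch: filling the VPU_JSM_MSG_HWS_REGISTER_DB payload described by struct vpu_jsm_hws_register_db above. The union member name hws_register_db is an assumption; delivery of the message is left to the driver's IPC layer.

/* Illustrative sketch only - not part of the patch. The hws_register_db union
 * member name is an assumption; the field names come from the header above.
 */
#include <linux/string.h>
#include "vpu_jsm_api.h"

static void example_fill_hws_register_db(struct vpu_jsm_msg *msg, u32 db_id, u32 host_ssid,
					 u64 cmdq_id, u64 cmdq_base, u64 cmdq_size)
{
	struct vpu_jsm_hws_register_db *db = &msg->payload.hws_register_db;

	memset(msg, 0, sizeof(*msg));
	msg->type = VPU_JSM_MSG_HWS_REGISTER_DB;

	db->db_id = db_id;		/* index of the doorbell to register */
	db->host_ssid = host_ssid;	/* host sub-stream ID of the owning context */
	db->cmdq_id = cmdq_id;		/* used by HW scheduling; ignored for OS scheduling */
	db->cmdq_base = cmdq_base;	/* VPU virtual address of the command queue start */
	db->cmdq_size = cmdq_size;	/* command queue size in bytes */
}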
