Diffstat (limited to 'drivers/gpu/drm/xe/xe_vram.c')
-rw-r--r--   drivers/gpu/drm/xe/xe_vram.c   96
1 file changed, 37 insertions, 59 deletions
diff --git a/drivers/gpu/drm/xe/xe_vram.c b/drivers/gpu/drm/xe/xe_vram.c
index 652df7a5f4f6..d50baefcd124 100644
--- a/drivers/gpu/drm/xe/xe_vram.c
+++ b/drivers/gpu/drm/xe/xe_vram.c
@@ -13,50 +13,25 @@
 #include "regs/xe_gt_regs.h"
 #include "regs/xe_regs.h"
 #include "xe_assert.h"
+#include "xe_bo.h"
 #include "xe_device.h"
 #include "xe_force_wake.h"
 #include "xe_gt_mcr.h"
-#include "xe_gt_sriov_vf.h"
 #include "xe_mmio.h"
 #include "xe_module.h"
 #include "xe_sriov.h"
+#include "xe_tile_sriov_vf.h"
 #include "xe_ttm_vram_mgr.h"
 #include "xe_vram.h"
 #include "xe_vram_types.h"
 
-#define BAR_SIZE_SHIFT 20
-
-/*
- * Release all the BARs that could influence/block LMEMBAR resizing, i.e.
- * assigned IORESOURCE_MEM_64 BARs
- */
-static void release_bars(struct pci_dev *pdev)
-{
-	struct resource *res;
-	int i;
-
-	pci_dev_for_each_resource(pdev, res, i) {
-		/* Resource already un-assigned, do not reset it */
-		if (!res->parent)
-			continue;
-
-		/* No need to release unrelated BARs */
-		if (!(res->flags & IORESOURCE_MEM_64))
-			continue;
-
-		pci_release_resource(pdev, i);
-	}
-}
-
 static void resize_bar(struct xe_device *xe, int resno, resource_size_t size)
 {
 	struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
 	int bar_size = pci_rebar_bytes_to_size(size);
 	int ret;
 
-	release_bars(pdev);
-
-	ret = pci_resize_resource(pdev, resno, bar_size);
+	ret = pci_resize_resource(pdev, resno, bar_size, 0);
 	if (ret) {
 		drm_info(&xe->drm, "Failed to resize BAR%d to %dM (%pe). Consider enabling 'Resizable BAR' support in your BIOS\n",
 			 resno, 1 << bar_size, ERR_PTR(ret));
@@ -78,41 +53,37 @@ void xe_vram_resize_bar(struct xe_device *xe)
 	resource_size_t current_size;
 	resource_size_t rebar_size;
 	struct resource *root_res;
-	u32 bar_size_mask;
+	int max_size, i;
 	u32 pci_cmd;
-	int i;
 
 	/* gather some relevant info */
 	current_size = pci_resource_len(pdev, LMEM_BAR);
-	bar_size_mask = pci_rebar_get_possible_sizes(pdev, LMEM_BAR);
-
-	if (!bar_size_mask)
-		return;
 
 	if (force_vram_bar_size < 0)
 		return;
 
 	/* set to a specific size? */
 	if (force_vram_bar_size) {
-		u32 bar_size_bit;
-
-		rebar_size = force_vram_bar_size * (resource_size_t)SZ_1M;
+		rebar_size = pci_rebar_bytes_to_size(force_vram_bar_size *
+						     (resource_size_t)SZ_1M);
 
-		bar_size_bit = bar_size_mask & BIT(pci_rebar_bytes_to_size(rebar_size));
-
-		if (!bar_size_bit) {
+		if (!pci_rebar_size_supported(pdev, LMEM_BAR, rebar_size)) {
 			drm_info(&xe->drm,
-				 "Requested size: %lluMiB is not supported by rebar sizes: 0x%x. Leaving default: %lluMiB\n",
-				 (u64)rebar_size >> 20, bar_size_mask, (u64)current_size >> 20);
+				 "Requested size: %lluMiB is not supported by rebar sizes: 0x%llx. Leaving default: %lluMiB\n",
+				 (u64)pci_rebar_size_to_bytes(rebar_size) >> 20,
+				 pci_rebar_get_possible_sizes(pdev, LMEM_BAR),
+				 (u64)current_size >> 20);
 			return;
 		}
 
-		rebar_size = 1ULL << (__fls(bar_size_bit) + BAR_SIZE_SHIFT);
-
+		rebar_size = pci_rebar_size_to_bytes(rebar_size);
 		if (rebar_size == current_size)
 			return;
 	} else {
-		rebar_size = 1ULL << (__fls(bar_size_mask) + BAR_SIZE_SHIFT);
+		max_size = pci_rebar_get_max_size(pdev, LMEM_BAR);
+		if (max_size < 0)
+			return;
+		rebar_size = pci_rebar_size_to_bytes(max_size);
 
 		/* only resize if larger than current */
 		if (rebar_size <= current_size)
@@ -182,12 +153,17 @@ static int determine_lmem_bar_size(struct xe_device *xe, struct xe_vram_region *
 	return 0;
 }
 
-static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
+static int get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size, u64 *poffset)
 {
 	struct xe_device *xe = gt_to_xe(gt);
+	unsigned int fw_ref;
 	u64 offset;
 	u32 reg;
 
+	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (!fw_ref)
+		return -ETIMEDOUT;
+
 	if (GRAPHICS_VER(xe) >= 20) {
 		u64 ccs_size = tile_size / 512;
 		u64 offset_hi, offset_lo;
@@ -217,7 +193,10 @@ static inline u64 get_flat_ccs_offset(struct xe_gt *gt, u64 tile_size)
 		offset = (u64)REG_FIELD_GET(XEHP_FLAT_CCS_PTR, reg) * SZ_64K;
 	}
 
-	return offset;
+	xe_force_wake_put(gt_to_fw(gt), fw_ref);
+	*poffset = offset;
+
+	return 0;
 }
 
 /*
@@ -244,7 +223,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
 {
 	struct xe_device *xe = tile_to_xe(tile);
 	struct xe_gt *gt = tile->primary_gt;
-	unsigned int fw_ref;
 	u64 offset;
 	u32 reg;
 
@@ -255,32 +233,31 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
 		offset = 0;
 		for_each_tile(t, xe, id)
 			for_each_if(t->id < tile->id)
-				offset += xe_gt_sriov_vf_lmem(t->primary_gt);
+				offset += xe_tile_sriov_vf_lmem(t);
 
-		*tile_size = xe_gt_sriov_vf_lmem(gt);
+		*tile_size = xe_tile_sriov_vf_lmem(tile);
 		*vram_size = *tile_size;
 		*tile_offset = offset;
 
 		return 0;
 	}
 
-	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
-	if (!fw_ref)
-		return -ETIMEDOUT;
-
 	/* actual size */
 	if (unlikely(xe->info.platform == XE_DG1)) {
 		*tile_size = pci_resource_len(to_pci_dev(xe->drm.dev), LMEM_BAR);
 		*tile_offset = 0;
 	} else {
-		reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
+		reg = xe_mmio_read32(&tile->mmio, SG_TILE_ADDR_RANGE(tile->id));
 		*tile_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G;
 		*tile_offset = (u64)REG_FIELD_GET(GENMASK(7, 1), reg) * SZ_1G;
 	}
 
 	/* minus device usage */
 	if (xe->info.has_flat_ccs) {
-		offset = get_flat_ccs_offset(gt, *tile_size);
+		int ret = get_flat_ccs_offset(gt, *tile_size, &offset);
+
+		if (ret)
+			return ret;
 	} else {
 		offset = xe_mmio_read64_2x32(&tile->mmio, GSMBASE);
 	}
@@ -288,8 +265,6 @@ static int tile_vram_size(struct xe_tile *tile, u64 *vram_size,
 	/* remove the tile offset so we have just the available size */
 	*vram_size = offset - *tile_offset;
 
-	xe_force_wake_put(gt_to_fw(gt), fw_ref);
-
 	return 0;
 }
 
@@ -301,8 +276,11 @@ static void vram_fini(void *arg)
 
 	xe->mem.vram->mapping = NULL;
 
-	for_each_tile(tile, xe, id)
+	for_each_tile(tile, xe, id) {
 		tile->mem.vram->mapping = NULL;
+		if (tile->mem.kernel_vram)
+			tile->mem.kernel_vram->mapping = NULL;
+	}
 }
 
 struct xe_vram_region *xe_vram_region_alloc(struct xe_device *xe, u8 id, u32 placement)
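For reference, the rework above changes get_flat_ccs_offset() to take the GT force wake internally, return 0 or a negative errno, and hand the offset back through an out-parameter, so tile_vram_size() can simply propagate the error instead of holding force wake across the whole size probe. A minimal, self-contained C sketch of that calling convention, runnable in user space, is shown below; acquire_hw(), release_hw() and read_offset_register() are hypothetical stand-ins for the xe helpers, not real driver API.

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for xe_force_wake_get()/_put() and the MMIO read. */
static bool acquire_hw(void) { return true; }
static void release_hw(void) { }

/* Pretend the CCS steals 1/512 of the tile, as in the Xe2 path of the diff. */
static uint64_t read_offset_register(uint64_t tile_size)
{
	return tile_size - tile_size / 512;
}

/*
 * Same shape as the reworked get_flat_ccs_offset(): grab the hardware
 * reference inside the helper, return 0 or a negative errno, and write the
 * result through an out-parameter.
 */
static int get_offset(uint64_t tile_size, uint64_t *poffset)
{
	if (!acquire_hw())
		return -ETIMEDOUT;

	*poffset = read_offset_register(tile_size);

	release_hw();
	return 0;
}

int main(void)
{
	uint64_t offset;
	int ret = get_offset(1ULL << 30, &offset);	/* 1 GiB tile as an example */

	if (ret)
		return 1;	/* caller propagates the error, as tile_vram_size() does */

	printf("offset = %llu\n", (unsigned long long)offset);
	return 0;
}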
