Diffstat (limited to 'drivers/gpu/drm/radeon/evergreen_cs.c')
-rw-r--r-- | drivers/gpu/drm/radeon/evergreen_cs.c | 523 |
1 file changed, 264 insertions(+), 259 deletions(-)
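The hunks below convert the Evergreen command-stream checker's DRM_ERROR()/dev_warn() messages to dev_warn_once(), so a malformed userspace command stream logs each complaint at most once instead of flooding the kernel log. A minimal sketch of that pattern, using a hypothetical validate_count() helper that is not part of this file:

#include <linux/dev_printk.h>
#include "radeon.h"

/*
 * Hypothetical illustration of the change applied throughout this diff:
 * validation failures triggered by userspace-supplied command streams are
 * reported with dev_warn_once() so a bad stream cannot spam the log.
 */
static int validate_count(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt, unsigned int expected)
{
	if (pkt->count != expected) {
		/* emitted at most once per call site, not once per bad packet */
		dev_warn_once(p->dev, "bad packet3 0x%02x: count %u, expected %u\n",
			      pkt->opcode, pkt->count, expected);
		return -EINVAL;
	}
	return 0;
}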
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 266c57733136..1162cb5d75ed 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -951,13 +951,13 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) u64 offset = (u64)track->vgt_strmout_bo_offset[i] + (u64)track->vgt_strmout_size[i]; if (offset > radeon_bo_size(track->vgt_strmout_bo[i])) { - DRM_ERROR("streamout %d bo too small: 0x%llx, 0x%lx\n", - i, offset, - radeon_bo_size(track->vgt_strmout_bo[i])); + dev_warn_once(p->dev, "streamout %d bo too small: 0x%llx, 0x%lx\n", + i, offset, + radeon_bo_size(track->vgt_strmout_bo[i])); return -EINVAL; } } else { - dev_warn(p->dev, "No buffer for streamout %d\n", i); + dev_warn_once(p->dev, "No buffer for streamout %d\n", i); return -EINVAL; } } @@ -979,8 +979,8 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) (tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ if (track->cb_color_bo[i] == NULL) { - dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", - __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); + dev_warn_once(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n", + __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i); return -EINVAL; } /* check cb */ @@ -1056,8 +1056,8 @@ static int evergreen_packet0_check(struct radeon_cs_parser *p, case EVERGREEN_VLINE_START_END: r = evergreen_cs_packet_parse_vline(p); if (r) { - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", - idx, reg); + dev_warn_once(p->dev, "No reloc for ib[%d]=0x%04X\n", + idx, reg); return r; } break; @@ -1143,8 +1143,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_VSTMP_RING_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1155,15 +1155,15 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case CAYMAN_DB_EQAA: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } break; case CAYMAN_DB_DEPTH_INFO: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } break; @@ -1172,8 +1172,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] &= ~Z_ARRAY_MODE(0xf); @@ -1214,8 +1214,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_Z_READ_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_z_read_offset = radeon_get_ib_value(p, idx); @@ -1226,8 +1226,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_Z_WRITE_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - 
dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_z_write_offset = radeon_get_ib_value(p, idx); @@ -1238,8 +1238,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_STENCIL_READ_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_s_read_offset = radeon_get_ib_value(p, idx); @@ -1250,8 +1250,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_STENCIL_WRITE_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->db_s_write_offset = radeon_get_ib_value(p, idx); @@ -1273,8 +1273,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case VGT_STRMOUT_BUFFER_BASE_3: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - VGT_STRMOUT_BUFFER_BASE_0) / 16; @@ -1295,8 +1295,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CP_COHER_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "missing reloc for CP_COHER_BASE " - "0x%04X\n", reg); + dev_warn_once(p->dev, "missing reloc for CP_COHER_BASE " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1311,8 +1311,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case PA_SC_AA_CONFIG: if (p->rdev->family >= CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK; @@ -1320,8 +1320,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case CAYMAN_PA_SC_AA_CONFIG: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK; @@ -1360,8 +1360,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); @@ -1378,8 +1378,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->tiling_flags)); @@ -1439,8 +1439,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_ATTRIB: r = 
radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -1467,8 +1467,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR11_ATTRIB: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -1555,8 +1555,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR7_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = (reg - CB_COLOR0_BASE) / 0x3c; @@ -1571,8 +1571,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case CB_COLOR11_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8; @@ -1584,8 +1584,8 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case DB_HTILE_DATA_BASE: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } track->htile_offset = radeon_get_ib_value(p, idx); @@ -1702,36 +1702,36 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) case SQ_ALU_CONST_CACHE_LS_15: r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case SX_MEMORY_EXPORT_BASE: if (p->rdev->family >= CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONFIG_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONFIG_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONFIG_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); break; case CAYMAN_SX_SCATTER_EXPORT_BASE: if (p->rdev->family < CHIP_CAYMAN) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - dev_warn(p->dev, "bad SET_CONTEXT_REG " - "0x%04X\n", reg); + dev_warn_once(p->dev, "bad SET_CONTEXT_REG " + "0x%04X\n", reg); return -EINVAL; } ib[idx] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -1740,7 +1740,7 @@ static int evergreen_cs_handle_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) track->sx_misc_kill_all_prims = (radeon_get_ib_value(p, idx) & 0x1) != 0; break; default: - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", reg, idx); return -EINVAL; } return 0; @@ -1795,7 +1795,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 
uint64_t offset; if (pkt->count != 1) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1807,13 +1807,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, return 0; if (pred_op > 2) { - DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op); + dev_warn_once(p->dev, "bad SET PREDICATION operation %d\n", pred_op); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET PREDICATION\n"); + dev_warn_once(p->dev, "bad SET PREDICATION\n"); return -EINVAL; } @@ -1827,7 +1827,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_CONTEXT_CONTROL: if (pkt->count != 1) { - DRM_ERROR("bad CONTEXT_CONTROL\n"); + dev_warn_once(p->dev, "bad CONTEXT_CONTROL\n"); return -EINVAL; } break; @@ -1835,17 +1835,17 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case PACKET3_NUM_INSTANCES: case PACKET3_CLEAR_STATE: if (pkt->count) { - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; case CAYMAN_PACKET3_DEALLOC_STATE: if (p->rdev->family < CHIP_CAYMAN) { - DRM_ERROR("bad PACKET3_DEALLOC_STATE\n"); + dev_warn_once(p->dev, "bad PACKET3_DEALLOC_STATE\n"); return -EINVAL; } if (pkt->count) { - DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); + dev_warn_once(p->dev, "bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n"); return -EINVAL; } break; @@ -1854,12 +1854,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 1) { - DRM_ERROR("bad INDEX_BASE\n"); + dev_warn_once(p->dev, "bad INDEX_BASE\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad INDEX_BASE\n"); + dev_warn_once(p->dev, "bad INDEX_BASE\n"); return -EINVAL; } @@ -1872,7 +1872,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -1880,7 +1880,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case PACKET3_INDEX_BUFFER_SIZE: { if (pkt->count != 0) { - DRM_ERROR("bad INDEX_BUFFER_SIZE\n"); + dev_warn_once(p->dev, "bad INDEX_BUFFER_SIZE\n"); return -EINVAL; } break; @@ -1889,12 +1889,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, { uint64_t offset; if (pkt->count != 3) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DRAW_INDEX\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX\n"); return -EINVAL; } @@ -1907,7 +1907,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -1917,12 +1917,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 4) { - DRM_ERROR("bad DRAW_INDEX_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DRAW_INDEX_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_2\n"); return -EINVAL; } @@ -1935,63 +1935,63 @@ static int evergreen_packet3_check(struct 
radeon_cs_parser *p, r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DRAW_INDEX_AUTO: if (pkt->count != 1) { - DRM_ERROR("bad DRAW_INDEX_AUTO\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_MULTI_AUTO: if (pkt->count != 2) { - DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_MULTI_AUTO\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DRAW_INDEX_IMMD: if (pkt->count < 2) { - DRM_ERROR("bad DRAW_INDEX_IMMD\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_IMMD\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET: if (pkt->count != 2) { - DRM_ERROR("bad DRAW_INDEX_OFFSET\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_DRAW_INDEX_OFFSET_2: if (pkt->count != 3) { - DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n"); + dev_warn_once(p->dev, "bad DRAW_INDEX_OFFSET_2\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; @@ -2005,19 +2005,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 4 ADDRESS_HI Bits [31:8] - Reserved. Bits [7:0] - Upper bits of Address [47:32] */ if (pkt->count != 2) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } /* currently only supporting setting indirect draw buffer base address */ if (idx_value != 1) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_BASE\n"); + dev_warn_once(p->dev, "bad SET_BASE\n"); return -EINVAL; } @@ -2039,54 +2039,54 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, 3 DRAW_INITIATOR Draw Initiator Register. 
Written to the VGT_DRAW_INITIATOR register for the assigned context */ if (pkt->count != 1) { - DRM_ERROR("bad DRAW_INDIRECT\n"); + dev_warn_once(p->dev, "bad DRAW_INDIRECT\n"); return -EINVAL; } if (idx_value + size > track->indirect_draw_buffer_size) { - dev_warn(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", - idx_value, size, track->indirect_draw_buffer_size); + dev_warn_once(p->dev, "DRAW_INDIRECT buffer too small %u + %llu > %lu\n", + idx_value, size, track->indirect_draw_buffer_size); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; } case PACKET3_DISPATCH_DIRECT: if (pkt->count != 3) { - DRM_ERROR("bad DISPATCH_DIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_DIRECT\n"); return -EINVAL; } r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); + dev_warn_once(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx); return r; } break; case PACKET3_DISPATCH_INDIRECT: if (pkt->count != 1) { - DRM_ERROR("bad DISPATCH_INDIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad DISPATCH_INDIRECT\n"); + dev_warn_once(p->dev, "bad DISPATCH_INDIRECT\n"); return -EINVAL; } ib[idx+0] = idx_value + (u32)(reloc->gpu_offset & 0xffffffff); r = evergreen_cs_track_check(p); if (r) { - dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); + dev_warn_once(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__); return r; } break; case PACKET3_WAIT_REG_MEM: if (pkt->count != 5) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } /* bit 4 is reg (0) or mem (1) */ @@ -2095,7 +2095,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad WAIT_REG_MEM\n"); + dev_warn_once(p->dev, "bad WAIT_REG_MEM\n"); return -EINVAL; } @@ -2106,7 +2106,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, ib[idx+1] = (ib[idx+1] & 0x3) | (offset & 0xfffffffc); ib[idx+2] = upper_32_bits(offset) & 0xff; } else if (idx_value & 0x100) { - DRM_ERROR("cannot use PFP on REG wait\n"); + dev_warn_once(p->dev, "cannot use PFP on REG wait\n"); return -EINVAL; } break; @@ -2115,7 +2115,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u32 command, size, info; u64 offset, tmp; if (pkt->count != 4) { - DRM_ERROR("bad CP DMA\n"); + dev_warn_once(p->dev, "bad CP DMA\n"); return -EINVAL; } command = radeon_get_ib_value(p, idx+4); @@ -2129,7 +2129,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if (size % 4) { - DRM_ERROR("CP DMA command requires dw count alignment\n"); + dev_warn_once(p->dev, "CP DMA command requires dw count alignment\n"); return -EINVAL; } } @@ -2137,19 +2137,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* src address space is register */ /* GDS is ok */ if (((info & 0x60000000) >> 29) != 1) { - DRM_ERROR("CP DMA SAS not supported\n"); + dev_warn_once(p->dev, "CP DMA SAS not supported\n"); return -EINVAL; } } else { if (command & PACKET3_CP_DMA_CMD_SAIC) { - DRM_ERROR("CP DMA SAIC only supported for registers\n"); + 
dev_warn_once(p->dev, "CP DMA SAIC only supported for registers\n"); return -EINVAL; } /* src address space is memory */ if (((info & 0x60000000) >> 29) == 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad CP DMA SRC\n"); + dev_warn_once(p->dev, "bad CP DMA SRC\n"); return -EINVAL; } @@ -2159,15 +2159,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA src buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA src buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx] = offset; ib[idx+1] = (ib[idx+1] & 0xffffff00) | (upper_32_bits(offset) & 0xff); } else if (((info & 0x60000000) >> 29) != 2) { - DRM_ERROR("bad CP DMA SRC_SEL\n"); + dev_warn_once(p->dev, "bad CP DMA SRC_SEL\n"); return -EINVAL; } } @@ -2175,19 +2175,19 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* dst address space is register */ /* GDS is ok */ if (((info & 0x00300000) >> 20) != 1) { - DRM_ERROR("CP DMA DAS not supported\n"); + dev_warn_once(p->dev, "CP DMA DAS not supported\n"); return -EINVAL; } } else { /* dst address space is memory */ if (command & PACKET3_CP_DMA_CMD_DAIC) { - DRM_ERROR("CP DMA DAIC only supported for registers\n"); + dev_warn_once(p->dev, "CP DMA DAIC only supported for registers\n"); return -EINVAL; } if (((info & 0x00300000) >> 20) == 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad CP DMA DST\n"); + dev_warn_once(p->dev, "bad CP DMA DST\n"); return -EINVAL; } @@ -2197,15 +2197,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, offset = reloc->gpu_offset + tmp; if ((tmp + size) > radeon_bo_size(reloc->robj)) { - dev_warn(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", - tmp + size, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "CP DMA dst buffer too small (%llu %lu)\n", + tmp + size, radeon_bo_size(reloc->robj)); return -EINVAL; } ib[idx+2] = offset; ib[idx+3] = upper_32_bits(offset) & 0xff; } else { - DRM_ERROR("bad CP DMA DST_SEL\n"); + dev_warn_once(p->dev, "bad CP DMA DST_SEL\n"); return -EINVAL; } } @@ -2213,13 +2213,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } case PACKET3_PFP_SYNC_ME: if (pkt->count) { - DRM_ERROR("bad PFP_SYNC_ME\n"); + dev_warn_once(p->dev, "bad PFP_SYNC_ME\n"); return -EINVAL; } break; case PACKET3_SURFACE_SYNC: if (pkt->count != 3) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } /* 0xffffffff/0x0 is flush all cache flag */ @@ -2227,7 +2227,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, radeon_get_ib_value(p, idx + 2) != 0) { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SURFACE_SYNC\n"); + dev_warn_once(p->dev, "bad SURFACE_SYNC\n"); return -EINVAL; } ib[idx+2] += (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -2235,7 +2235,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_EVENT_WRITE: if (pkt->count != 2 && pkt->count != 0) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } if (pkt->count) { @@ -2243,7 +2243,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE\n"); return -EINVAL; } 
offset = reloc->gpu_offset + @@ -2259,12 +2259,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 4) { - DRM_ERROR("bad EVENT_WRITE_EOP\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE_EOP\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOP\n"); return -EINVAL; } @@ -2281,12 +2281,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint64_t offset; if (pkt->count != 3) { - DRM_ERROR("bad EVENT_WRITE_EOS\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad EVENT_WRITE_EOS\n"); + dev_warn_once(p->dev, "bad EVENT_WRITE_EOS\n"); return -EINVAL; } @@ -2304,7 +2304,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { @@ -2321,7 +2321,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CONTEXT_REG_START) || (start_reg >= PACKET3_SET_CONTEXT_REG_END) || (end_reg >= PACKET3_SET_CONTEXT_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n"); + dev_warn_once(p->dev, "bad PACKET3_SET_CONTEXT_REG\n"); return -EINVAL; } for (reg = start_reg, idx++; reg <= end_reg; reg += 4, idx++) { @@ -2334,7 +2334,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, break; case PACKET3_SET_RESOURCE: if (pkt->count % 8) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START; @@ -2342,7 +2342,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_RESOURCE_START) || (start_reg >= PACKET3_SET_RESOURCE_END) || (end_reg >= PACKET3_SET_RESOURCE_END)) { - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } for (i = 0; i < (pkt->count / 8); i++) { @@ -2355,7 +2355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* tex base */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (tex)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); return -EINVAL; } if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { @@ -2392,7 +2392,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } else { r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (tex)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (tex)\n"); return -EINVAL; } moffset = (u32)((reloc->gpu_offset >> 8) & 0xffffffff); @@ -2411,14 +2411,15 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* vtx base */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_RESOURCE (vtx)\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE (vtx)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1+(i*8)+0); size = radeon_get_ib_value(p, idx+1+(i*8)+1); if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) { /* force size to size of the buffer */ - dev_warn_ratelimited(p->dev, "vbo resource seems too big for the bo\n"); + dev_warn_once(p->dev, "vbo resource seems too big (%d) for the bo (%ld)\n", 
+ size + offset, radeon_bo_size(reloc->robj)); ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj) - offset; } @@ -2431,7 +2432,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case SQ_TEX_VTX_INVALID_TEXTURE: case SQ_TEX_VTX_INVALID_BUFFER: default: - DRM_ERROR("bad SET_RESOURCE\n"); + dev_warn_once(p->dev, "bad SET_RESOURCE\n"); return -EINVAL; } } @@ -2445,7 +2446,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_BOOL_CONST_START) || (start_reg >= PACKET3_SET_BOOL_CONST_END) || (end_reg >= PACKET3_SET_BOOL_CONST_END)) { - DRM_ERROR("bad SET_BOOL_CONST\n"); + dev_warn_once(p->dev, "bad SET_BOOL_CONST\n"); return -EINVAL; } break; @@ -2455,7 +2456,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_LOOP_CONST_START) || (start_reg >= PACKET3_SET_LOOP_CONST_END) || (end_reg >= PACKET3_SET_LOOP_CONST_END)) { - DRM_ERROR("bad SET_LOOP_CONST\n"); + dev_warn_once(p->dev, "bad SET_LOOP_CONST\n"); return -EINVAL; } break; @@ -2465,13 +2466,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_CTL_CONST_START) || (start_reg >= PACKET3_SET_CTL_CONST_END) || (end_reg >= PACKET3_SET_CTL_CONST_END)) { - DRM_ERROR("bad SET_CTL_CONST\n"); + dev_warn_once(p->dev, "bad SET_CTL_CONST\n"); return -EINVAL; } break; case PACKET3_SET_SAMPLER: if (pkt->count % 3) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START; @@ -2479,13 +2480,13 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, if ((start_reg < PACKET3_SET_SAMPLER_START) || (start_reg >= PACKET3_SET_SAMPLER_END) || (end_reg >= PACKET3_SET_SAMPLER_END)) { - DRM_ERROR("bad SET_SAMPLER\n"); + dev_warn_once(p->dev, "bad SET_SAMPLER\n"); return -EINVAL; } break; case PACKET3_STRMOUT_BUFFER_UPDATE: if (pkt->count != 4) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (invalid count)\n"); return -EINVAL; } /* Updating memory at DST_ADDRESS. 
*/ @@ -2493,14 +2494,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2512,14 +2513,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad STRMOUT_BUFFER_UPDATE src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2532,23 +2533,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; if (pkt->count != 3) { - DRM_ERROR("bad MEM_WRITE (invalid count)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (invalid count)\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad MEM_WRITE (missing reloc)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+0); offset += ((u64)(radeon_get_ib_value(p, idx+1) & 0xff)) << 32UL; if (offset & 0x7) { - DRM_ERROR("bad MEM_WRITE (address not qwords aligned)\n"); + dev_warn_once(p->dev, "bad MEM_WRITE (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad MEM_WRITE bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2558,7 +2559,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } case PACKET3_COPY_DW: if (pkt->count != 4) { - DRM_ERROR("bad COPY_DW (invalid count)\n"); + dev_warn_once(p->dev, "bad COPY_DW (invalid count)\n"); return -EINVAL; } if (idx_value & 0x1) { @@ -2566,14 +2567,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* SRC is memory. 
*/ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COPY_DW (missing src reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+1); offset += ((u64)(radeon_get_ib_value(p, idx+2) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW src bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2583,8 +2584,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* SRC is a reg. */ reg = radeon_get_ib_value(p, idx+1) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 1); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 1); return -EINVAL; } } @@ -2593,14 +2594,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* DST is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COPY_DW (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad COPY_DW (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx+3); offset += ((u64)(radeon_get_ib_value(p, idx+4) & 0xff)) << 32; if ((offset + 4) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", - offset + 4, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COPY_DW dst bo too small: 0x%llx, 0x%lx\n", + offset + 4, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2610,8 +2611,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* DST is a reg. 
*/ reg = radeon_get_ib_value(p, idx+3) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 3); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 3); return -EINVAL; } } @@ -2622,7 +2623,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint32_t allowed_reg_base; uint32_t source_sel; if (pkt->count != 2) { - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (invalid count)\n"); return -EINVAL; } @@ -2632,8 +2633,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, areg = idx_value >> 16; if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { - dev_warn(p->dev, "forbidden register for append cnt 0x%08x at %d\n", - areg, idx); + dev_warn_once(p->dev, "forbidden register for append cnt 0x%08x at %d\n", + areg, idx); return -EINVAL; } @@ -2643,7 +2644,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, uint32_t swap; r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad SET_APPEND_CNT (missing reloc)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx + 1); @@ -2656,7 +2657,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, ib[idx+1] = (offset & 0xfffffffc) | swap; ib[idx+2] = upper_32_bits(offset) & 0xff; } else { - DRM_ERROR("bad SET_APPEND_CNT (unsupported operation)\n"); + dev_warn_once(p->dev, "bad SET_APPEND_CNT (unsupported operation)\n"); return -EINVAL; } break; @@ -2666,23 +2667,23 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, u64 offset; if (pkt->count != 2) { - DRM_ERROR("bad COND_EXEC (invalid count)\n"); + dev_warn_once(p->dev, "bad COND_EXEC (invalid count)\n"); return -EINVAL; } r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COND_EXEC (missing reloc)\n"); + dev_warn_once(p->dev, "bad COND_EXEC (missing reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx + 0); offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL; if (offset & 0x7) { - DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n"); + dev_warn_once(p->dev, "bad COND_EXEC (address not qwords aligned)\n"); return -EINVAL; } if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2692,7 +2693,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } case PACKET3_COND_WRITE: if (pkt->count != 7) { - DRM_ERROR("bad COND_WRITE (invalid count)\n"); + dev_warn_once(p->dev, "bad COND_WRITE (invalid count)\n"); return -EINVAL; } if (idx_value & 0x10) { @@ -2700,14 +2701,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* POLL is memory. 
*/ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COND_WRITE (missing src reloc)\n"); + dev_warn_once(p->dev, "bad COND_WRITE (missing src reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx + 1); offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32; if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2717,8 +2718,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* POLL is a reg. */ reg = radeon_get_ib_value(p, idx + 1) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 1); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 1); return -EINVAL; } } @@ -2727,14 +2728,14 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* WRITE is memory. */ r = radeon_cs_packet_next_reloc(p, &reloc, 0); if (r) { - DRM_ERROR("bad COND_WRITE (missing dst reloc)\n"); + dev_warn_once(p->dev, "bad COND_WRITE (missing dst reloc)\n"); return -EINVAL; } offset = radeon_get_ib_value(p, idx + 5); offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32; if ((offset + 8) > radeon_bo_size(reloc->robj)) { - DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", - offset + 8, radeon_bo_size(reloc->robj)); + dev_warn_once(p->dev, "bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); return -EINVAL; } offset += reloc->gpu_offset; @@ -2744,8 +2745,8 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, /* WRITE is a reg. 
*/ reg = radeon_get_ib_value(p, idx + 5) << 2; if (!evergreen_is_safe_reg(p, reg)) { - dev_warn(p->dev, "forbidden register 0x%08x at %d\n", - reg, idx + 5); + dev_warn_once(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 5); return -EINVAL; } } @@ -2753,7 +2754,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, case PACKET3_NOP: break; default: - DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode); + dev_warn_once(p->dev, "Packet3 opcode %x not supported\n", pkt->opcode); return -EINVAL; } return 0; @@ -2853,7 +2854,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p) r = evergreen_packet3_check(p, &pkt); break; default: - DRM_ERROR("Unknown packet type %d !\n", pkt.type); + dev_warn_once(p->dev, "Unknown packet type %d !\n", pkt.type); kfree(p->track); p->track = NULL; return -EINVAL; @@ -2896,8 +2897,8 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) do { if (p->idx >= ib_chunk->length_dw) { - DRM_ERROR("Can not parse packet at %d after CS end %d !\n", - p->idx, ib_chunk->length_dw); + dev_warn_once(p->dev, "Can not parse packet at %d after CS end %d !\n", + p->idx, ib_chunk->length_dw); return -EINVAL; } idx = p->idx; @@ -2910,7 +2911,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case DMA_PACKET_WRITE: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_WRITE\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE\n"); return -EINVAL; } switch (sub_cmd) { @@ -2932,24 +2933,24 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += count + 3; break; default: - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); + dev_warn_once(p->dev, "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, header); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA write buffer too small (%llu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA write buffer too small (%llu %lu)\n", + dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } break; case DMA_PACKET_COPY: r = r600_dma_cs_next_reloc(p, &src_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY\n"); return -EINVAL; } switch (sub_cmd) { @@ -2961,13 +2962,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -3001,13 +3002,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset 
+ (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -3020,13 +3021,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0xff)) << 32; if ((src_offset + count) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", - src_offset + count, radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, byte src buffer too small (%llu %lu)\n", + src_offset + count, radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + count) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", - dst_offset + count, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, byte dst buffer too small (%llu %lu)\n", + dst_offset + count, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xffffffff); @@ -3039,7 +3040,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x41: /* L2L, partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2L Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->gpu_offset & 0xffffffff); @@ -3054,7 +3055,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* L2L, dw, broadcast */ r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2L, dw, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2L, dw, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3064,18 +3065,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+3); src_offset |= ((u64)(radeon_get_ib_value(p, idx+6) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2L, dw, broadcast dst2 buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), 
radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -3089,12 +3090,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) /* Copy L2T Frame to Field */ case 0x48: if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, frame to fields DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, frame to fields DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3104,18 +3105,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, frame to fields buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3128,7 +3129,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x49: /* L2T, T2L partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n"); return -EINVAL; } /* detile bit */ @@ -3151,12 +3152,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4b: /* L2T, broadcast */ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3166,18 +3167,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", 
- dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", + dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3212,13 +3213,13 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); } if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, T2L src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, T2L dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } p->idx += 9; @@ -3227,7 +3228,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4d: /* T2T partial */ if (p->family < CHIP_CAYMAN) { - DRM_ERROR("L2T, T2L Partial is cayman only !\n"); + dev_warn_once(p->dev, "L2T, T2L Partial is cayman only !\n"); return -EINVAL; } ib[idx+1] += (u32)(src_reloc->gpu_offset >> 8); @@ -3238,12 +3239,12 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) case 0x4f: /* L2T, broadcast */ if (radeon_get_ib_value(p, idx + 2) & (1 << 31)) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } r = r600_dma_cs_next_reloc(p, &dst2_reloc); if (r) { - DRM_ERROR("bad L2T, broadcast DMA_PACKET_COPY\n"); + dev_warn_once(p->dev, "bad L2T, broadcast DMA_PACKET_COPY\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); @@ -3253,18 +3254,18 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) src_offset = radeon_get_ib_value(p, idx+8); src_offset |= ((u64)(radeon_get_ib_value(p, idx+9) & 0xff)) << 32; if ((src_offset + (count * 4)) > radeon_bo_size(src_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", - src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast src buffer too small (%llu %lu)\n", + src_offset + (count * 4), radeon_bo_size(src_reloc->robj)); return -EINVAL; } if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", - dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst buffer too small (%llu %lu)\n", + dst_offset + (count * 4), radeon_bo_size(dst_reloc->robj)); return -EINVAL; } if ((dst2_offset + (count * 4)) > radeon_bo_size(dst2_reloc->robj)) { - dev_warn(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", - dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); + dev_warn_once(p->dev, "DMA L2T, broadcast dst2 buffer too small (%llu %lu)\n", + 
dst2_offset + (count * 4), radeon_bo_size(dst2_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset >> 8); @@ -3274,21 +3275,21 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += 10; break; default: - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); + dev_warn_once(p->dev, "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, header); return -EINVAL; } break; case DMA_PACKET_CONSTANT_FILL: r = r600_dma_cs_next_reloc(p, &dst_reloc); if (r) { - DRM_ERROR("bad DMA_PACKET_CONSTANT_FILL\n"); + dev_warn_once(p->dev, "bad DMA_PACKET_CONSTANT_FILL\n"); return -EINVAL; } dst_offset = radeon_get_ib_value(p, idx+1); dst_offset |= ((u64)(radeon_get_ib_value(p, idx+3) & 0x00ff0000)) << 16; if ((dst_offset + (count * 4)) > radeon_bo_size(dst_reloc->robj)) { - dev_warn(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", - dst_offset, radeon_bo_size(dst_reloc->robj)); + dev_warn_once(p->dev, "DMA constant fill buffer too small (%llu %lu)\n", + dst_offset, radeon_bo_size(dst_reloc->robj)); return -EINVAL; } ib[idx+1] += (u32)(dst_reloc->gpu_offset & 0xfffffffc); @@ -3299,7 +3300,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p) p->idx += 1; break; default: - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); + dev_warn_once(p->dev, "Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (p->idx < p->chunk_ib->length_dw); @@ -3430,7 +3431,7 @@ static bool evergreen_vm_reg_valid(u32 reg) case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS: return true; default: - DRM_ERROR("Invalid register 0x%x in CS\n", reg); + DRM_DEBUG("Invalid register 0x%x in CS\n", reg); return false; } } @@ -3448,7 +3449,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, break; case PACKET3_SET_BASE: if (idx_value != 1) { - DRM_ERROR("bad SET_BASE"); + dev_warn_once(rdev->dev, "bad SET_BASE"); return -EINVAL; } break; @@ -3519,7 +3520,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if ((start_reg < PACKET3_SET_CONFIG_REG_START) || (start_reg >= PACKET3_SET_CONFIG_REG_END) || (end_reg >= PACKET3_SET_CONFIG_REG_END)) { - DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n"); + dev_warn_once(rdev->dev, "bad PACKET3_SET_CONFIG_REG\n"); return -EINVAL; } for (i = 0; i < pkt->count; i++) { @@ -3539,7 +3540,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, (command & PACKET3_CP_DMA_CMD_SAS))) { /* src = register */ /* non mem to mem copies requires dw aligned count */ if ((command & 0x1fffff) % 4) { - DRM_ERROR("CP DMA command requires dw count alignment\n"); + dev_warn_once(rdev->dev, "CP DMA command requires dw count alignment\n"); return -EINVAL; } } @@ -3550,14 +3551,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if (command & PACKET3_CP_DMA_CMD_SAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); return -EINVAL; } } else { for (i = 0; i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad SRC register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad SRC register\n"); return -EINVAL; } } @@ -3571,14 +3572,14 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, if (command & PACKET3_CP_DMA_CMD_DAIC) { reg = start_reg; if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); return -EINVAL; } } else { for (i = 0; 
i < (command & 0x1fffff); i++) { reg = start_reg + (4 * i); if (!evergreen_vm_reg_valid(reg)) { - DRM_ERROR("CP DMA Bad DST register\n"); + dev_warn_once(rdev->dev, "CP DMA Bad DST register\n"); return -EINVAL; } } @@ -3591,7 +3592,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, uint32_t allowed_reg_base; if (pkt->count != 2) { - DRM_ERROR("bad SET_APPEND_CNT (invalid count)\n"); + dev_warn_once(rdev->dev, "bad SET_APPEND_CNT (invalid count)\n"); return -EINVAL; } @@ -3601,8 +3602,8 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, areg = idx_value >> 16; if (areg < allowed_reg_base || areg > (allowed_reg_base + 11)) { - DRM_ERROR("forbidden register for append cnt 0x%08x at %d\n", - areg, idx); + dev_warn_once(rdev->dev, "forbidden register for append cnt 0x%08x at %d\n", + areg, idx); return -EINVAL; } break; @@ -3681,7 +3682,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += count + 3; break; default: - DRM_ERROR("bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", idx, ib->ptr[idx]); + dev_warn_once(rdev->dev, + "bad DMA_PACKET_WRITE [%6d] 0x%08x sub cmd is not 0 or 8\n", + idx, ib->ptr[idx]); return -EINVAL; } break; @@ -3732,7 +3735,9 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += 10; break; default: - DRM_ERROR("bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", idx, ib->ptr[idx]); + dev_warn_once(rdev->dev, + "bad DMA_PACKET_COPY [%6d] 0x%08x invalid sub cmd\n", + idx, ib->ptr[idx]); return -EINVAL; } break; @@ -3743,7 +3748,7 @@ int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) idx += 1; break; default: - DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); + dev_warn_once(rdev->dev, "Unknown packet type %d at %d !\n", cmd, idx); return -EINVAL; } } while (idx < ib->length_dw); |