Diffstat (limited to 'drivers/infiniband/hw/irdma')
| -rw-r--r-- | drivers/infiniband/hw/irdma/cm.c | 2 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/ctrl.c | 107 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/hw.c | 3 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/icrdma_if.c | 6 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/ig3rdma_if.c | 4 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/main.h | 3 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/pble.c | 6 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/puda.c | 20 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/type.h | 5 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/uk.c | 67 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/user.h | 6 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/utils.c | 58 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/verbs.c | 49 |
| -rw-r--r-- | drivers/infiniband/hw/irdma/verbs.h | 3 |
14 files changed, 114 insertions, 225 deletions
diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c
index c6a0a661d6e7..f4f4f92ba63a 100644
--- a/drivers/infiniband/hw/irdma/cm.c
+++ b/drivers/infiniband/hw/irdma/cm.c
@@ -3710,7 +3710,7 @@ int irdma_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	iwpd = iwqp->iwpd;
 	tagged_offset = (uintptr_t)iwqp->ietf_mem.va;
 	ibmr = irdma_reg_phys_mr(&iwpd->ibpd, iwqp->ietf_mem.pa, buf_len,
-				 IB_ACCESS_LOCAL_WRITE, &tagged_offset);
+				 IB_ACCESS_LOCAL_WRITE, &tagged_offset, false);
 	if (IS_ERR(ibmr)) {
 		ret = -ENOMEM;
 		goto error;
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index 4ef1c29032f7..ce5cf89c463c 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -2943,8 +2943,6 @@ static int irdma_sc_cq_create(struct irdma_sc_cq *cq, u64 scratch,
 	__le64 *wqe;
 	struct irdma_sc_cqp *cqp;
 	u64 hdr;
-	struct irdma_sc_ceq *ceq;
-	int ret_code = 0;
 
 	cqp = cq->dev->cqp;
 	if (cq->cq_uk.cq_id >= cqp->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].max_cnt)
@@ -2953,19 +2951,9 @@
 	if (cq->ceq_id >= cq->dev->hmc_fpm_misc.max_ceqs)
 		return -EINVAL;
 
-	ceq = cq->dev->ceq[cq->ceq_id];
-	if (ceq && ceq->reg_cq)
-		ret_code = irdma_sc_add_cq_ctx(ceq, cq);
-
-	if (ret_code)
-		return ret_code;
-
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
-	if (!wqe) {
-		if (ceq && ceq->reg_cq)
-			irdma_sc_remove_cq_ctx(ceq, cq);
+	if (!wqe)
 		return -ENOMEM;
-	}
 
 	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
@@ -3018,17 +3006,12 @@ int irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch, bool post_sq)
 	struct irdma_sc_cqp *cqp;
 	__le64 *wqe;
 	u64 hdr;
-	struct irdma_sc_ceq *ceq;
 
 	cqp = cq->dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
 	if (!wqe)
 		return -ENOMEM;
 
-	ceq = cq->dev->ceq[cq->ceq_id];
-	if (ceq && ceq->reg_cq)
-		irdma_sc_remove_cq_ctx(ceq, cq);
-
 	set_64bit_val(wqe, 0, cq->cq_uk.cq_size);
 	set_64bit_val(wqe, 8, (uintptr_t)cq >> 1);
 	set_64bit_val(wqe, 40, cq->shadow_area_pa);
@@ -3602,71 +3585,6 @@ static int irdma_sc_parse_fpm_query_buf(struct irdma_sc_dev *dev, __le64 *buf,
 }
 
 /**
- * irdma_sc_find_reg_cq - find cq ctx index
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-static u32 irdma_sc_find_reg_cq(struct irdma_sc_ceq *ceq,
-				struct irdma_sc_cq *cq)
-{
-	u32 i;
-
-	for (i = 0; i < ceq->reg_cq_size; i++) {
-		if (cq == ceq->reg_cq[i])
-			return i;
-	}
-
-	return IRDMA_INVALID_CQ_IDX;
-}
-
-/**
- * irdma_sc_add_cq_ctx - add cq ctx tracking for ceq
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-int irdma_sc_add_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&ceq->req_cq_lock, flags);
-
-	if (ceq->reg_cq_size == ceq->elem_cnt) {
-		spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-		return -ENOMEM;
-	}
-
-	ceq->reg_cq[ceq->reg_cq_size++] = cq;
-
-	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-
-	return 0;
-}
-
-/**
- * irdma_sc_remove_cq_ctx - remove cq ctx tracking for ceq
- * @ceq: ceq sc structure
- * @cq: cq sc structure
- */
-void irdma_sc_remove_cq_ctx(struct irdma_sc_ceq *ceq, struct irdma_sc_cq *cq)
-{
-	unsigned long flags;
-	u32 cq_ctx_idx;
-
-	spin_lock_irqsave(&ceq->req_cq_lock, flags);
-	cq_ctx_idx = irdma_sc_find_reg_cq(ceq, cq);
-	if (cq_ctx_idx == IRDMA_INVALID_CQ_IDX)
-		goto exit;
-
-	ceq->reg_cq_size--;
-	if (cq_ctx_idx != ceq->reg_cq_size)
-		ceq->reg_cq[cq_ctx_idx] = ceq->reg_cq[ceq->reg_cq_size];
-	ceq->reg_cq[ceq->reg_cq_size] = NULL;
-
-exit:
-	spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-}
-
-/**
  * irdma_sc_cqp_init - Initialize buffers for a control Queue Pair
  * @cqp: IWARP control queue pair pointer
  * @info: IWARP control queue pair init info pointer
@@ -3950,11 +3868,13 @@ int irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp)
  */
 void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
 {
+	unsigned long flags;
 	u64 temp_val;
 	u16 sw_cq_sel;
 	u8 arm_next_se;
 	u8 arm_seq_num;
 
+	spin_lock_irqsave(&ccq->dev->cqp_lock, flags);
 	get_64bit_val(ccq->cq_uk.shadow_area, 32, &temp_val);
 	sw_cq_sel = (u16)FIELD_GET(IRDMA_CQ_DBSA_SW_CQ_SELECT, temp_val);
 	arm_next_se = (u8)FIELD_GET(IRDMA_CQ_DBSA_ARM_NEXT_SE, temp_val);
@@ -3965,6 +3885,7 @@ void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq)
 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT_SE, arm_next_se) |
 		   FIELD_PREP(IRDMA_CQ_DBSA_ARM_NEXT, 1);
 	set_64bit_val(ccq->cq_uk.shadow_area, 32, temp_val);
+	spin_unlock_irqrestore(&ccq->dev->cqp_lock, flags);
 
 	dma_wmb(); /* make sure shadow area is updated before arming */
 
@@ -4387,9 +4308,6 @@ int irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
 	ceq->ceq_elem_pa = info->ceqe_pa;
 	ceq->virtual_map = info->virtual_map;
 	ceq->itr_no_expire = info->itr_no_expire;
-	ceq->reg_cq = info->reg_cq;
-	ceq->reg_cq_size = 0;
-	spin_lock_init(&ceq->req_cq_lock);
 	ceq->pbl_chunk_size = (ceq->virtual_map ? info->pbl_chunk_size : 0);
 	ceq->first_pm_pbl_idx = (ceq->virtual_map ? info->first_pm_pbl_idx : 0);
 	ceq->pbl_list = (ceq->virtual_map ? info->pbl_list : NULL);
@@ -4472,9 +4390,6 @@ int irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq)
 {
 	struct irdma_sc_cqp *cqp;
 
-	if (ceq->reg_cq)
-		irdma_sc_remove_cq_ctx(ceq, ceq->dev->ccq);
-
 	cqp = ceq->dev->cqp;
 	cqp->process_cqp_sds = irdma_update_sds_noccq;
 
@@ -4493,11 +4408,6 @@ int irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch)
 	struct irdma_sc_dev *dev = ceq->dev;
 
 	dev->ccq->vsi_idx = ceq->vsi_idx;
-	if (ceq->reg_cq) {
-		ret_code = irdma_sc_add_cq_ctx(ceq, ceq->dev->ccq);
-		if (ret_code)
-			return ret_code;
-	}
 
 	ret_code = irdma_sc_ceq_create(ceq, scratch, true);
 	if (!ret_code)
@@ -4562,7 +4472,6 @@ void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq)
 	struct irdma_sc_cq *temp_cq;
 	u8 polarity;
 	u32 cq_idx;
-	unsigned long flags;
 
 	do {
 		cq_idx = 0;
@@ -4583,11 +4492,6 @@
 		}
 
 		cq = temp_cq;
-		if (ceq->reg_cq) {
-			spin_lock_irqsave(&ceq->req_cq_lock, flags);
-			cq_idx = irdma_sc_find_reg_cq(ceq, cq);
-			spin_unlock_irqrestore(&ceq->req_cq_lock, flags);
-		}
 		IRDMA_RING_MOVE_TAIL(ceq->ceq_ring);
 
 		if (!IRDMA_RING_CURRENT_TAIL(ceq->ceq_ring))
@@ -4731,7 +4635,8 @@ static int irdma_sc_aeq_destroy(struct irdma_sc_aeq *aeq, u64 scratch,
 	u64 hdr;
 
 	dev = aeq->dev;
-	if (dev->privileged)
+
+	if (dev->hw_attrs.uk_attrs.hw_rev <= IRDMA_GEN_2)
 		writel(0, dev->hw_regs[IRDMA_PFINT_AEQCTL]);
 
 	cqp = dev->cqp;
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7bad0e38786a..d1fc5726b979 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -2365,7 +2365,6 @@ static int irdma_cqp_manage_apbvt_cmd(struct irdma_device *iwdev,
 
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.manage_apbvt_entry.info;
-	memset(info, 0, sizeof(*info));
 	info->add = add_port;
 	info->port = accel_local_port;
 	cqp_info->cqp_cmd = IRDMA_OP_MANAGE_APBVT_ENTRY;
@@ -2474,7 +2473,6 @@ void irdma_manage_arp_cache(struct irdma_pci_f *rf,
 	if (action == IRDMA_ARP_ADD) {
 		cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;
 		info = &cqp_info->in.u.add_arp_cache_entry.info;
-		memset(info, 0, sizeof(*info));
 		info->arp_index = (u16)arp_index;
 		info->permanent = true;
 		ether_addr_copy(info->mac_addr, mac_addr);
@@ -2533,7 +2531,6 @@ int irdma_manage_qhash(struct irdma_device *iwdev, struct irdma_cm_info *cminfo,
 
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.manage_qhash_table_entry.info;
-	memset(info, 0, sizeof(*info));
 	info->vsi = &iwdev->vsi;
 	info->manage = mtype;
 	info->entry_type = etype;
diff --git a/drivers/infiniband/hw/irdma/icrdma_if.c b/drivers/infiniband/hw/irdma/icrdma_if.c
index 27b191f61caf..b49fd9cf2476 100644
--- a/drivers/infiniband/hw/irdma/icrdma_if.c
+++ b/drivers/infiniband/hw/irdma/icrdma_if.c
@@ -302,7 +302,8 @@ err_rt_init:
 err_ctrl_init:
 	icrdma_deinit_interrupts(rf, cdev_info);
 err_init_interrupts:
-	kfree(iwdev->rf);
+	mutex_destroy(&rf->ah_tbl_lock);
+	kfree(rf);
 	ib_dealloc_device(&iwdev->ibdev);
 
 	return err;
@@ -319,6 +320,9 @@ static void icrdma_remove(struct auxiliary_device *aux_dev)
 	ice_rdma_update_vsi_filter(cdev_info, iwdev->vsi_num, false);
 	irdma_ib_unregister_device(iwdev);
 	icrdma_deinit_interrupts(iwdev->rf, cdev_info);
+	mutex_destroy(&iwdev->rf->ah_tbl_lock);
+
+	kfree(iwdev->rf);
 
 	pr_debug("INIT: Gen[%d] func[%d] device remove success\n",
 		 rdma_ver, PCI_FUNC(cdev_info->pdev->devfn));
diff --git a/drivers/infiniband/hw/irdma/ig3rdma_if.c b/drivers/infiniband/hw/irdma/ig3rdma_if.c
index 1bb42eb298ba..e1d6670d9396 100644
--- a/drivers/infiniband/hw/irdma/ig3rdma_if.c
+++ b/drivers/infiniband/hw/irdma/ig3rdma_if.c
@@ -55,6 +55,7 @@ static int ig3rdma_vchnl_init(struct irdma_pci_f *rf,
 	ret = irdma_sc_vchnl_init(&rf->sc_dev, &virt_info);
 	if (ret) {
 		destroy_workqueue(rf->vchnl_wq);
+		mutex_destroy(&rf->sc_dev.vchnl_mutex);
 		return ret;
 	}
 
@@ -124,7 +125,9 @@ static void ig3rdma_decfg_rf(struct irdma_pci_f *rf)
 {
 	struct irdma_hw *hw = &rf->hw;
 
+	mutex_destroy(&rf->ah_tbl_lock);
 	destroy_workqueue(rf->vchnl_wq);
+	mutex_destroy(&rf->sc_dev.vchnl_mutex);
 	kfree(hw->io_regs);
 	iounmap(hw->rdma_reg.addr);
 }
@@ -149,6 +152,7 @@ static int ig3rdma_cfg_rf(struct irdma_pci_f *rf,
 	err = ig3rdma_cfg_regions(&rf->hw, cdev_info);
 	if (err) {
 		destroy_workqueue(rf->vchnl_wq);
+		mutex_destroy(&rf->sc_dev.vchnl_mutex);
 		return err;
 	}
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index 886b30da188a..baab61e424a2 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -556,7 +556,7 @@ void irdma_copy_ip_htonl(__be32 *dst, u32 *src);
 u16 irdma_get_vlan_ipv4(u32 *addr);
 void irdma_get_vlan_mac_ipv6(u32 *addr, u16 *vlan_id, u8 *mac);
 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *ib_pd, u64 addr, u64 size,
-				int acc, u64 *iova_start);
+				int acc, u64 *iova_start, bool dma_mr);
 int irdma_upload_qp_context(struct irdma_qp *iwqp, bool freeze, bool raw);
 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq);
 int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
@@ -564,7 +564,6 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
 		    void (*callback_fcn)(struct irdma_cqp_request *cqp_request),
 		    void *cb_param);
 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
-bool irdma_cq_empty(struct irdma_cq *iwcq);
 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event,
 			 void *ptr);
 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index fa6325adaede..28dfad7f940c 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -506,12 +506,14 @@ exit:
 void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
 		     struct irdma_pble_alloc *palloc)
 {
-	pble_rsrc->freedpbles += palloc->total_cnt;
-
 	if (palloc->level == PBLE_LEVEL_2)
 		free_lvl2(pble_rsrc, palloc);
 	else
 		irdma_prm_return_pbles(&pble_rsrc->pinfo,
 				       &palloc->level1.chunkinfo);
+
+	mutex_lock(&pble_rsrc->pble_mutex_lock);
+	pble_rsrc->freedpbles += palloc->total_cnt;
 	pble_rsrc->stats_alloc_freed++;
+	mutex_unlock(&pble_rsrc->pble_mutex_lock);
 }
diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c
index 694e5a9ed15d..cee47ddbd1b5 100644
--- a/drivers/infiniband/hw/irdma/puda.c
+++ b/drivers/infiniband/hw/irdma/puda.c
@@ -685,7 +685,6 @@ static int irdma_puda_qp_create(struct irdma_puda_rsrc *rsrc)
 	ukqp->rq_size = rsrc->rq_size;
 
 	IRDMA_RING_INIT(ukqp->sq_ring, ukqp->sq_size);
-	IRDMA_RING_INIT(ukqp->initial_ring, ukqp->sq_size);
 	IRDMA_RING_INIT(ukqp->rq_ring, ukqp->rq_size);
 
 	ukqp->wqe_alloc_db = qp->pd->dev->wqe_alloc_db;
@@ -726,7 +725,6 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
 	struct irdma_sc_cqp *cqp;
 	u64 hdr;
 	struct irdma_ccq_cqe_info compl_info;
-	int status = 0;
 
 	cqp = dev->cqp;
 	wqe = irdma_sc_cqp_get_next_send_wqe(cqp, 0);
@@ -756,16 +754,8 @@ static int irdma_puda_cq_wqe(struct irdma_sc_dev *dev, struct irdma_sc_cq *cq)
 	print_hex_dump_debug("PUDA: PUDA CREATE CQ", DUMP_PREFIX_OFFSET, 16, 8,
 			     wqe, IRDMA_CQP_WQE_SIZE * 8, false);
 	irdma_sc_cqp_post_sq(dev->cqp);
-	status = irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
-					       &compl_info);
-	if (!status) {
-		struct irdma_sc_ceq *ceq = dev->ceq[0];
-
-		if (ceq && ceq->reg_cq)
-			status = irdma_sc_add_cq_ctx(ceq, cq);
-	}
-
-	return status;
+	return irdma_sc_poll_for_cqp_op_done(dev->cqp, IRDMA_CQP_OP_CREATE_CQ,
+					     &compl_info);
 }
 
 /**
@@ -897,23 +887,17 @@ void irdma_puda_dele_rsrc(struct irdma_sc_vsi *vsi, enum puda_rsrc_type type,
 	struct irdma_puda_buf *buf = NULL;
 	struct irdma_puda_buf *nextbuf = NULL;
 	struct irdma_virt_mem *vmem;
-	struct irdma_sc_ceq *ceq;
 
-	ceq = vsi->dev->ceq[0];
 	switch (type) {
 	case IRDMA_PUDA_RSRC_TYPE_ILQ:
 		rsrc = vsi->ilq;
 		vmem = &vsi->ilq_mem;
 		vsi->ilq = NULL;
-		if (ceq && ceq->reg_cq)
-			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
 		break;
 	case IRDMA_PUDA_RSRC_TYPE_IEQ:
 		rsrc = vsi->ieq;
 		vmem = &vsi->ieq_mem;
 		vsi->ieq = NULL;
-		if (ceq && ceq->reg_cq)
-			irdma_sc_remove_cq_ctx(ceq, &rsrc->cq);
 		break;
 	default:
 		ibdev_dbg(to_ibdev(dev), "PUDA: error resource type = 0x%x\n",
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index c1b8f81ea283..cab4896640a1 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -492,9 +492,6 @@ struct irdma_sc_ceq {
 	u32 first_pm_pbl_idx;
 	u8 polarity;
 	u16 vsi_idx;
-	struct irdma_sc_cq **reg_cq;
-	u32 reg_cq_size;
-	spinlock_t req_cq_lock; /* protect access to reg_cq array */
 	bool virtual_map:1;
 	bool tph_en:1;
 	bool itr_no_expire:1;
@@ -894,8 +891,6 @@ struct irdma_ceq_init_info {
 	u8 tph_val;
 	u16 vsi_idx;
 	u32 first_pm_pbl_idx;
-	struct irdma_sc_cq **reg_cq;
-	u32 reg_cq_idx;
 };
 
 struct irdma_aeq_init_info {
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index ce1ae10c30fc..f0846b800913 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -114,33 +114,8 @@ void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
  */
 void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp)
 {
-	u64 temp;
-	u32 hw_sq_tail;
-	u32 sw_sq_head;
-
-	/* valid bit is written and loads completed before reading shadow */
-	mb();
-
-	/* read the doorbell shadow area */
-	get_64bit_val(qp->shadow_area, 0, &temp);
-
-	hw_sq_tail = (u32)FIELD_GET(IRDMA_QP_DBSA_HW_SQ_TAIL, temp);
-	sw_sq_head = IRDMA_RING_CURRENT_HEAD(qp->sq_ring);
-	if (sw_sq_head != qp->initial_ring.head) {
-		if (sw_sq_head != hw_sq_tail) {
-			if (sw_sq_head > qp->initial_ring.head) {
-				if (hw_sq_tail >= qp->initial_ring.head &&
-				    hw_sq_tail < sw_sq_head)
-					writel(qp->qp_id, qp->wqe_alloc_db);
-			} else {
-				if (hw_sq_tail >= qp->initial_ring.head ||
-				    hw_sq_tail < sw_sq_head)
-					writel(qp->qp_id, qp->wqe_alloc_db);
-			}
-		}
-	}
-
-	qp->initial_ring.head = qp->sq_ring.head;
+	dma_wmb();
+	writel(qp->qp_id, qp->wqe_alloc_db);
 }
 
 /**
@@ -194,6 +169,7 @@ __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
 	qp->sq_wrtrk_array[*wqe_idx].wrid = info->wr_id;
 	qp->sq_wrtrk_array[*wqe_idx].wr_len = total_size;
 	qp->sq_wrtrk_array[*wqe_idx].quanta = quanta;
+	qp->sq_wrtrk_array[*wqe_idx].signaled = info->signaled;
 
 	return wqe;
 }
@@ -1137,6 +1113,27 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
 }
 
 /**
+ * irdma_uk_cq_empty - Check if CQ is empty
+ * @cq: hw cq
+ */
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq)
+{
+	__le64 *cqe;
+	u8 polarity;
+	u64 qword3;
+
+	if (cq->avoid_mem_cflct)
+		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+	else
+		cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+	get_64bit_val(cqe, 24, &qword3);
+	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+	return polarity != cq->polarity;
+}
+
+/**
  * irdma_uk_cq_poll_cmpl - get cq completion info
  * @cq: hw cq
  * @info: cq poll information returned
@@ -1287,6 +1284,8 @@ int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 	info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
 
 	if (info->q_type == IRDMA_CQE_QTYPE_RQ && is_srq) {
+		unsigned long flags;
+
 		srq = qp->srq_uk;
 
 		get_64bit_val(cqe, 8, &info->wr_id);
@@ -1299,8 +1298,11 @@
 		} else {
 			info->stag_invalid_set = false;
 		}
+		spin_lock_irqsave(srq->lock, flags);
 		IRDMA_RING_MOVE_TAIL(srq->srq_ring);
+		spin_unlock_irqrestore(srq->lock, flags);
 		pring = &srq->srq_ring;
+
 	} else if (info->q_type == IRDMA_CQE_QTYPE_RQ && !is_srq) {
 		u32 array_idx;
 
@@ -1355,6 +1357,10 @@
 			info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
 			if (!info->comp_status)
 				info->bytes_xfered = qp->sq_wrtrk_array[wqe_idx].wr_len;
+			if (!qp->sq_wrtrk_array[wqe_idx].signaled) {
+				ret_code = -EFAULT;
+				goto exit;
+			}
 			info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
 			IRDMA_RING_SET_TAIL(qp->sq_ring,
 					    wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
@@ -1420,8 +1426,9 @@ exit:
 		IRDMA_RING_MOVE_TAIL(cq->cq_ring);
 		if (!cq->avoid_mem_cflct && ext_valid)
 			IRDMA_RING_MOVE_TAIL(cq->cq_ring);
-		set_64bit_val(cq->shadow_area, 0,
-			      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+		if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
+			set_64bit_val(cq->shadow_area, 0,
+				      IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
 	} else {
 		qword3 &= ~IRDMA_CQ_WQEIDX;
 		qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
@@ -1574,7 +1581,6 @@ static void irdma_setup_connection_wqes(struct irdma_qp_uk *qp,
 	qp->conn_wqes = move_cnt;
 	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->sq_ring, move_cnt);
 	IRDMA_RING_MOVE_TAIL_BY_COUNT(qp->sq_ring, move_cnt);
-	IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(qp->initial_ring, move_cnt);
 }
 
 /**
@@ -1719,7 +1725,6 @@ int irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info)
 	qp->max_sq_frag_cnt = info->max_sq_frag_cnt;
 	sq_ring_size = qp->sq_size << info->sq_shift;
 	IRDMA_RING_INIT(qp->sq_ring, sq_ring_size);
-	IRDMA_RING_INIT(qp->initial_ring, sq_ring_size);
 	if (info->first_sq_wq) {
 		irdma_setup_connection_wqes(qp, info);
 		qp->swqe_polarity = 1;
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index ab57f689827a..9eb7fd0b1cbf 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -429,6 +429,7 @@ struct irdma_wqe_uk_ops {
 			      struct irdma_bind_window *op_info);
 };
 
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq);
 int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
 			  struct irdma_cq_poll_info *info);
 void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
@@ -456,7 +457,6 @@ struct irdma_srq_uk {
 	struct irdma_uk_attrs *uk_attrs;
 	__le64 *shadow_area;
 	struct irdma_ring srq_ring;
-	struct irdma_ring initial_ring;
 	u32 srq_id;
 	u32 srq_size;
 	u32 max_srq_frag_cnt;
@@ -465,6 +465,7 @@ struct irdma_srq_uk {
 	u8 wqe_size;
 	u8 wqe_size_multiplier;
 	u8 deferred_flag;
+	spinlock_t *lock;
 };
 
 struct irdma_srq_uk_init_info {
@@ -482,7 +483,8 @@ struct irdma_sq_uk_wr_trk_info {
 	u64 wrid;
 	u32 wr_len;
 	u16 quanta;
-	u8 reserved[2];
+	u8 signaled;
+	u8 reserved[1];
 };
 
 struct irdma_qp_quanta {
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index 8b94d87b0192..cc2a12f735d3 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -452,6 +452,7 @@ struct irdma_cqp_request *irdma_alloc_and_get_cqp_request(struct irdma_cqp *cqp,
 	cqp_request->waiting = wait;
 	refcount_set(&cqp_request->refcnt, 1);
 	memset(&cqp_request->compl_info, 0, sizeof(cqp_request->compl_info));
+	memset(&cqp_request->info, 0, sizeof(cqp_request->info));
 
 	return cqp_request;
 }
@@ -1068,7 +1069,6 @@ int irdma_cqp_qp_create_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 
 	cqp_info = &cqp_request->info;
 	qp_info = &cqp_request->info.in.u.qp_create.info;
-	memset(qp_info, 0, sizeof(*qp_info));
 	qp_info->cq_num_valid = true;
 	qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;
 	cqp_info->cqp_cmd = IRDMA_OP_QP_CREATE;
@@ -1343,7 +1343,6 @@ int irdma_cqp_qp_destroy_cmd(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp)
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_QP_DESTROY;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.qp_destroy.qp = qp;
@@ -1749,7 +1748,6 @@ int irdma_cqp_gather_stats_cmd(struct irdma_sc_dev *dev,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = IRDMA_OP_STATS_GATHER;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_gather.info = pestat->gather_info;
@@ -1789,7 +1787,6 @@ int irdma_cqp_stats_inst_cmd(struct irdma_sc_vsi *vsi, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.stats_manage.info = *stats_info;
@@ -1890,7 +1887,6 @@ int irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
 		return -ENOMEM;
 
 	cqp_info = &cqp_request->info;
-	memset(cqp_info, 0, sizeof(*cqp_info));
 	cqp_info->cqp_cmd = cmd;
 	cqp_info->post_sq = 1;
 	cqp_info->in.u.ws_node.info = *node_info;
@@ -2357,24 +2353,6 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 	iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
 
-bool irdma_cq_empty(struct irdma_cq *iwcq)
-{
-	struct irdma_cq_uk *ukcq;
-	u64 qword3;
-	__le64 *cqe;
-	u8 polarity;
-
-	ukcq = &iwcq->sc_cq.cq_uk;
-	if (ukcq->avoid_mem_cflct)
-		cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(ukcq);
-	else
-		cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
-	get_64bit_val(cqe, 24, &qword3);
-	polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
-
-	return polarity != ukcq->polarity;
-}
-
 void irdma_remove_cmpls_list(struct irdma_cq *iwcq)
 {
 	struct irdma_cmpl_gen *cmpl_node;
@@ -2436,6 +2414,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
 	struct irdma_ring *sq_ring = &qp->sq_ring;
 	struct irdma_ring *rq_ring = &qp->rq_ring;
+	struct irdma_cq *iwscq = iwqp->iwscq;
+	struct irdma_cq *iwrcq = iwqp->iwrcq;
 	struct irdma_cmpl_gen *cmpl;
 	__le64 *sw_wqe;
 	u64 wqe_qword;
@@ -2443,8 +2423,8 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 	bool compl_generated = false;
 	unsigned long flags1;
 
-	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwscq)) {
+	spin_lock_irqsave(&iwscq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwscq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2452,7 +2432,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+				spin_unlock_irqrestore(&iwscq->lock, flags1);
 				return;
 			}
 
@@ -2471,24 +2451,24 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 				kfree(cmpl);
 				continue;
 			}
-			ibdev_dbg(iwqp->iwscq->ibcq.device,
+			ibdev_dbg(iwscq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx SQ Completion to list qp_id=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id);
-			list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwscq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwscq);
+			irdma_comp_handler(iwscq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
+		spin_unlock_irqrestore(&iwscq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
 
-	spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
-	if (irdma_cq_empty(iwqp->iwrcq)) {
+	spin_lock_irqsave(&iwrcq->lock, flags1);
+	if (irdma_uk_cq_empty(&iwrcq->sc_cq.cq_uk)) {
 		unsigned long flags2;
 
 		spin_lock_irqsave(&iwqp->lock, flags2);
@@ -2496,7 +2476,7 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl = kzalloc(sizeof(*cmpl), GFP_ATOMIC);
 			if (!cmpl) {
 				spin_unlock_irqrestore(&iwqp->lock, flags2);
-				spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+				spin_unlock_irqrestore(&iwrcq->lock, flags1);
 				return;
 			}
 
@@ -2508,20 +2488,20 @@ void irdma_generate_flush_completions(struct irdma_qp *iwqp)
 			cmpl->cpi.q_type = IRDMA_CQE_QTYPE_RQ;
 			/* remove the RQ WR by moving RQ tail */
 			IRDMA_RING_SET_TAIL(*rq_ring, rq_ring->tail + 1);
-			ibdev_dbg(iwqp->iwrcq->ibcq.device,
+			ibdev_dbg(iwrcq->ibcq.device,
 				  "DEV: %s: adding wr_id = 0x%llx RQ Completion to list qp_id=%d, wqe_idx=%d\n",
 				  __func__, cmpl->cpi.wr_id, qp->qp_id, wqe_idx);
 
-			list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
+			list_add_tail(&cmpl->list, &iwrcq->cmpl_generated);
 			compl_generated = true;
 		}
 		spin_unlock_irqrestore(&iwqp->lock, flags2);
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		if (compl_generated)
-			irdma_comp_handler(iwqp->iwrcq);
+			irdma_comp_handler(iwrcq);
 	} else {
-		spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
+		spin_unlock_irqrestore(&iwrcq->lock, flags1);
 		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
 				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
 	}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index c883c9ea5a83..6d9af41a2884 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -27,7 +27,8 @@ static int irdma_query_device(struct ib_device *ibdev,
 			    irdma_fw_minor_ver(&rf->sc_dev);
 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
 				  IB_DEVICE_MEM_MGT_EXTENSIONS;
-	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+	if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3)
+		props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
 
 	props->vendor_id = pcidev->vendor;
 	props->vendor_part_id = pcidev->device;
@@ -771,7 +772,6 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 
 	cqp_info = &cqp_request->info;
 	qp_info = &cqp_request->info.in.u.qp_create.info;
-	memset(qp_info, 0, sizeof(*qp_info));
 	qp_info->mac_valid = true;
 	qp_info->cq_num_valid = true;
 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
@@ -2029,6 +2029,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 	struct irdma_pci_f *rf;
 	struct irdma_cq_buf *cq_buf = NULL;
 	unsigned long flags;
+	u8 cqe_size;
 	int ret;
 
 	iwdev = to_iwdev(ibcq->device);
@@ -2045,7 +2046,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 		return -EINVAL;
 
 	if (!iwcq->user_mode) {
-		entries++;
+		entries += 2;
 
 		if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
 		    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
@@ -2053,6 +2054,10 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 
 		if (entries & 1)
 			entries += 1; /* cq size must be an even number */
+
+		cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 64 : 32;
+		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+			entries += 2;
 	}
 
 	info.cq_size = max(entries, 4);
@@ -2306,8 +2311,8 @@ static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
 
 	ukinfo->srq_size = depth >> shift;
 	ukinfo->shadow_area = mem->va + ring_size;
-	info->shadow_area_pa = info->srq_pa + ring_size;
 	info->srq_pa = mem->pa;
+	info->shadow_area_pa = info->srq_pa + ring_size;
 
 	return 0;
 }
@@ -2384,6 +2389,7 @@ static int irdma_create_srq(struct ib_srq *ibsrq,
 
 	info.vsi = &iwdev->vsi;
 	info.pd = &iwpd->sc_pd;
+	iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock;
 	err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
 	if (err_code)
 		goto free_dmem;
@@ -2483,6 +2489,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	int err_code;
 	int entries = attr->cqe;
 	bool cqe_64byte_ena;
+	u8 cqe_size;
 
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
@@ -2509,6 +2516,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	ukinfo->cq_id = cq_num;
 	cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
 			 true : false;
+	cqe_size = cqe_64byte_ena ? 64 : 32;
 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
 	if (attr->comp_vector < rf->ceqs_count)
@@ -2581,13 +2589,16 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			goto cq_free_rsrc;
 		}
 
-		entries++;
+		entries += 2;
 		if (!cqe_64byte_ena && dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 			entries *= 2;
 
 		if (entries & 1)
 			entries += 1; /* cq size must be an even number */
 
+		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+			entries += 2;
+
 		ukinfo->cq_size = entries;
 
 		if (cqe_64byte_ena)
@@ -3103,12 +3114,10 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.alloc_stag.info;
-	memset(info, 0, sizeof(*info));
 	info->page_size = PAGE_SIZE;
 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->pd_id = iwpd->sc_pd.pd_id;
 	info->total_len = iwmr->len;
-	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
 	info->remote_access = true;
 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
 	cqp_info->post_sq = 1;
@@ -3119,7 +3128,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 	if (status)
 		return status;
 
-	iwmr->is_hwreg = 1;
+	iwmr->is_hwreg = true;
 
 	return 0;
 }
@@ -3253,7 +3262,6 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 
 	cqp_info = &cqp_request->info;
 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
-	memset(stag_info, 0, sizeof(*stag_info));
 	stag_info->va = iwpbl->user_base;
 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 	stag_info->stag_key = (u8)iwmr->stag;
@@ -3263,7 +3271,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
 		stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
 	stag_info->pd_id = iwpd->sc_pd.pd_id;
-	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+	stag_info->all_memory = iwmr->dma_mr;
 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
 	else
@@ -3290,7 +3298,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 
 	if (!ret)
-		iwmr->is_hwreg = 1;
+		iwmr->is_hwreg = true;
 
 	return ret;
 }
@@ -3647,7 +3655,6 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
 
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.dealloc_stag.info;
-	memset(info, 0, sizeof(*info));
 	info->pd_id = iwpd->sc_pd.pd_id;
 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->mr = true;
@@ -3663,7 +3670,7 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
 	if (status)
 		return status;
 
-	iwmr->is_hwreg = 0;
+	iwmr->is_hwreg = false;
 
 	return 0;
 }
@@ -3786,9 +3793,10 @@ static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
  * @size: size of memory to register
  * @access: Access rights
  * @iova_start: start of virtual address for physical buffers
+ * @dma_mr: Flag indicating whether this region is a PD DMA MR
  */
 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
-				u64 *iova_start)
+				u64 *iova_start, bool dma_mr)
 {
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_pbl *iwpbl;
@@ -3805,6 +3813,7 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
 	iwpbl = &iwmr->iwpbl;
 	iwpbl->iwmr = iwmr;
 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+	iwmr->dma_mr = dma_mr;
 	iwpbl->user_base = *iova_start;
 	stag = irdma_create_stag(iwdev);
 	if (!stag) {
@@ -3843,7 +3852,7 @@ static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	u64 kva = 0;
 
-	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
 }
 
 /**
@@ -4078,7 +4087,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
-			info.local_fence = info.read_fence;
+			info.local_fence = true;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
 			break;
@@ -4505,7 +4514,7 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
 	}
 
 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
-	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+	    (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated)))
 		ret = 1;
 	spin_unlock_irqrestore(&iwcq->lock, flags);
 
@@ -5204,7 +5213,7 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
 	struct irdma_ah *parent_ah;
 	int err;
 
-	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+	if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
 		return -EINVAL;
 
 	err = irdma_setup_ah(ibah, attr);
@@ -5500,7 +5509,9 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
 	irdma_rt_deinit_hw(iwdev);
 	if (!iwdev->is_vport) {
 		irdma_ctrl_deinit_hw(iwdev->rf);
-		if (iwdev->rf->vchnl_wq)
+		if (iwdev->rf->vchnl_wq) {
 			destroy_workqueue(iwdev->rf->vchnl_wq);
+			mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
+		}
 	}
 }
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index ac8b38701835..aabbb3442098 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -111,7 +111,8 @@ struct irdma_mr {
 	};
 	struct ib_umem *region;
 	int access;
-	u8 is_hwreg;
+	bool is_hwreg:1;
+	bool dma_mr:1;
 	u16 type;
 	u32 page_cnt;
 	u64 page_size;
