Diffstat (limited to 'drivers/infiniband/hw/bnxt_re/ib_verbs.c')
-rw-r--r--	drivers/infiniband/hw/bnxt_re/ib_verbs.c	156
1 file changed, 149 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 260dc67b8b87..4dab5ca7362b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
 	}
 	port_attr->max_mtu = IB_MTU_4096;
 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
-	port_attr->gid_tbl_len = dev_attr->max_sgid;
+	/* One GID is reserved for RawEth QP. Report one less */
+	port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+				  dev_attr->max_sgid);
 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 				    IB_PORT_DEVICE_MGMT_SUP |
 				    IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
 	if (!ctx)
 		return -EINVAL;
 
-	if (sgid_tbl && sgid_tbl->active) {
+	if (sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
 				 rdev->qplib_res.netdev->dev_addr,
-				 vlan_id, true, &tbl_idx);
+				 vlan_id, true, &tbl_idx, false, 0);
 	if (rc == -EALREADY) {
 		ctx_tbl = sgid_tbl->ctx;
 		ctx_tbl[tbl_idx]->refcnt++;
@@ -955,6 +957,20 @@ fail:
 	return rc;
 }
 
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return;
+
+	rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 0xFFFF, true);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
@@ -994,6 +1010,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
 		atomic_dec(&rdev->stats.res.ud_qp_count);
 
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+		bnxt_re_del_unique_gid(rdev);
+
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
 
@@ -1018,6 +1037,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
 		return CMDQ_CREATE_QP_TYPE_RC;
 	case IB_QPT_UD:
 		return CMDQ_CREATE_QP_TYPE_UD;
+	case IB_QPT_RAW_PACKET:
+		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
 	default:
 		return IB_QPT_MAX;
 	}
@@ -1595,6 +1616,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
 	return rc;
 }
 
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return 0;
+
+	rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+	addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+	rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 rdev->qplib_res.netdev->dev_addr,
+				 0xFFFF, true, &rdev->ugid_index, true,
+				 hctx->stats3.fw_id);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+	return rc;
+}
+
 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		      struct ib_udata *udata)
 {
@@ -1656,6 +1700,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		}
 	}
 
+	/* Support for RawEth QP is added to capture TCP pkt dump.
+	 * So unique SGID is used to avoid incorrect statistics on per
+	 * function stats_ctx
+	 */
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+		rc = bnxt_re_add_unique_gid(rdev);
+		if (rc)
+			goto qp_destroy;
+		qp->qplib_qp.ugid_index = rdev->ugid_index;
+	}
+
 	qp->ib_qp.qp_num = qp->qplib_qp.id;
 	if (qp_init_attr->qp_type == IB_QPT_GSI)
 		rdev->gsi_ctx.gsi_qp = qp;
@@ -2301,7 +2356,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->pkey_index = qplib_qp->pkey_index;
 	qp_attr->qkey = qplib_qp->qkey;
 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
 			qplib_qp->ah.host_sgid_index, qplib_qp->ah.hop_limit,
 			qplib_qp->ah.traffic_class);
@@ -3248,9 +3303,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 			       IB_ACCESS_LOCAL_WRITE);
 	if (IS_ERR(cq->resize_umem)) {
 		rc = PTR_ERR(cq->resize_umem);
+		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+			  __func__, cq->resize_umem);
 		cq->resize_umem = NULL;
-		ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
-			  __func__, rc);
 		goto fail;
 	}
 	cq->resize_cqe = entries;
@@ -4392,6 +4447,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	}
 }
 
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+	int rc;
+
+	rc = bnxt_re_hwrm_alloc_vnic(rdev);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+	if (rc)
+		goto out_free_vnic;
+
+	return 0;
+out_free_vnic:
+	bnxt_re_hwrm_free_vnic(rdev);
+	return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+				    struct ib_flow_attr *attr,
+				    struct ib_udata *udata)
+{
+	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+	struct bnxt_re_dev *rdev = qp->rdev;
+	struct bnxt_re_flow *flow;
+	int rc;
+
+	if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+	    !rdev->rcfw.roce_mirror)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	mutex_lock(&rdev->qp_lock);
+	if (rdev->sniffer_flow_created) {
+		ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+		mutex_unlock(&rdev->qp_lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	if (!flow) {
+		mutex_unlock(&rdev->qp_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	flow->rdev = rdev;
+
+	rc = bnxt_re_setup_vnic(rdev, qp);
+	if (rc)
+		goto out_free_flow;
+
+	rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+	if (rc)
+		goto out_free_vnic;
+
+	rdev->sniffer_flow_created = 1;
+	mutex_unlock(&rdev->qp_lock);
+
+	return &flow->ib_flow;
+
+out_free_vnic:
+	bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+	mutex_unlock(&rdev->qp_lock);
+	kfree(flow);
+	return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+	struct bnxt_re_flow *flow =
+		container_of(flow_id, struct bnxt_re_flow, ib_flow);
+	struct bnxt_re_dev *rdev = flow->rdev;
+	int rc;
+
+	mutex_lock(&rdev->qp_lock);
+	rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+	if (rc)
+		ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+	rdev->sniffer_flow_created = 0;
+
+	bnxt_re_hwrm_free_vnic(rdev);
+	mutex_unlock(&rdev->qp_lock);
+	kfree(flow);
+
+	return rc;
+}
+
 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
 {
 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4604,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 		return err;
 
 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
-			     &dpi, sizeof(length));
+			     &dpi, sizeof(dpi));
 	if (err)
 		return err;
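
For context, a minimal userspace sketch of how the new kernel path is exercised; this is an illustration, not part of the patch. It assumes libibverbs, that the first device in the list is the bnxt_re device, port 1, and CAP_NET_RAW; error handling and the QP state transitions are abbreviated. A raw packet QP is created (mapped to CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE by __from_ib_qp_type() above) and a sniffer flow is attached to it, which lands in bnxt_re_create_flow().

	#include <stdio.h>
	#include <infiniband/verbs.h>

	int main(void)
	{
		struct ibv_device **list = ibv_get_device_list(NULL);
		struct ibv_context *ctx;
		struct ibv_pd *pd;
		struct ibv_cq *cq;

		if (!list || !list[0])
			return 1;
		ctx = ibv_open_device(list[0]);	/* assumes bnxt_re is device 0 */
		pd = ibv_alloc_pd(ctx);
		cq = ibv_create_cq(ctx, 256, NULL, NULL, 0);

		/* Raw packet QP; maps to CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE
		 * in bnxt_re. Creating one requires CAP_NET_RAW. */
		struct ibv_qp_init_attr qp_attr = {
			.send_cq = cq,
			.recv_cq = cq,
			.cap = { .max_send_wr = 1, .max_recv_wr = 256,
				 .max_send_sge = 1, .max_recv_sge = 1 },
			.qp_type = IBV_QPT_RAW_PACKET,
		};
		struct ibv_qp *qp = ibv_create_qp(pd, &qp_attr);
		if (!qp) {
			perror("ibv_create_qp");
			return 1;
		}

		/* Sniffer flow: reaches bnxt_re_create_flow() via
		 * IB_FLOW_ATTR_SNIFFER. A second flow would fail with
		 * EBUSY because sniffer_flow_created is already set. */
		struct ibv_flow_attr flow_attr = {
			.type = IBV_FLOW_ATTR_SNIFFER,
			.size = sizeof(flow_attr),
			.port = 1,
		};
		struct ibv_flow *flow = ibv_create_flow(qp, &flow_attr);
		if (!flow) {
			perror("ibv_create_flow");
			return 1;
		}

		/* ... move the QP through INIT/RTR, post receive WRs and
		 * poll the CQ to consume the mirrored packets ... */

		ibv_destroy_flow(flow);		/* reaches bnxt_re_destroy_flow() */
		ibv_destroy_qp(qp);
		ibv_dealloc_pd(pd);
		ibv_destroy_cq(cq);
		ibv_close_device(ctx);
		ibv_free_device_list(list);
		return 0;
	}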
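
A short hedged illustration of the "unique" GID that bnxt_re_add_unique_gid() builds: the fixed prefix fe80:0000:0000:abcd (cpu_to_be64(0xfe8000000000abcdLL), deliberately different from the standard fe80::/64 link-local prefix so it cannot collide with the regular GID entry) followed by the modified-EUI-64 interface ID that the kernel helper addrconf_ifid_eui48() derives from the netdev MAC. The ifid_from_mac() below is a userspace re-implementation for demonstration only, and the MAC address is made up.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	static void ifid_from_mac(uint8_t ifid[8], const uint8_t mac[6])
	{
		/* EUI-48 -> modified EUI-64: split the MAC in half, insert
		 * 0xFF 0xFE, and flip the universal/local bit of octet 0,
		 * mirroring what addrconf_ifid_eui48() does in the kernel. */
		memcpy(ifid, mac, 3);
		ifid[3] = 0xFF;
		ifid[4] = 0xFE;
		memcpy(ifid + 5, mac + 3, 3);
		ifid[0] ^= 0x02;
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0x10, 0x18, 0xaa, 0xbb, 0xcc };
		/* First 8 bytes: big-endian 0xfe8000000000abcd, as stored in
		 * rdev->ugid.global.subnet_prefix by the patch. */
		uint8_t gid[16] = { 0xfe, 0x80, 0, 0, 0, 0, 0xab, 0xcd };

		ifid_from_mac(gid + 8, mac);	/* last 8 bytes = interface ID */
		for (int i = 0; i < 16; i += 2)
			printf("%02x%02x%s", gid[i], gid[i + 1],
			       i < 14 ? ":" : "\n");
		/* prints fe80:0000:0000:abcd:0210:18ff:feaa:bbcc */
		return 0;
	}

This GID is installed with a dedicated stats context (hctx->stats3.fw_id), which is why the patch comment says it avoids polluting the per-function statistics when RawEth QPs capture mirrored traffic.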