Diffstat (limited to 'drivers/infiniband/hw/bnxt_re')
 drivers/infiniband/hw/bnxt_re/bnxt_re.h     |  19
 drivers/infiniband/hw/bnxt_re/debugfs.c     |  37
 drivers/infiniband/hw/bnxt_re/hw_counters.c | 109
 drivers/infiniband/hw/bnxt_re/hw_counters.h |  26
 drivers/infiniband/hw/bnxt_re/ib_verbs.c    | 156
 drivers/infiniband/hw/bnxt_re/ib_verbs.h    |  10
 drivers/infiniband/hw/bnxt_re/main.c        | 378
 drivers/infiniband/hw/bnxt_re/qplib_fp.c    |  13
 drivers/infiniband/hw/bnxt_re/qplib_fp.h    |   2
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.c  |  10
 drivers/infiniband/hw/bnxt_re/qplib_rcfw.h  |   1
 drivers/infiniband/hw/bnxt_re/qplib_res.c   |  38
 drivers/infiniband/hw/bnxt_re/qplib_res.h   |  21
 drivers/infiniband/hw/bnxt_re/qplib_sp.c    |  98
 drivers/infiniband/hw/bnxt_re/qplib_sp.h    |   6
 drivers/infiniband/hw/bnxt_re/roce_hsi.h    |  44
16 files changed, 622 insertions, 346 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index 6df5a2738c95..3485e495ac6a 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -172,9 +172,9 @@ struct bnxt_re_dev {
 	struct list_head		list;
 	unsigned long			flags;
 #define BNXT_RE_FLAG_NETDEV_REGISTERED		0
+#define BNXT_RE_FLAG_STATS_CTX3_ALLOC		1
 #define BNXT_RE_FLAG_HAVE_L2_REF		3
 #define BNXT_RE_FLAG_RCFW_CHANNEL_EN		4
-#define BNXT_RE_FLAG_QOS_WORK_REG		5
 #define BNXT_RE_FLAG_RESOURCES_ALLOCATED	7
 #define BNXT_RE_FLAG_RESOURCES_INITIALIZED	8
 #define BNXT_RE_FLAG_ERR_DEVICE_DETACHED	17
@@ -187,9 +187,6 @@ struct bnxt_re_dev {
 	int				id;
 
-	struct delayed_work		worker;
-	u8				cur_prio_map;
-
 	/* RCFW Channel */
 	struct bnxt_qplib_rcfw		rcfw;
@@ -227,6 +224,13 @@ struct bnxt_re_dev {
 	struct workqueue_struct		*dcb_wq;
 	struct dentry			*cc_config;
 	struct bnxt_re_dbg_cc_config_params *cc_config_params;
+#define BNXT_VPD_FLD_LEN	32
+	char				board_partno[BNXT_VPD_FLD_LEN];
+	/* RoCE mirror */
+	u16				mirror_vnic_id;
+	union ib_gid			ugid;
+	u32				ugid_index;
+	u8				sniffer_flow_created : 1;
 };
 
 #define to_bnxt_re_dev(ptr, member)	\
@@ -243,6 +247,10 @@ int bnxt_re_assign_pma_port_counters(struct bnxt_re_dev *rdev, struct ib_mad *ou
 int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
 					 struct ib_mad *out_mad);
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev);
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id);
+
 static inline struct device *rdev_to_dev(struct bnxt_re_dev *rdev)
 {
 	if (rdev)
@@ -276,4 +284,7 @@ static inline int bnxt_re_read_context_allowed(struct bnxt_re_dev *rdev)
 #define BNXT_RE_CONTEXT_TYPE_MRW_SIZE_P7	192
 #define BNXT_RE_CONTEXT_TYPE_SRQ_SIZE_P7	192
 
+#define BNXT_RE_HWRM_CMD_TIMEOUT(rdev)	\
+	((rdev)->chip_ctx->hwrm_cmd_max_timeout * 1000)
+
 #endif
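The flags word extended above is an ordinary kernel bitmap driven by atomic bitops. A minimal sketch of the usual set/consume pairing, assuming the new BNXT_RE_FLAG_STATS_CTX3_ALLOC bit guards a once-only teardown (the pairing shown is an assumption for illustration; this hunk only adds the bit definition):

	/* Illustrative sketch only -- not part of this patch. */
	set_bit(BNXT_RE_FLAG_STATS_CTX3_ALLOC, &rdev->flags);	/* after the alloc succeeds */

	/* on teardown: run the free path at most once */
	if (test_and_clear_bit(BNXT_RE_FLAG_STATS_CTX3_ALLOC, &rdev->flags))
		bnxt_re_put_stats3_ctx(rdev);			/* hypothetical pairing */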
%llu\n", rdev->stats.pacing.alerts); + seq_printf(m, "DB FIFO Register\t: 0x%x\n", + readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off)); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(info); + +static void bnxt_re_debugfs_add_info(struct bnxt_re_dev *rdev) +{ + debugfs_create_file("info", 0400, rdev->dbg_root, rdev, &info_fops); +} + void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev) { struct pci_dev *pdev = rdev->en_dev->pdev; @@ -325,6 +360,8 @@ void bnxt_re_debugfs_add_pdev(struct bnxt_re_dev *rdev) rdev->qp_debugfs = debugfs_create_dir("QPs", rdev->dbg_root); rdev->cc_config = debugfs_create_dir("cc_config", rdev->dbg_root); + bnxt_re_debugfs_add_info(rdev); + rdev->cc_config_params = kzalloc(sizeof(*cc_params), GFP_KERNEL); for (i = 0; i < BNXT_RE_CC_PARAM_GEN0; i++) { diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 44bb082e0a60..651cf9d0e0c7 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -51,25 +51,6 @@ #include "hw_counters.h" static const struct rdma_stat_desc bnxt_re_stat_descs[] = { - [BNXT_RE_ACTIVE_PD].name = "active_pds", - [BNXT_RE_ACTIVE_AH].name = "active_ahs", - [BNXT_RE_ACTIVE_QP].name = "active_qps", - [BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps", - [BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps", - [BNXT_RE_ACTIVE_SRQ].name = "active_srqs", - [BNXT_RE_ACTIVE_CQ].name = "active_cqs", - [BNXT_RE_ACTIVE_MR].name = "active_mrs", - [BNXT_RE_ACTIVE_MW].name = "active_mws", - [BNXT_RE_WATERMARK_PD].name = "watermark_pds", - [BNXT_RE_WATERMARK_AH].name = "watermark_ahs", - [BNXT_RE_WATERMARK_QP].name = "watermark_qps", - [BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps", - [BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps", - [BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs", - [BNXT_RE_WATERMARK_CQ].name = "watermark_cqs", - [BNXT_RE_WATERMARK_MR].name = "watermark_mrs", - [BNXT_RE_WATERMARK_MW].name = "watermark_mws", - [BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt", [BNXT_RE_RX_PKTS].name = "rx_pkts", [BNXT_RE_RX_BYTES].name = "rx_bytes", [BNXT_RE_TX_PKTS].name = "tx_pkts", @@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = { [BNXT_RE_TX_DISCARDS].name = "tx_roce_discards", [BNXT_RE_RX_ERRORS].name = "rx_roce_errors", [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards", - [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits", - [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd", - [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded", - [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd", - [BNXT_RE_MISSING_RESP].name = "missing_resp", + [BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err", + [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err", + [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded", + [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err", + [BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err", [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err", [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err", [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err", [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err", [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err", - [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err", - [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err", + [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request", + [BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors", [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err", - [BNXT_RE_DUP_REQ].name = "dup_req", + 
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 44bb082e0a60..651cf9d0e0c7 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -51,25 +51,6 @@
 #include "hw_counters.h"
 
 static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
-	[BNXT_RE_ACTIVE_PD].name = "active_pds",
-	[BNXT_RE_ACTIVE_AH].name = "active_ahs",
-	[BNXT_RE_ACTIVE_QP].name = "active_qps",
-	[BNXT_RE_ACTIVE_RC_QP].name = "active_rc_qps",
-	[BNXT_RE_ACTIVE_UD_QP].name = "active_ud_qps",
-	[BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
-	[BNXT_RE_ACTIVE_CQ].name = "active_cqs",
-	[BNXT_RE_ACTIVE_MR].name = "active_mrs",
-	[BNXT_RE_ACTIVE_MW].name = "active_mws",
-	[BNXT_RE_WATERMARK_PD].name = "watermark_pds",
-	[BNXT_RE_WATERMARK_AH].name = "watermark_ahs",
-	[BNXT_RE_WATERMARK_QP].name = "watermark_qps",
-	[BNXT_RE_WATERMARK_RC_QP].name = "watermark_rc_qps",
-	[BNXT_RE_WATERMARK_UD_QP].name = "watermark_ud_qps",
-	[BNXT_RE_WATERMARK_SRQ].name = "watermark_srqs",
-	[BNXT_RE_WATERMARK_CQ].name = "watermark_cqs",
-	[BNXT_RE_WATERMARK_MR].name = "watermark_mrs",
-	[BNXT_RE_WATERMARK_MW].name = "watermark_mws",
-	[BNXT_RE_RESIZE_CQ_CNT].name = "resize_cq_cnt",
 	[BNXT_RE_RX_PKTS].name = "rx_pkts",
 	[BNXT_RE_RX_BYTES].name = "rx_bytes",
 	[BNXT_RE_TX_PKTS].name = "tx_pkts",
@@ -79,22 +60,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_TX_DISCARDS].name = "tx_roce_discards",
 	[BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
 	[BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
-	[BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
-	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
-	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
-	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
-	[BNXT_RE_MISSING_RESP].name = "missing_resp",
+	[BNXT_RE_TO_RETRANSMITS].name = "local_ack_timeout_err",
+	[BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "packet_seq_err",
+	[BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+	[BNXT_RE_RNR_NAKS_RCVD].name = "rnr_nak_retry_err",
+	[BNXT_RE_MISSING_RESP].name = "implied_nak_seq_err",
 	[BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
 	[BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
 	[BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
 	[BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
 	[BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
-	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
-	[BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+	[BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "req_remote_invalid_request",
+	[BNXT_RE_REMOTE_ACCESS_ERR].name = "req_remote_access_errors",
 	[BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
-	[BNXT_RE_DUP_REQ].name = "dup_req",
+	[BNXT_RE_DUP_REQ].name = "duplicate_request",
 	[BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
-	[BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+	[BNXT_RE_RES_LENGTH_MISMATCH].name = "resp_local_length_error",
 	[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
 	[BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
 	[BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
@@ -118,7 +99,7 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
 	[BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
 	[BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
-	[BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+	[BNXT_RE_OUT_OF_SEQ_ERR].name = "out_of_sequence",
 	[BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
 	[BNXT_RE_TX_READ_REQ].name = "tx_read_req",
 	[BNXT_RE_TX_READ_RES].name = "tx_read_resp",
@@ -126,23 +107,22 @@ static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
 	[BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
 	[BNXT_RE_TX_ROCE_PKTS].name = "tx_roce_only_pkts",
 	[BNXT_RE_TX_ROCE_BYTES].name = "tx_roce_only_bytes",
-	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
-	[BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+	[BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_requests",
+	[BNXT_RE_RX_READ_REQ].name = "rx_read_requests",
 	[BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
-	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+	[BNXT_RE_RX_WRITE_REQ].name = "rx_write_requests",
 	[BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
 	[BNXT_RE_RX_ROCE_PKTS].name = "rx_roce_only_pkts",
 	[BNXT_RE_RX_ROCE_BYTES].name = "rx_roce_only_bytes",
 	[BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
 	[BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
-	[BNXT_RE_OOB].name = "rx_out_of_buffer",
-	[BNXT_RE_TX_CNP].name = "tx_cnp_pkts",
-	[BNXT_RE_RX_CNP].name = "rx_cnp_pkts",
-	[BNXT_RE_RX_ECN].name = "rx_ecn_marked_pkts",
-	[BNXT_RE_PACING_RESCHED].name = "pacing_reschedule",
-	[BNXT_RE_PACING_CMPL].name = "pacing_complete",
-	[BNXT_RE_PACING_ALERT].name = "pacing_alerts",
-	[BNXT_RE_DB_FIFO_REG].name = "db_fifo_register",
+	[BNXT_RE_OOB].name = "out_of_buffer",
+	[BNXT_RE_TX_CNP].name = "np_cnp_pkts",
+	[BNXT_RE_RX_CNP].name = "rp_cnp_handled",
+	[BNXT_RE_RX_ECN].name = "np_ecn_marked_roce_packets",
+	[BNXT_RE_REQ_CQE_ERROR].name = "req_cqe_error",
+	[BNXT_RE_RESP_CQE_ERROR].name = "resp_cqe_error",
+	[BNXT_RE_RESP_REMOTE_ACCESS_ERRS].name = "resp_remote_access_errors",
 };
 
 static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
@@ -273,18 +253,20 @@ static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
 			err_s->res_rx_pci_err;
 	stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
 			err_s->res_oos_drop_count;
-}
-
-static void bnxt_re_copy_db_pacing_stats(struct bnxt_re_dev *rdev,
-					 struct rdma_hw_stats *stats)
-{
-	struct bnxt_re_db_pacing_stats *pacing_s = &rdev->stats.pacing;
-
-	stats->value[BNXT_RE_PACING_RESCHED] = pacing_s->resched;
-	stats->value[BNXT_RE_PACING_CMPL] = pacing_s->complete;
-	stats->value[BNXT_RE_PACING_ALERT] = pacing_s->alerts;
-	stats->value[BNXT_RE_DB_FIFO_REG] =
-		readl(rdev->en_dev->bar0 + rdev->pacing.dbr_db_fifo_reg_off);
+	stats->value[BNXT_RE_REQ_CQE_ERROR] =
+			err_s->bad_resp_err +
+			err_s->local_qp_op_err +
+			err_s->local_protection_err +
+			err_s->mem_mgmt_op_err +
+			err_s->remote_invalid_req_err +
+			err_s->remote_access_err +
+			err_s->remote_op_err;
+	stats->value[BNXT_RE_RESP_CQE_ERROR] =
+			err_s->res_cmp_err +
+			err_s->res_cq_load_err;
+	stats->value[BNXT_RE_RESP_REMOTE_ACCESS_ERRS] =
+			err_s->res_rx_no_perm +
+			err_s->res_tx_no_perm;
 }
 
 int bnxt_re_assign_pma_port_ext_counters(struct bnxt_re_dev *rdev,
 					 struct ib_mad *out_mad)
@@ -382,7 +364,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 			    u32 port, int index)
 {
 	struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-	struct bnxt_re_res_cntrs *res_s = &rdev->stats.res;
 	struct bnxt_qplib_roce_stats *err_s = NULL;
 	struct ctx_hw_stats *hw_stats = NULL;
 	int rc = 0;
@@ -391,26 +372,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 	if (!port || !stats)
 		return -EINVAL;
 
-	stats->value[BNXT_RE_ACTIVE_QP] = atomic_read(&res_s->qp_count);
-	stats->value[BNXT_RE_ACTIVE_RC_QP] = atomic_read(&res_s->rc_qp_count);
-	stats->value[BNXT_RE_ACTIVE_UD_QP] = atomic_read(&res_s->ud_qp_count);
-	stats->value[BNXT_RE_ACTIVE_SRQ] = atomic_read(&res_s->srq_count);
-	stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&res_s->cq_count);
-	stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&res_s->mr_count);
-	stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&res_s->mw_count);
-	stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&res_s->pd_count);
-	stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&res_s->ah_count);
-	stats->value[BNXT_RE_WATERMARK_QP] = res_s->qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_RC_QP] = res_s->rc_qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_UD_QP] = res_s->ud_qp_watermark;
-	stats->value[BNXT_RE_WATERMARK_SRQ] = res_s->srq_watermark;
-	stats->value[BNXT_RE_WATERMARK_CQ] = res_s->cq_watermark;
-	stats->value[BNXT_RE_WATERMARK_MR] = res_s->mr_watermark;
-	stats->value[BNXT_RE_WATERMARK_MW] = res_s->mw_watermark;
-	stats->value[BNXT_RE_WATERMARK_PD] = res_s->pd_watermark;
-	stats->value[BNXT_RE_WATERMARK_AH] = res_s->ah_watermark;
-	stats->value[BNXT_RE_RESIZE_CQ_CNT] = atomic_read(&res_s->resize_count);
-
 	if (hw_stats) {
 		stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
 			le64_to_cpu(hw_stats->tx_bcast_pkts);
@@ -449,8 +410,6 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
 				goto done;
 			}
 		}
-		if (rdev->pacing.dbr_pacing && bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx))
-			bnxt_re_copy_db_pacing_stats(rdev, stats);
 	}
 
 done:
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index e541b6f8ca9f..09d371d442aa 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,25 +41,6 @@
 #define __BNXT_RE_HW_STATS_H__
 
 enum bnxt_re_hw_stats {
-	BNXT_RE_ACTIVE_PD,
-	BNXT_RE_ACTIVE_AH,
-	BNXT_RE_ACTIVE_QP,
-	BNXT_RE_ACTIVE_RC_QP,
-	BNXT_RE_ACTIVE_UD_QP,
-	BNXT_RE_ACTIVE_SRQ,
-	BNXT_RE_ACTIVE_CQ,
-	BNXT_RE_ACTIVE_MR,
-	BNXT_RE_ACTIVE_MW,
-	BNXT_RE_WATERMARK_PD,
-	BNXT_RE_WATERMARK_AH,
-	BNXT_RE_WATERMARK_QP,
-	BNXT_RE_WATERMARK_RC_QP,
-	BNXT_RE_WATERMARK_UD_QP,
-	BNXT_RE_WATERMARK_SRQ,
-	BNXT_RE_WATERMARK_CQ,
-	BNXT_RE_WATERMARK_MR,
-	BNXT_RE_WATERMARK_MW,
-	BNXT_RE_RESIZE_CQ_CNT,
 	BNXT_RE_RX_PKTS,
 	BNXT_RE_RX_BYTES,
 	BNXT_RE_TX_PKTS,
@@ -129,10 +110,9 @@ enum bnxt_re_hw_stats {
 	BNXT_RE_TX_CNP,
 	BNXT_RE_RX_CNP,
 	BNXT_RE_RX_ECN,
-	BNXT_RE_PACING_RESCHED,
-	BNXT_RE_PACING_CMPL,
-	BNXT_RE_PACING_ALERT,
-	BNXT_RE_DB_FIFO_REG,
+	BNXT_RE_REQ_CQE_ERROR,
+	BNXT_RE_RESP_CQE_ERROR,
+	BNXT_RE_RESP_REMOTE_ACCESS_ERRS,
 	BNXT_RE_NUM_EXT_COUNTERS
 };
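The renames above move bnxt_re onto the counter vocabulary other RDMA drivers already export (out_of_sequence, np_cnp_pkts, duplicate_request, and so on), so fabric-wide tooling can aggregate without per-driver translation, while the driver-private resource watermarks move to the debugfs info node added earlier. Per-port hw_counters surface under sysfs; a minimal userspace reader, assuming a device named bnxt_re0 and port 1 (path layout per the kernel's sysfs-class-infiniband ABI):

#include <stdio.h>

/* Read one hw_counter for (device, port); returns -1 on error. */
static long long read_hw_counter(const char *dev, int port, const char *name)
{
	char path[256];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/infiniband/%s/ports/%d/hw_counters/%s",
		 dev, port, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	printf("out_of_sequence = %lld\n",
	       read_hw_counter("bnxt_re0", 1, "out_of_sequence"));
	return 0;
}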
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 260dc67b8b87..4dab5ca7362b 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -288,7 +288,9 @@ int bnxt_re_query_port(struct ib_device *ibdev, u32 port_num,
 	}
 	port_attr->max_mtu = IB_MTU_4096;
 	port_attr->active_mtu = iboe_get_mtu(rdev->netdev->mtu);
-	port_attr->gid_tbl_len = dev_attr->max_sgid;
+	/* One GID is reserved for RawEth QP. Report one less */
+	port_attr->gid_tbl_len = (rdev->rcfw.roce_mirror ? (dev_attr->max_sgid - 1) :
+				  dev_attr->max_sgid);
 	port_attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
 				    IB_PORT_DEVICE_MGMT_SUP |
 				    IB_PORT_VENDOR_CLASS_SUP;
@@ -375,7 +377,7 @@ int bnxt_re_del_gid(const struct ib_gid_attr *attr, void **context)
 	if (!ctx)
 		return -EINVAL;
 
-	if (sgid_tbl && sgid_tbl->active) {
+	if (sgid_tbl->active) {
 		if (ctx->idx >= sgid_tbl->max)
 			return -EINVAL;
 		gid_to_del = &sgid_tbl->tbl[ctx->idx].gid;
@@ -429,7 +431,7 @@ int bnxt_re_add_gid(const struct ib_gid_attr *attr, void **context)
 	rc = bnxt_qplib_add_sgid(sgid_tbl, (struct bnxt_qplib_gid *)&attr->gid,
 				 rdev->qplib_res.netdev->dev_addr,
-				 vlan_id, true, &tbl_idx);
+				 vlan_id, true, &tbl_idx, false, 0);
 	if (rc == -EALREADY) {
 		ctx_tbl = sgid_tbl->ctx;
 		ctx_tbl[tbl_idx]->refcnt++;
@@ -955,6 +957,20 @@ fail:
 	return rc;
 }
 
+static void bnxt_re_del_unique_gid(struct bnxt_re_dev *rdev)
+{
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return;
+
+	rc = bnxt_qplib_del_sgid(&rdev->qplib_res.sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 0xFFFF, true);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to delete unique GID, rc: %d\n", rc);
+}
+
 /* Queue Pairs */
 int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 {
@@ -994,6 +1010,9 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	else if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_UD)
 		atomic_dec(&rdev->stats.res.ud_qp_count);
 
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+		bnxt_re_del_unique_gid(rdev);
+
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
 
@@ -1018,6 +1037,8 @@ static u8 __from_ib_qp_type(enum ib_qp_type type)
 		return CMDQ_CREATE_QP_TYPE_RC;
 	case IB_QPT_UD:
 		return CMDQ_CREATE_QP_TYPE_UD;
+	case IB_QPT_RAW_PACKET:
+		return CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE;
 	default:
 		return IB_QPT_MAX;
 	}
@@ -1595,6 +1616,29 @@ static bool bnxt_re_test_qp_limits(struct bnxt_re_dev *rdev,
 	return rc;
 }
 
+static int bnxt_re_add_unique_gid(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return 0;
+
+	rdev->ugid.global.subnet_prefix = cpu_to_be64(0xfe8000000000abcdLL);
+	addrconf_ifid_eui48(&rdev->ugid.raw[8], rdev->netdev);
+
+	rc = bnxt_qplib_add_sgid(&res->sgid_tbl,
+				 (struct bnxt_qplib_gid *)&rdev->ugid,
+				 rdev->qplib_res.netdev->dev_addr,
+				 0xFFFF, true, &rdev->ugid_index, true,
+				 hctx->stats3.fw_id);
+	if (rc)
+		dev_err(rdev_to_dev(rdev), "Failed to add unique GID. rc = %d\n", rc);
+
+	return rc;
+}
+
 int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		      struct ib_udata *udata)
 {
@@ -1656,6 +1700,17 @@ int bnxt_re_create_qp(struct ib_qp *ib_qp, struct ib_qp_init_attr *qp_init_attr,
 		}
 	}
 
+	/* Support for RawEth QP is added to capture TCP pkt dump.
+	 * So unique SGID is used to avoid incorrect statistics on per
+	 * function stats_ctx
+	 */
+	if (qp->qplib_qp.type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE) {
+		rc = bnxt_re_add_unique_gid(rdev);
+		if (rc)
+			goto qp_destroy;
+		qp->qplib_qp.ugid_index = rdev->ugid_index;
+	}
+
 	qp->ib_qp.qp_num = qp->qplib_qp.id;
 	if (qp_init_attr->qp_type == IB_QPT_GSI)
 		rdev->gsi_ctx.gsi_qp = qp;
@@ -2301,7 +2356,7 @@ int bnxt_re_query_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
 	qp_attr->pkey_index = qplib_qp->pkey_index;
 	qp_attr->qkey = qplib_qp->qkey;
 	qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
-	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->ah.flow_label,
+	rdma_ah_set_grh(&qp_attr->ah_attr, NULL, qplib_qp->udp_sport,
 			qplib_qp->ah.host_sgid_index,
 			qplib_qp->ah.hop_limit,
 			qplib_qp->ah.traffic_class);
@@ -3248,9 +3303,9 @@ int bnxt_re_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
 				       IB_ACCESS_LOCAL_WRITE);
 		if (IS_ERR(cq->resize_umem)) {
 			rc = PTR_ERR(cq->resize_umem);
+			ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %pe\n",
+				  __func__, cq->resize_umem);
 			cq->resize_umem = NULL;
-			ibdev_err(&rdev->ibdev, "%s: ib_umem_get failed! rc = %d\n",
-				  __func__, rc);
 			goto fail;
 		}
 		cq->resize_cqe = entries;
@@ -4392,6 +4447,93 @@ void bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx)
 	}
 }
 
+static int bnxt_re_setup_vnic(struct bnxt_re_dev *rdev, struct bnxt_re_qp *qp)
+{
+	int rc;
+
+	rc = bnxt_re_hwrm_alloc_vnic(rdev);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_hwrm_cfg_vnic(rdev, qp->qplib_qp.id);
+	if (rc)
+		goto out_free_vnic;
+
+	return 0;
+out_free_vnic:
+	bnxt_re_hwrm_free_vnic(rdev);
+	return rc;
+}
+
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+				    struct ib_flow_attr *attr,
+				    struct ib_udata *udata)
+{
+	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
+	struct bnxt_re_dev *rdev = qp->rdev;
+	struct bnxt_re_flow *flow;
+	int rc;
+
+	if (attr->type != IB_FLOW_ATTR_SNIFFER ||
+	    !rdev->rcfw.roce_mirror)
+		return ERR_PTR(-EOPNOTSUPP);
+
+	mutex_lock(&rdev->qp_lock);
+	if (rdev->sniffer_flow_created) {
+		ibdev_err(&rdev->ibdev, "RoCE Mirroring is already Configured\n");
+		mutex_unlock(&rdev->qp_lock);
+		return ERR_PTR(-EBUSY);
+	}
+
+	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
+	if (!flow) {
+		mutex_unlock(&rdev->qp_lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	flow->rdev = rdev;
+
+	rc = bnxt_re_setup_vnic(rdev, qp);
+	if (rc)
+		goto out_free_flow;
+
+	rc = bnxt_qplib_create_flow(&rdev->qplib_res);
+	if (rc)
+		goto out_free_vnic;
+
+	rdev->sniffer_flow_created = 1;
+	mutex_unlock(&rdev->qp_lock);
+
+	return &flow->ib_flow;
+
+out_free_vnic:
+	bnxt_re_hwrm_free_vnic(rdev);
+out_free_flow:
+	mutex_unlock(&rdev->qp_lock);
+	kfree(flow);
+	return ERR_PTR(rc);
+}
+
+int bnxt_re_destroy_flow(struct ib_flow *flow_id)
+{
+	struct bnxt_re_flow *flow =
+		container_of(flow_id, struct bnxt_re_flow, ib_flow);
+	struct bnxt_re_dev *rdev = flow->rdev;
+	int rc;
+
+	mutex_lock(&rdev->qp_lock);
+	rc = bnxt_qplib_destroy_flow(&rdev->qplib_res);
+	if (rc)
+		ibdev_dbg(&rdev->ibdev, "failed to destroy_flow rc = %d\n", rc);
+	rdev->sniffer_flow_created = 0;
+
+	bnxt_re_hwrm_free_vnic(rdev);
+	mutex_unlock(&rdev->qp_lock);
+	kfree(flow);
+
+	return rc;
+}
+
 static struct bnxt_re_cq *bnxt_re_search_for_cq(struct bnxt_re_dev *rdev, u32 cq_id)
 {
 	struct bnxt_re_cq *cq = NULL, *tmp_cq;
@@ -4604,7 +4746,7 @@ static int UVERBS_HANDLER(BNXT_RE_METHOD_ALLOC_PAGE)(struct uverbs_attr_bundle *
 		return err;
 
 	err = uverbs_copy_to(attrs, BNXT_RE_ALLOC_PAGE_DPI,
-			     &dpi, sizeof(length));
+			     &dpi, sizeof(dpi));
 	if (err)
 		return err;
 
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index fe00ab691a51..76ba9ab04d5c 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -164,6 +164,11 @@ struct bnxt_re_user_mmap_entry {
 	u8 mmap_flag;
 };
 
+struct bnxt_re_flow {
+	struct ib_flow ib_flow;
+	struct bnxt_re_dev *rdev;
+};
+
 static inline u16 bnxt_re_get_swqe_size(int nsge)
 {
 	return sizeof(struct sq_send_hdr) + nsge * sizeof(struct sq_sge);
@@ -267,6 +272,11 @@ struct ib_mr *bnxt_re_reg_user_mr_dmabuf(struct ib_pd *ib_pd, u64 start,
 					 struct uverbs_attr_bundle *attrs);
 int bnxt_re_alloc_ucontext(struct ib_ucontext *ctx, struct ib_udata *udata);
 void bnxt_re_dealloc_ucontext(struct ib_ucontext *context);
+struct ib_flow *bnxt_re_create_flow(struct ib_qp *ib_qp,
+				    struct ib_flow_attr *attr,
+				    struct ib_udata *udata);
+int bnxt_re_destroy_flow(struct ib_flow *flow_id);
+
 int bnxt_re_mmap(struct ib_ucontext *context, struct vm_area_struct *vma);
 void bnxt_re_mmap_free(struct rdma_user_mmap_entry *rdma_entry);
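For context, the new verbs above are reached from userspace as a sniffer flow attached to a raw packet QP. A hedged libibverbs sketch (device open, CQ polling and the INIT->RTR->RTS transitions via ibv_modify_qp() are elided; CAP_NET_RAW is required for IBV_QPT_RAW_PACKET, and the driver code above accepts only one sniffer flow per device):

#include <infiniband/verbs.h>

/* Sketch: attach a sniffer flow to a raw packet QP on port 1. */
static struct ibv_flow *attach_sniffer(struct ibv_pd *pd, struct ibv_cq *cq)
{
	struct ibv_qp_init_attr qp_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = { .max_recv_wr = 512, .max_recv_sge = 1,
			 .max_send_wr = 1, .max_send_sge = 1 },
		.qp_type = IBV_QPT_RAW_PACKET,
	};
	struct ibv_flow_attr flow_attr = {
		.type = IBV_FLOW_ATTR_SNIFFER,	/* mirror all traffic */
		.size = sizeof(flow_attr),
		.num_of_specs = 0,
		.port = 1,
	};
	struct ibv_qp *qp;

	qp = ibv_create_qp(pd, &qp_attr);	/* needs CAP_NET_RAW */
	if (!qp)
		return NULL;

	/* QP must be transitioned to RTR/RTS and recv buffers posted
	 * before mirrored packets arrive; omitted for brevity. */
	return ibv_create_flow(qp, &flow_attr);	/* ibv_destroy_flow() to stop */
}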
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index df7cf8d68e27..b13810572c2e 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -80,6 +80,7 @@ MODULE_LICENSE("Dual BSD/GPL");
 static DEFINE_MUTEX(bnxt_re_mutex);
 
 static int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev);
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev);
 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
 			     u32 *offset);
@@ -188,6 +189,10 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev)
 	rdev->qplib_res.is_vf = BNXT_EN_VF(en_dev);
 	rdev->qplib_res.en_dev = en_dev;
 
+	rc = bnxt_re_query_hwrm_intf_version(rdev);
+	if (rc)
+		goto free_dev_attr;
+
 	bnxt_re_set_drv_mode(rdev);
 
 	bnxt_re_set_db_offset(rdev);
@@ -540,6 +545,72 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
 	fw_msg->timeout = timeout;
 }
 
+void bnxt_re_hwrm_free_vnic(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_en_dev *en_dev = rdev->en_dev;
+	struct hwrm_vnic_free_input req = {};
+	struct bnxt_fw_msg fw_msg = {};
+	int rc;
+
+	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_FREE);
+
+	req.vnic_id = cpu_to_le32(rdev->mirror_vnic_id);
+	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+			    0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+	rc = bnxt_send_msg(en_dev, &fw_msg);
+	if (rc)
+		ibdev_dbg(&rdev->ibdev,
+			  "Failed to free vnic, rc = %d\n", rc);
+}
+
+int bnxt_re_hwrm_alloc_vnic(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_en_dev *en_dev = rdev->en_dev;
+	struct hwrm_vnic_alloc_output resp = {};
+	struct hwrm_vnic_alloc_input req = {};
+	struct bnxt_fw_msg fw_msg = {};
+	int rc;
+
+	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_ALLOC);
+
+	req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+	req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_VNIC_ID_VALID);
+	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+	rc = bnxt_send_msg(en_dev, &fw_msg);
+	if (rc)
+		ibdev_dbg(&rdev->ibdev,
+			  "Failed to alloc vnic, rc = %d\n", rc);
+
+	return rc;
+}
+
+int bnxt_re_hwrm_cfg_vnic(struct bnxt_re_dev *rdev, u32 qp_id)
+{
+	struct bnxt_en_dev *en_dev = rdev->en_dev;
+	struct hwrm_vnic_cfg_input req = {};
+	struct bnxt_fw_msg fw_msg = {};
+	int rc;
+
+	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_VNIC_CFG);
+
+	req.flags = cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE);
+	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_RAW_QP_ID |
+				  VNIC_CFG_REQ_ENABLES_MRU);
+	req.vnic_id = cpu_to_le16(rdev->mirror_vnic_id);
+	req.raw_qp_id = cpu_to_le32(qp_id);
+	req.mru = cpu_to_le16(rdev->netdev->mtu + VLAN_ETH_HLEN);
+
+	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), NULL,
+			    0, BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
+	rc = bnxt_send_msg(en_dev, &fw_msg);
+	if (rc)
+		ibdev_dbg(&rdev->ibdev,
+			  "Failed to cfg vnic, rc = %d\n", rc);
+
+	return rc;
+}
+
 /* Query device config using common hwrm */
 static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
 			     u32 *offset)
@@ -553,11 +624,12 @@ static int bnxt_re_hwrm_qcfg(struct bnxt_re_dev *rdev, u32 *db_len,
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCFG);
 	req.fid = cpu_to_le16(0xffff);
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (!rc) {
 		*db_len = PAGE_ALIGN(le16_to_cpu(resp.l2_doorbell_bar_size_kb) * 1024);
 		*offset = PAGE_ALIGN(le16_to_cpu(resp.legacy_l2_db_size_kb) * 1024);
+		rdev->mirror_vnic_id = le16_to_cpu(resp.mirror_vnic_id);
 	}
 	return rc;
 }
@@ -577,7 +649,7 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_QCAPS);
 	req.fid = cpu_to_le16(0xffff);
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (rc)
@@ -587,6 +659,8 @@ int bnxt_re_hwrm_qcaps(struct bnxt_re_dev *rdev)
 	flags_ext2 = le32_to_cpu(resp.flags_ext2);
 	cctx->modes.dbr_pacing = flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_EXT_SUPPORTED ||
 				 flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_DBR_PACING_V0_SUPPORTED;
+	cctx->modes.roce_mirror = !!(le32_to_cpu(resp.flags_ext3) &
+				     FUNC_QCAPS_RESP_FLAGS_EXT3_MIRROR_ON_ROCE_SUPPORTED);
 	return 0;
 }
 
@@ -603,7 +677,7 @@ static int bnxt_re_hwrm_dbr_pacing_qcfg(struct bnxt_re_dev *rdev)
 	cctx = rdev->chip_ctx;
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_FUNC_DBR_PACING_QCFG);
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (rc)
 		return rc;
@@ -842,20 +916,12 @@ static void bnxt_re_deinitialize_dbr_pacing(struct bnxt_re_dev *rdev)
 static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
 				 u16 fw_ring_id, int type)
 {
-	struct bnxt_en_dev *en_dev;
+	struct bnxt_en_dev *en_dev = rdev->en_dev;
 	struct hwrm_ring_free_input req = {};
 	struct hwrm_ring_free_output resp;
 	struct bnxt_fw_msg fw_msg = {};
 	int rc = -EINVAL;
 
-	if (!rdev)
-		return rc;
-
-	en_dev = rdev->en_dev;
-
-	if (!en_dev)
-		return rc;
-
 	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
 		return 0;
 
@@ -863,7 +929,7 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev,
 	req.ring_type = type;
 	req.ring_id = cpu_to_le16(fw_ring_id);
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (rc)
 		ibdev_err(&rdev->ibdev, "Failed to free HW ring:%d :%#x",
@@ -881,9 +947,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
 	struct bnxt_fw_msg fw_msg = {};
 	int rc = -EINVAL;
 
-	if (!en_dev)
-		return rc;
-
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_RING_ALLOC);
 	req.enables = 0;
 	req.page_tbl_addr = cpu_to_le64(ring_attr->dma_arr[0]);
@@ -899,7 +962,7 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev,
 	req.ring_type = ring_attr->type;
 	req.int_mode = ring_attr->mode;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (!rc)
 		*fw_ring_id = le16_to_cpu(resp.ring_id);
@@ -916,16 +979,13 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
 	struct bnxt_fw_msg fw_msg = {};
 	int rc = -EINVAL;
 
-	if (!en_dev)
-		return rc;
-
 	if (test_bit(BNXT_RE_FLAG_ERR_DEVICE_DETACHED, &rdev->flags))
 		return 0;
 
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_FREE);
 	req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (rc)
 		ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -935,8 +995,7 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
 }
 
 static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
-				       dma_addr_t dma_map,
-				       u32 *fw_stats_ctx_id)
+				       struct bnxt_qplib_stats *stats)
 {
 	struct bnxt_qplib_chip_ctx *chip_ctx = rdev->chip_ctx;
 	struct hwrm_stat_ctx_alloc_output resp = {};
@@ -945,21 +1004,18 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
 	struct bnxt_fw_msg fw_msg = {};
 	int rc = -EINVAL;
 
-	*fw_stats_ctx_id = INVALID_STATS_CTX_ID;
-
-	if (!en_dev)
-		return rc;
+	stats->fw_id = INVALID_STATS_CTX_ID;
 
 	bnxt_re_init_hwrm_hdr((void *)&req, HWRM_STAT_CTX_ALLOC);
 	req.update_period_ms = cpu_to_le32(1000);
-	req.stats_dma_addr = cpu_to_le64(dma_map);
+	req.stats_dma_addr = cpu_to_le64(stats->dma_map);
 	req.stats_dma_length = cpu_to_le16(chip_ctx->hw_stats_size);
 	req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE;
 	bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
-			    sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
+			    sizeof(resp), BNXT_RE_HWRM_CMD_TIMEOUT(rdev));
 	rc = bnxt_send_msg(en_dev, &fw_msg);
 	if (!rc)
-		*fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);
+		stats->fw_id = le32_to_cpu(resp.stat_ctx_id);
 
 	return rc;
 }
@@ -975,7 +1031,7 @@ static ssize_t hw_rev_show(struct device *device, struct device_attribute *attr,
 			   char *buf)
 {
 	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
 
-	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->vendor);
+	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->revision);
 }
 static DEVICE_ATTR_RO(hw_rev);
 
@@ -985,13 +1041,31 @@ static ssize_t hca_type_show(struct device *device,
 			     struct device_attribute *attr, char *buf)
 {
 	struct bnxt_re_dev *rdev =
		rdma_device_to_drv_device(device, struct bnxt_re_dev, ibdev);
 
-	return sysfs_emit(buf, "%s\n", rdev->ibdev.node_desc);
+	return sysfs_emit(buf, "0x%x\n", rdev->en_dev->pdev->device);
 }
 static DEVICE_ATTR_RO(hca_type);
 
+static ssize_t board_id_show(struct device *device, struct device_attribute *attr,
+			     char *buf)
+{
+	struct bnxt_re_dev *rdev = rdma_device_to_drv_device(device,
+							     struct bnxt_re_dev, ibdev);
+	char buffer[BNXT_VPD_FLD_LEN] = {};
+
+	if (!rdev->is_virtfn)
+		memcpy(buffer, rdev->board_partno, BNXT_VPD_FLD_LEN - 1);
+	else
+		scnprintf(buffer, BNXT_VPD_FLD_LEN, "0x%x-VF",
+			  rdev->en_dev->pdev->device);
+
+	return sysfs_emit(buf, "%s\n", buffer);
+}
+static DEVICE_ATTR_RO(board_id);
+
 static struct attribute *bnxt_re_attributes[] = {
 	&dev_attr_hw_rev.attr,
 	&dev_attr_hca_type.attr,
+	&dev_attr_board_id.attr,
 	NULL
 };
@@ -1207,6 +1281,8 @@ static int bnxt_re_fill_res_srq_entry(struct sk_buff *msg, struct ib_srq *ib_srq
 		goto err;
 	if (rdma_nl_put_driver_u32_hex(msg, "max_sge", srq->qplib_srq.max_sge))
 		goto err;
+	if (rdma_nl_put_driver_u32_hex(msg, "srq_limit", srq->qplib_srq.threshold))
+		goto err;
 
 	nla_nest_end(msg, table_attr);
 	return 0;
@@ -1297,6 +1373,8 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 	.reg_user_mr_dmabuf = bnxt_re_reg_user_mr_dmabuf,
 	.req_notify_cq = bnxt_re_req_notify_cq,
 	.resize_cq = bnxt_re_resize_cq,
+	.create_flow = bnxt_re_create_flow,
+	.destroy_flow = bnxt_re_destroy_flow,
 	INIT_RDMA_OBJ_SIZE(ib_ah, bnxt_re_ah, ib_ah),
 	INIT_RDMA_OBJ_SIZE(ib_cq, bnxt_re_cq, ib_cq),
 	INIT_RDMA_OBJ_SIZE(ib_pd, bnxt_re_pd, ib_pd),
@@ -1323,8 +1401,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 
 	/* ib device init */
 	ibdev->node_type = RDMA_NODE_IB_CA;
-	strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA",
-		strlen(BNXT_RE_DESC) + 5);
+	strscpy(ibdev->node_desc, BNXT_RE_DESC " HCA");
 	ibdev->phys_port_cnt = 1;
 
 	addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
@@ -1850,81 +1927,6 @@ static void bnxt_re_dev_stop(struct bnxt_re_dev *rdev)
 	mutex_unlock(&rdev->qp_lock);
 }
 
-static int bnxt_re_update_gid(struct bnxt_re_dev *rdev)
-{
-	struct bnxt_qplib_sgid_tbl *sgid_tbl = &rdev->qplib_res.sgid_tbl;
-	struct bnxt_qplib_gid gid;
-	u16 gid_idx, index;
-	int rc = 0;
-
-	if (!ib_device_try_get(&rdev->ibdev))
-		return 0;
-
-	for (index = 0; index < sgid_tbl->active; index++) {
-		gid_idx = sgid_tbl->hw_id[index];
-
-		if (!memcmp(&sgid_tbl->tbl[index], &bnxt_qplib_gid_zero,
-			    sizeof(bnxt_qplib_gid_zero)))
-			continue;
-		/* need to modify the VLAN enable setting of non VLAN GID only
-		 * as setting is done for VLAN GID while adding GID
-		 */
-		if (sgid_tbl->vlan[index])
-			continue;
-
-		memcpy(&gid, &sgid_tbl->tbl[index], sizeof(gid));
-
-		rc = bnxt_qplib_update_sgid(sgid_tbl, &gid, gid_idx,
-					    rdev->qplib_res.netdev->dev_addr);
-	}
-
-	ib_device_put(&rdev->ibdev);
-	return rc;
-}
-
-static u32 bnxt_re_get_priority_mask(struct bnxt_re_dev *rdev)
-{
-	u32 prio_map = 0, tmp_map = 0;
-	struct net_device *netdev;
-	struct dcb_app app = {};
-
-	netdev = rdev->netdev;
-
-	app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
-	app.protocol = ETH_P_IBOE;
-	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
-	prio_map = tmp_map;
-
-	app.selector = IEEE_8021QAZ_APP_SEL_DGRAM;
-	app.protocol = ROCE_V2_UDP_DPORT;
-	tmp_map = dcb_ieee_getapp_mask(netdev, &app);
-	prio_map |= tmp_map;
-
-	return prio_map;
-}
-
-static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
-{
-	u8 prio_map = 0;
-
-	/* Get priority for roce */
-	prio_map = bnxt_re_get_priority_mask(rdev);
-
-	if (prio_map == rdev->cur_prio_map)
-		return 0;
-	rdev->cur_prio_map = prio_map;
-	/* Actual priorities are not programmed as they are already
-	 * done by L2 driver; just enable or disable priority vlan tagging
-	 */
-	if ((prio_map == 0 && rdev->qplib_res.prio) ||
-	    (prio_map != 0 && !rdev->qplib_res.prio)) {
-		rdev->qplib_res.prio = prio_map;
-		bnxt_re_update_gid(rdev);
-	}
-
-	return 0;
-}
-
 static void bnxt_re_net_unregister_async_event(struct bnxt_re_dev *rdev)
 {
 	if (rdev->is_virtfn)
@@ -1945,7 +1947,31 @@ static void bnxt_re_net_register_async_event(struct bnxt_re_dev *rdev)
 			       ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE);
 }
 
-static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
+static void bnxt_re_read_vpd_info(struct bnxt_re_dev *rdev)
+{
+	struct pci_dev *pdev = rdev->en_dev->pdev;
+	unsigned int vpd_size, kw_len;
+	int pos, size;
+	u8 *vpd_data;
+
+	vpd_data = pci_vpd_alloc(pdev, &vpd_size);
+	if (IS_ERR(vpd_data)) {
+		pci_warn(pdev, "Unable to read VPD, err=%pe\n", vpd_data);
+		return;
+	}
+
+	pos = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
+					   PCI_VPD_RO_KEYWORD_PARTNO, &kw_len);
+	if (pos < 0)
+		goto free;
+
+	size = min_t(int, kw_len, BNXT_VPD_FLD_LEN - 1);
+	memcpy(rdev->board_partno, &vpd_data[pos], size);
+free:
+	kfree(vpd_data);
+}
+
+static int bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
 {
 	struct bnxt_en_dev *en_dev = rdev->en_dev;
 	struct hwrm_ver_get_output resp = {};
@@ -1964,7 +1990,7 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
 	if (rc) {
 		ibdev_err(&rdev->ibdev, "Failed to query HW version, rc = 0x%x",
 			  rc);
-		return;
+		return rc;
 	}
 
 	cctx = rdev->chip_ctx;
@@ -1978,6 +2004,8 @@ static void bnxt_re_query_hwrm_intf_version(struct bnxt_re_dev *rdev)
 
 	if (!cctx->hwrm_cmd_max_timeout)
 		cctx->hwrm_cmd_max_timeout = RCFW_FW_STALL_MAX_TIMEOUT;
+
+	return 0;
 }
 
 static int bnxt_re_ib_init(struct bnxt_re_dev *rdev)
@@ -2039,6 +2067,72 @@ static void bnxt_re_free_gid_ctx(struct bnxt_re_dev *rdev)
 	}
 }
 
+static int bnxt_re_get_stats_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats);
+	if (rc)
+		goto free_stat_mem;
+
+	return 0;
+free_stat_mem:
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+
+	return rc;
+}
+
+static int bnxt_re_get_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+	int rc;
+
+	if (!rdev->rcfw.roce_mirror)
+		return 0;
+
+	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &hctx->stats3);
+	if (rc)
+		return rc;
+
+	rc = bnxt_re_net_stats_ctx_alloc(rdev, &hctx->stats3);
+	if (rc)
+		goto free_stat_mem;
+
+	return 0;
+free_stat_mem:
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+
+	return rc;
+}
+
+static void bnxt_re_put_stats3_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+	if (!rdev->rcfw.roce_mirror)
+		return;
+
+	bnxt_re_net_stats_ctx_free(rdev, hctx->stats3.fw_id);
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats3);
+}
+
+static void bnxt_re_put_stats_ctx(struct bnxt_re_dev *rdev)
+{
+	struct bnxt_qplib_ctx *hctx = &rdev->qplib_ctx;
+	struct bnxt_qplib_res *res = &rdev->qplib_res;
+
+	bnxt_re_net_stats_ctx_free(rdev, hctx->stats.fw_id);
+	bnxt_qplib_free_stats_ctx(res->pdev, &hctx->stats);
+}
+
 static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 {
 	u8 type;
@@ -2049,8 +2143,7 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 	bnxt_re_net_unregister_async_event(rdev);
 	bnxt_re_uninit_dcb_wq(rdev);
 
-	if (test_and_clear_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags))
-		cancel_delayed_work_sync(&rdev->worker);
+	bnxt_re_put_stats3_ctx(rdev);
 
 	bnxt_re_free_gid_ctx(rdev);
 	if (test_and_clear_bit(BNXT_RE_FLAG_RESOURCES_INITIALIZED,
@@ -2064,8 +2157,8 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 	if (rc)
 		ibdev_warn(&rdev->ibdev,
 			   "Failed to deinitialize RCFW: %#x", rc);
-	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
-	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+	bnxt_re_put_stats_ctx(rdev);
+	bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
 	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 	type = bnxt_qplib_get_ring_type(rdev->chip_ctx);
 	bnxt_re_net_ring_free(rdev, rdev->rcfw.creq.ring_id, type);
@@ -2085,16 +2178,6 @@ static void bnxt_re_dev_uninit(struct bnxt_re_dev *rdev, u8 op_type)
 	}
 }
 
-/* worker thread for polling periodic events. Now used for QoS programming*/
-static void bnxt_re_worker(struct work_struct *work)
-{
-	struct bnxt_re_dev *rdev = container_of(work, struct bnxt_re_dev,
-						worker.work);
-
-	bnxt_re_setup_qos(rdev);
-	schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-}
-
 static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 {
 	struct bnxt_re_ring_attr rattr = {};
@@ -2109,8 +2192,9 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 		rc = bnxt_re_register_netdev(rdev);
 		if (rc) {
 			ibdev_err(&rdev->ibdev,
-				  "Failed to register with netedev: %#x\n", rc);
-			return -EINVAL;
+				  "Failed to register with Ethernet driver, rc %d\n",
+				  rc);
+			return rc;
 		}
 	}
 	set_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags);
@@ -2148,8 +2232,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 	/* Check whether VF or PF */
 	bnxt_re_get_sriov_func_type(rdev);
 
-	bnxt_re_query_hwrm_intf_version(rdev);
-
 	/* Establish RCFW Communication Channel to initialize the context
 	 * memory for the function and all child VFs
 	 */
@@ -2199,18 +2281,20 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 	if (rc)
 		goto disable_rcfw;
 
+	bnxt_qplib_query_version(&rdev->rcfw);
 	bnxt_re_set_resource_limits(rdev);
 
-	rc = bnxt_qplib_alloc_ctx(&rdev->qplib_res, &rdev->qplib_ctx, 0,
-				  bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx));
-	if (rc) {
-		ibdev_err(&rdev->ibdev,
-			  "Failed to allocate QPLIB context: %#x\n", rc);
-		goto disable_rcfw;
+	if (!rdev->is_virtfn &&
+	    !bnxt_qplib_is_chip_gen_p5_p7(rdev->chip_ctx)) {
+		rc = bnxt_qplib_alloc_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
+		if (rc) {
+			ibdev_err(&rdev->ibdev,
+				  "Failed to allocate hw context: %#x\n", rc);
+			goto disable_rcfw;
+		}
 	}
-	rc = bnxt_re_net_stats_ctx_alloc(rdev,
-					 rdev->qplib_ctx.stats.dma_map,
-					 &rdev->qplib_ctx.stats.fw_id);
+
+	rc = bnxt_re_get_stats_ctx(rdev);
 	if (rc) {
 		ibdev_err(&rdev->ibdev,
 			  "Failed to allocate stats context: %#x\n", rc);
@@ -2249,15 +2333,6 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 		if (rc)
 			ibdev_warn(&rdev->ibdev,
 				   "Failed to query CC defaults\n");
-		rc = bnxt_re_setup_qos(rdev);
-		if (rc)
-			ibdev_info(&rdev->ibdev,
-				   "RoCE priority not yet configured\n");
-
-		INIT_DELAYED_WORK(&rdev->worker, bnxt_re_worker);
-		set_bit(BNXT_RE_FLAG_QOS_WORK_REG, &rdev->flags);
-		schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
-
 		if (!(rdev->qplib_res.en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT))
 			bnxt_re_vf_res_config(rdev);
 	}
@@ -2270,11 +2345,18 @@ static int bnxt_re_dev_init(struct bnxt_re_dev *rdev, u8 op_type)
 	bnxt_re_init_dcb_wq(rdev);
 	bnxt_re_net_register_async_event(rdev);
 
+	if (!rdev->is_virtfn)
+		bnxt_re_read_vpd_info(rdev);
+
+	rc = bnxt_re_get_stats3_ctx(rdev);
+	if (rc)
+		goto fail;
+
 	return 0;
 free_sctx:
 	bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
-	bnxt_qplib_free_ctx(&rdev->qplib_res, &rdev->qplib_ctx);
+	bnxt_qplib_free_hwctx(&rdev->qplib_res, &rdev->qplib_ctx);
 disable_rcfw:
 	bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index ee36b3d82cc0..ce90d3d834d4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -1307,6 +1307,7 @@ static bool is_optimized_state_transition(struct bnxt_qplib_qp *qp)
 
 int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
+	struct bnxt_qplib_sgid_tbl *sgid_tbl = &res->sgid_tbl;
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
 	struct creq_modify_qp_resp resp = {};
 	struct bnxt_qplib_cmdqmsg msg = {};
@@ -1358,9 +1359,14 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_FLOW_LABEL)
 		req.flow_label = cpu_to_le32(qp->ah.flow_label);
 
-	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX)
-		req.sgid_index = cpu_to_le16(res->sgid_tbl.hw_id
-					     [qp->ah.sgid_index]);
+	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_SGID_INDEX) {
+		if (qp->type == CMDQ_CREATE_QP_TYPE_RAW_ETHERTYPE)
+			req.sgid_index =
+				cpu_to_le16(sgid_tbl->hw_id[qp->ugid_index]);
+		else
+			req.sgid_index =
+				cpu_to_le16(sgid_tbl->hw_id[qp->ah.sgid_index]);
+	}
 
 	if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_HOP_LIMIT)
 		req.hop_limit = qp->ah.hop_limit;
@@ -1464,6 +1470,7 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	qp->access = sb->access;
 	qp->pkey_index = le16_to_cpu(sb->pkey);
 	qp->qkey = le32_to_cpu(sb->qkey);
+	qp->udp_sport = le16_to_cpu(sb->udp_src_port);
 
 	temp32[0] = le32_to_cpu(sb->dgid[0]);
 	temp32[1] = le32_to_cpu(sb->dgid[1]);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
index 4921a214c34c..b990d0c0ce1a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -299,6 +299,7 @@ struct bnxt_qplib_qp {
 	u8				smac[6];
 	u16				vlan_id;
 	u16				port_id;
+	u16				udp_sport;
 	u8				nw_type;
 	struct bnxt_qplib_ah		ah;
 
@@ -344,6 +345,7 @@ struct bnxt_qplib_qp {
 	u32				msn_tbl_sz;
 	bool				is_host_msn_tbl;
 	u8				tos_dscp;
+	u32				ugid_index;
 };
 
 #define BNXT_RE_MAX_MSG_SIZE	0x80000000
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 804bc773b4ef..295a9610f3e6 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -186,7 +186,7 @@ static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
  * wait for command completion. Maximum holding interval is 8 second.
  *
  * Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
  * 0 if command is completed by firmware.
  */
 static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -366,6 +366,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
 	wmb();
 	writel(cmdq_prod, cmdq->cmdq_mbox.prod);
 	writel(RCFW_CMDQ_TRIG_VAL, cmdq->cmdq_mbox.db);
+	print_hex_dump_bytes("req: ", DUMP_PREFIX_OFFSET, msg->req, msg->req_sz);
 	spin_unlock_bh(&hwq->lock);
 	/* Return the CREQ response pointer */
 	return 0;
@@ -381,7 +382,7 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw,
  * This function can not be called from non-sleepable context.
  *
  * Returns:
- * -ETIMEOUT if command is not completed in specific time interval.
+ * -ETIMEDOUT if command is not completed in specific time interval.
 * 0 if command is completed by firmware.
 */
 static int __poll_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
@@ -631,6 +632,7 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	int rc = 0;
 
 	pdev = rcfw->pdev;
+	print_hex_dump_bytes("event: ", DUMP_PREFIX_OFFSET, qp_event, sizeof(*qp_event));
 	switch (qp_event->event) {
 	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
 		err_event = (struct creq_qp_error_notification *)qp_event;
@@ -903,6 +905,10 @@ skip_ctx_setup:
 		flags |= CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED;
 	if (rcfw->res->en_dev->flags & BNXT_EN_FLAG_ROCE_VF_RES_MGMT)
 		flags |= CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT;
+	if (bnxt_qplib_roce_mirror_supported(rcfw->res->cctx)) {
+		flags |= CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED;
+		rcfw->roce_mirror = true;
+	}
 	req.flags |= cpu_to_le16(flags);
 	req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
 	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req), sizeof(resp), 0);
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index ff873c5f1b25..988c89b4232e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -236,6 +236,7 @@ struct bnxt_qplib_rcfw {
 	atomic_t timeout_send;
 	/* cached from chip cctx for quick reference in slow path */
 	u16 max_timeout;
+	bool roce_mirror;
 };
 
 struct bnxt_qplib_cmdqmsg {
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index cc5c82d96839..875d7b52c06a 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -53,12 +53,6 @@
 #include "qplib_sp.h"
 #include "qplib_rcfw.h"
 
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
-				      struct bnxt_qplib_stats *stats);
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
-				      struct bnxt_qplib_chip_ctx *cctx,
-				      struct bnxt_qplib_stats *stats);
-
 /* PBL */
 static void __free_pbl(struct bnxt_qplib_res *res, struct bnxt_qplib_pbl *pbl,
 		       bool is_umem)
@@ -352,8 +346,8 @@ fail:
 }
 
 /* Context Tables */
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
-			 struct bnxt_qplib_ctx *ctx)
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+			   struct bnxt_qplib_ctx *ctx)
 {
 	int i;
 
@@ -367,7 +361,6 @@ void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
 	/* restore original pde level before destroy */
 	ctx->tqm_ctx.pde.level = ctx->tqm_ctx.pde_level;
 	bnxt_qplib_free_hwq(res, &ctx->tqm_ctx.pde);
-	bnxt_qplib_free_stats_ctx(res->pdev, &ctx->stats);
 }
 
 static int bnxt_qplib_alloc_tqm_rings(struct bnxt_qplib_res *res,
@@ -466,7 +459,7 @@ fail:
 }
 
 /*
- * Routine: bnxt_qplib_alloc_ctx
+ * Routine: bnxt_qplib_alloc_hwctx
 * Description:
 *	Context tables are memories which are used by the chip fw.
 *	The 6 tables defined are:
@@ -486,17 +479,13 @@ fail:
 * Returns:
 *	0 if success, else -ERRORS
 */
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
-			 struct bnxt_qplib_ctx *ctx,
-			 bool virt_fn, bool is_p5)
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+			   struct bnxt_qplib_ctx *ctx)
 {
 	struct bnxt_qplib_hwq_attr hwq_attr = {};
 	struct bnxt_qplib_sg_info sginfo = {};
 	int rc;
 
-	if (virt_fn || is_p5)
-		goto stats_alloc;
-
 	/* QPC Tables */
 	sginfo.pgsize = PAGE_SIZE;
 	sginfo.pgshft = PAGE_SHIFT;
@@ -542,16 +531,11 @@ int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
 	rc = bnxt_qplib_alloc_init_hwq(&ctx->tim_tbl, &hwq_attr);
 	if (rc)
 		goto fail;
-stats_alloc:
-	/* Stats */
-	rc = bnxt_qplib_alloc_stats_ctx(res->pdev, res->cctx, &ctx->stats);
-	if (rc)
-		goto fail;
 
 	return 0;
 
 fail:
-	bnxt_qplib_free_ctx(res, ctx);
+	bnxt_qplib_free_hwctx(res, ctx);
 	return rc;
 }
 
@@ -832,8 +816,8 @@ static int bnxt_qplib_alloc_dpi_tbl(struct bnxt_qplib_res *res,
 }
 
 /* Stats */
-static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
-				      struct bnxt_qplib_stats *stats)
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+			       struct bnxt_qplib_stats *stats)
 {
 	if (stats->dma) {
 		dma_free_coherent(&pdev->dev, stats->size,
@@ -843,9 +827,9 @@ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
 	stats->fw_id = -1;
 }
 
-static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
-				      struct bnxt_qplib_chip_ctx *cctx,
-				      struct bnxt_qplib_stats *stats)
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+			       struct bnxt_qplib_chip_ctx *cctx,
+			       struct bnxt_qplib_stats *stats)
 {
 	memset(stats, 0, sizeof(*stats));
 	stats->fw_id = -1;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 6a13927674b4..2ea3b7f232a3 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -65,6 +65,7 @@ struct bnxt_qplib_drv_modes {
 	bool db_push;
 	bool dbr_pacing;
 	u32 toggle_bits;
+	u8 roce_mirror;
 };
 
 enum bnxt_re_toggle_modes {
@@ -303,6 +304,7 @@ struct bnxt_qplib_ctx {
 	struct bnxt_qplib_hwq tim_tbl;
 	struct bnxt_qplib_tqm_ctx tqm_ctx;
 	struct bnxt_qplib_stats stats;
+	struct bnxt_qplib_stats stats3;
 	struct bnxt_qplib_vf_res vf_res;
 };
 
@@ -432,15 +434,19 @@ void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res);
 int bnxt_qplib_init_res(struct bnxt_qplib_res *res);
 void bnxt_qplib_free_res(struct bnxt_qplib_res *res);
 int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct net_device *netdev);
-void bnxt_qplib_free_ctx(struct bnxt_qplib_res *res,
-			 struct bnxt_qplib_ctx *ctx);
-int bnxt_qplib_alloc_ctx(struct bnxt_qplib_res *res,
-			 struct bnxt_qplib_ctx *ctx,
-			 bool virt_fn, bool is_p5);
+void bnxt_qplib_free_hwctx(struct bnxt_qplib_res *res,
+			   struct bnxt_qplib_ctx *ctx);
+int bnxt_qplib_alloc_hwctx(struct bnxt_qplib_res *res,
+			   struct bnxt_qplib_ctx *ctx);
 int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res);
 void bnxt_qplib_unmap_db_bar(struct bnxt_qplib_res *res);
 int bnxt_qplib_determine_atomics(struct pci_dev *dev);
+int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev,
+			       struct bnxt_qplib_chip_ctx *cctx,
+			       struct bnxt_qplib_stats *stats);
+void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev,
+			       struct bnxt_qplib_stats *stats);
 
 static inline void bnxt_qplib_hwq_incr_prod(struct bnxt_qplib_db_info *dbinfo,
 					    struct bnxt_qplib_hwq *hwq, u32 cnt)
@@ -582,6 +588,11 @@ static inline u8 bnxt_qplib_dbr_pacing_en(struct bnxt_qplib_chip_ctx *cctx)
 	return cctx->modes.dbr_pacing;
 }
 
+static inline u8 bnxt_qplib_roce_mirror_supported(struct bnxt_qplib_chip_ctx *cctx)
+{
+	return cctx->modes.roce_mirror;
+}
+
 static inline bool _is_alloc_mr_unified(u16 dev_cap_flags)
 {
 	return dev_cap_flags & CREQ_QUERY_FUNC_RESP_SB_MR_REGISTER_ALLOC;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 68981399598d..9ef581ed785c 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -66,14 +66,15 @@ static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw)
 	return (pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ);
 }
 
-static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
-				     char *fw_ver)
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw)
 {
 	struct creq_query_version_resp resp = {};
 	struct bnxt_qplib_cmdqmsg msg = {};
 	struct cmdq_query_version req = {};
+	struct bnxt_qplib_dev_attr *attr;
 	int rc;
 
+	attr = rcfw->res->dattr;
 	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
 				 CMDQ_BASE_OPCODE_QUERY_VERSION,
 				 sizeof(req));
@@ -82,10 +83,10 @@ static void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw,
 	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
 	if (rc)
 		return;
-	fw_ver[0] = resp.fw_maj;
-	fw_ver[1] = resp.fw_minor;
-	fw_ver[2] = resp.fw_bld;
-	fw_ver[3] = resp.fw_rsvd;
+	attr->fw_ver[0] = resp.fw_maj;
+	attr->fw_ver[1] = resp.fw_minor;
+	attr->fw_ver[2] = resp.fw_bld;
+	attr->fw_ver[3] = resp.fw_rsvd;
 }
 
 int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
@@ -179,8 +180,6 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw)
 	if (_is_max_srq_ext_supported(attr->dev_cap_flags2))
 		attr->max_srq += le16_to_cpu(sb->max_srq_ext);
 
-	bnxt_qplib_query_version(rcfw, attr->fw_ver);
-
 	for (i = 0; i < MAX_TQM_ALLOC_REQ / 4; i++) {
 		temp = le32_to_cpu(sb->tqm_alloc_reqs[i]);
 		tqm_alloc = (u8 *)&temp;
@@ -309,7 +308,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			struct bnxt_qplib_gid *gid, const u8 *smac,
-			u16 vlan_id, bool update, u32 *index)
+			u16 vlan_id, bool update, u32 *index,
+			bool is_ugid, u32 stats_ctx_id)
 {
 	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
 						   struct bnxt_qplib_res,
@@ -374,6 +374,9 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 		req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
 		req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
 
+		req.stats_ctx = cpu_to_le16(CMDQ_ADD_GID_STATS_CTX_STATS_CTX_VALID |
+					    (u16)stats_ctx_id);
+
 		bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
 					sizeof(resp), 0);
 		rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
@@ -397,46 +400,6 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 	return 0;
 }
 
-int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
-			   struct bnxt_qplib_gid *gid, u16 gid_idx,
-			   const u8 *smac)
-{
-	struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
-						   struct bnxt_qplib_res,
-						   sgid_tbl);
-	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct creq_modify_gid_resp resp = {};
-	struct bnxt_qplib_cmdqmsg msg = {};
-	struct cmdq_modify_gid req = {};
-	int rc;
-
-	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
-				 CMDQ_BASE_OPCODE_MODIFY_GID,
-				 sizeof(req));
-
-	req.gid[0] = cpu_to_be32(((u32 *)gid->data)[3]);
-	req.gid[1] = cpu_to_be32(((u32 *)gid->data)[2]);
-	req.gid[2] = cpu_to_be32(((u32 *)gid->data)[1]);
-	req.gid[3] = cpu_to_be32(((u32 *)gid->data)[0]);
-	if (res->prio) {
-		req.vlan |= cpu_to_le16
-			(CMDQ_ADD_GID_VLAN_TPID_TPID_8100 |
-			 CMDQ_ADD_GID_VLAN_VLAN_EN);
-	}
-
-	/* MAC in network format */
-	req.src_mac[0] = cpu_to_be16(((u16 *)smac)[0]);
-	req.src_mac[1] = cpu_to_be16(((u16 *)smac)[1]);
-	req.src_mac[2] = cpu_to_be16(((u16 *)smac)[2]);
-
-	req.gid_index = cpu_to_le16(gid_idx);
-
-	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
-				sizeof(resp), 0);
-	rc = bnxt_qplib_rcfw_send_message(rcfw, &msg);
-	return rc;
-}
-
 /* AH */
 int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah,
 			 bool block)
@@ -1143,3 +1106,40 @@ out:
 	dma_free_coherent(&rcfw->pdev->dev, sbuf.size, sbuf.sb, sbuf.dma_addr);
 	return rc;
 }
+
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res)
+{
+	struct creq_roce_mirror_cfg_resp resp = {};
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct cmdq_roce_mirror_cfg req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+				 sizeof(req));
+
+	req.mirror_flags = (u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE;
+
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+	return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
+
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res)
+{
+	struct creq_roce_mirror_cfg_resp resp = {};
+	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
+	struct cmdq_roce_mirror_cfg req = {};
+	struct bnxt_qplib_cmdqmsg msg = {};
+
+	bnxt_qplib_rcfw_cmd_prep((struct cmdq_base *)&req,
+				 CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG,
+				 sizeof(req));
+
+	req.mirror_flags &= ~((u8)CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE);
+
+	bnxt_qplib_fill_cmdqmsg(&msg, &req, &resp, NULL, sizeof(req),
+				sizeof(resp), 0);
+
+	return bnxt_qplib_rcfw_send_message(rcfw, &msg);
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 09faf4a1e849..147b5d9c0313 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -323,7 +323,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
 int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
-			bool update, u32 *index);
+			bool update, u32 *index,
+			bool is_ugid, u32 stats_ctx_id);
 int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
 			   struct bnxt_qplib_gid *gid, u16 gid_idx,
 			   const u8 *smac);
@@ -358,6 +359,9 @@ int bnxt_qplib_read_context(struct bnxt_qplib_rcfw *rcfw, u8 type, u32 xid,
 			    u32 resp_size, void *resp_va);
 int bnxt_qplib_query_cc_param(struct bnxt_qplib_res *res,
 			      struct bnxt_qplib_cc_param *cc_param);
+void bnxt_qplib_query_version(struct bnxt_qplib_rcfw *rcfw);
+int bnxt_qplib_create_flow(struct bnxt_qplib_res *res);
+int bnxt_qplib_destroy_flow(struct bnxt_qplib_res *res);
 
 #define BNXT_VAR_MAX_WQE	4352
 #define BNXT_VAR_MAX_SLOT_ALIGN	256
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 024845f945ff..99ecd72e72e2 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -144,7 +144,8 @@ struct cmdq_base {
 	#define CMDQ_BASE_OPCODE_MODIFY_CQ		0x90UL
 	#define CMDQ_BASE_OPCODE_QUERY_QP_EXTEND	0x91UL
 	#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT	0x92UL
-	#define CMDQ_BASE_OPCODE_LAST	CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT
+	#define CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG	0x99UL
+	#define CMDQ_BASE_OPCODE_LAST	CMDQ_BASE_OPCODE_ROCE_MIRROR_CFG
 	u8 cmd_size;
 	__le16 flags;
 	__le16 cookie;
@@ -218,6 +219,7 @@ struct cmdq_initialize_fw {
 	#define CMDQ_INITIALIZE_FW_FLAGS_HW_REQUESTER_RETX_SUPPORTED	0x2UL
 	#define CMDQ_INITIALIZE_FW_FLAGS_OPTIMIZE_MODIFY_QP_SUPPORTED	0x8UL
 	#define CMDQ_INITIALIZE_FW_FLAGS_L2_VF_RESOURCE_MGMT		0x10UL
+	#define CMDQ_INITIALIZE_FW_FLAGS_MIRROR_ON_ROCE_SUPPORTED	0x80UL
 	__le16 cookie;
 	u8 resp_size;
 	u8 reserved8;
@@ -788,7 +790,8 @@ struct creq_query_qp_resp_sb {
 	#define CREQ_QUERY_QP_RESP_SB_ACCESS_REMOTE_ATOMIC	0x8UL
 	__le16 pkey;
 	__le32 qkey;
-	__le32 reserved32;
+	__le16 udp_src_port;
+	__le16 reserved16;
 	__le32 dgid[4];
 	__le32 flow_label;
 	__le16 sgid_index;
@@ -2108,6 +2111,43 @@ struct creq_query_roce_stats_ext_resp_sb {
 	__le64 dup_req;
 };
 
+/* cmdq_roce_mirror_cfg (size:192b/24B) */
+struct cmdq_roce_mirror_cfg {
+	u8 opcode;
+	#define CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG 0x99UL
+	#define CMDQ_ROCE_MIRROR_CFG_OPCODE_LAST \
+		CMDQ_ROCE_MIRROR_CFG_OPCODE_ROCE_MIRROR_CFG
+	u8 cmd_size;
+	__le16 flags;
+	__le16 cookie;
+	u8 resp_size;
+	u8 reserved8;
+	__le64 resp_addr;
+	u8 mirror_flags;
+	#define CMDQ_ROCE_MIRROR_CFG_MIRROR_ENABLE 0x1UL
+	u8 rsvd[7];
+};
+
+/* creq_roce_mirror_cfg_resp (size:128b/16B) */
+struct creq_roce_mirror_cfg_resp {
+	u8 type;
+	#define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_MASK     0x3fUL
+	#define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_SFT      0
+	#define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT 0x38UL
+	#define CREQ_ROCE_MIRROR_CFG_RESP_TYPE_LAST \
+		CREQ_ROCE_MIRROR_CFG_RESP_TYPE_QP_EVENT
+	u8 status;
+	__le16 cookie;
+	__le32 reserved32;
+	u8 v;
+	#define CREQ_ROCE_MIRROR_CFG_RESP_V 0x1UL
+	u8 event;
+	#define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG 0x99UL
+	#define CREQ_ROCE_MIRROR_CFG_RESP_EVENT_LAST \
+		CREQ_ROCE_MIRROR_CFG_RESP_EVENT_ROCE_MIRROR_CFG
+	u8 reserved48[6];
+};
+
 /* cmdq_query_func (size:128b/16B) */
 struct cmdq_query_func {
 	u8 opcode;
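The two new HSI structures advertise fixed wire sizes in their comments (192b/24B and 128b/16B). One quick way to sanity-check such layouts outside the kernel tree is a standalone compile-time assertion; the sketch below is illustrative only, using fixed-width userspace types in place of the kernel's u8/__le16/__le32/__le64 typedefs (field widths match, endianness handling is elided):

/* Hypothetical standalone layout check -- not part of this patch. */
#include <stdint.h>
#include <assert.h>

struct cmdq_roce_mirror_cfg {
	uint8_t  opcode;	/* 0x99: ROCE_MIRROR_CFG */
	uint8_t  cmd_size;
	uint16_t flags;		/* __le16 in the kernel header */
	uint16_t cookie;
	uint8_t  resp_size;
	uint8_t  reserved8;
	uint64_t resp_addr;	/* __le64 */
	uint8_t  mirror_flags;	/* bit 0: MIRROR_ENABLE */
	uint8_t  rsvd[7];
};

struct creq_roce_mirror_cfg_resp {
	uint8_t  type;		/* 0x38: QP_EVENT */
	uint8_t  status;
	uint16_t cookie;
	uint32_t reserved32;
	uint8_t  v;
	uint8_t  event;		/* 0x99: ROCE_MIRROR_CFG */
	uint8_t  reserved48[6];
};

static_assert(sizeof(struct cmdq_roce_mirror_cfg) == 24,
	      "command must be 24 bytes on the wire");
static_assert(sizeof(struct creq_roce_mirror_cfg_resp) == 16,
	      "completion must be 16 bytes on the wire");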