From 7c4b1ab9f16732fb921b3f11cd127fa65f26ad5c Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Tue, 18 Aug 2020 14:52:45 +0300 Subject: IB/mlx5: Add DCT RoCE LAG support When DCT QPs work in RoCE LAG mode: 1. DCT creation is allowed only when it is supported 2. The "port" of a DCT QP is assigned in a round-robin way Link: https://lore.kernel.org/r/20200818115245.700581-3-leon@kernel.org Signed-off-by: Mark Zhang Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/linux/mlx5/mlx5_ifc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index de1ffb4804d6..aee25e4fb2cc 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1430,7 +1430,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 log_bf_reg_size[0x5]; - u8 reserved_at_270[0x8]; + u8 reserved_at_270[0x6]; + u8 lag_dct[0x2]; u8 lag_tx_port_affinity[0x1]; u8 reserved_at_279[0x2]; u8 lag_master[0x1]; -- cgit v1.2.3 From b3003a74456f0c1f614a46c07e16abe33bfdd087 Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Fri, 4 Sep 2020 19:41:57 -0300 Subject: RDMA/qedr: Remove fbo and zbva from the MR MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit zbva is always false, so fbo is never read. A 'zero-based-virtual-address' is simply IOVA == 0, and the driver already supports this. Link: https://lore.kernel.org/r/16-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com Acked-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/qedr/verbs.c | 4 ---- drivers/net/ethernet/qlogic/qed/qed_rdma.c | 12 ++---------- include/linux/qed/qed_rdma_if.h | 2 -- 3 files changed, 2 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 9319778f5df4..f7bfc43f0f5c 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -2882,10 +2882,8 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len, mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); mr->hw_mr.page_size_log = PAGE_SHIFT; - mr->hw_mr.fbo = ib_umem_offset(mr->umem); mr->hw_mr.length = len; mr->hw_mr.vaddr = usr_addr; - mr->hw_mr.zbva = false; mr->hw_mr.phy_mr = false; mr->hw_mr.dma_mr = false; @@ -2978,10 +2976,8 @@ static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, mr->hw_mr.pbl_ptr = 0; mr->hw_mr.pbl_two_level = mr->info.pbl_info.two_layered; mr->hw_mr.pbl_page_size_log = ilog2(mr->info.pbl_info.pbl_size); - mr->hw_mr.fbo = 0; mr->hw_mr.length = 0; mr->hw_mr.vaddr = 0; - mr->hw_mr.zbva = false; mr->hw_mr.phy_mr = true; mr->hw_mr.dma_mr = false; diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c index 03894584415d..0df6e0587752 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c +++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c @@ -1521,7 +1521,7 @@ qed_rdma_register_tid(void *rdma_cxt, params->pbl_two_level); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED, - params->zbva); + false); SET_FIELD(flags, RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR, params->phy_mr); @@ -1583,15 +1583,7 @@ qed_rdma_register_tid(void *rdma_cxt, p_ramrod->pd = cpu_to_le16(params->pd); p_ramrod->length_hi = (u8)(params->length >> 32); p_ramrod->length_lo = DMA_LO_LE(params->length); - if (params->zbva) { - /* Lower 32 bits of
the registered MR address. - * In case of zero based MR, will hold FBO - */ - p_ramrod->va.hi = 0; - p_ramrod->va.lo = cpu_to_le32(params->fbo); - } else { - DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); - } + DMA_REGPAIR_LE(p_ramrod->va, params->vaddr); DMA_REGPAIR_LE(p_ramrod->pbl_base, params->pbl_ptr); /* DIF */ diff --git a/include/linux/qed/qed_rdma_if.h b/include/linux/qed/qed_rdma_if.h index f464d85e88a4..aeb242cefebf 100644 --- a/include/linux/qed/qed_rdma_if.h +++ b/include/linux/qed/qed_rdma_if.h @@ -242,10 +242,8 @@ struct qed_rdma_register_tid_in_params { bool pbl_two_level; u8 pbl_page_size_log; u8 page_size_log; - u32 fbo; u64 length; u64 vaddr; - bool zbva; bool phy_mr; bool dma_mr; -- cgit v1.2.3 From 97fb3e33474897c35b7cfe6ff0e16a3e172d9249 Mon Sep 17 00:00:00 2001 From: Michal Kalderon Date: Wed, 2 Sep 2020 19:57:38 +0300 Subject: qede: Notify qedr when mtu has changed Changing the MTU via ethtool is currently not supported for iWARP. Notify the qedr driver so that appropriate logging can take place. Link: https://lore.kernel.org/r/20200902165741.8355-6-michal.kalderon@marvell.com Signed-off-by: Michal Kalderon Signed-off-by: Jason Gunthorpe --- drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 4 +++- drivers/net/ethernet/qlogic/qede/qede_rdma.c | 17 +++++++++++++++++ include/linux/qed/qede_rdma.h | 4 +++- 3 files changed, 23 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index b9aa6384563b..bedbb85a179a 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1026,7 +1026,9 @@ int qede_change_mtu(struct net_device *ndev, int new_mtu) args.u.mtu = new_mtu; args.func = &qede_update_mtu; qede_reload(edev, &args, false); - +#if IS_ENABLED(CONFIG_QED_RDMA) + qede_rdma_event_change_mtu(edev); +#endif edev->ops->common->update_mtu(edev->cdev, new_mtu); return 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c index 769ec2f4d0b7..2f6598086d9b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c +++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c @@ -234,6 +234,15 @@ static void qede_rdma_changeaddr(struct qede_dev *edev) qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR); } +static void qede_rdma_change_mtu(struct qede_dev *edev) +{ + if (qede_rdma_supported(edev)) { + if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify) + qedr_drv->notify(edev->rdma_info.qedr_dev, + QEDE_CHANGE_MTU); + } +} + static struct qede_rdma_event_work * qede_rdma_get_free_event_node(struct qede_dev *edev) { @@ -287,6 +296,9 @@ static void qede_rdma_handle_event(struct work_struct *work) case QEDE_CHANGE_ADDR: qede_rdma_changeaddr(edev); break; + case QEDE_CHANGE_MTU: + qede_rdma_change_mtu(edev); + break; default: DP_NOTICE(edev, "Invalid rdma event %d", event); } @@ -338,3 +350,8 @@ void qede_rdma_event_changeaddr(struct qede_dev *edev) { qede_rdma_add_event(edev, QEDE_CHANGE_ADDR); } + +void qede_rdma_event_change_mtu(struct qede_dev *edev) +{ + qede_rdma_add_event(edev, QEDE_CHANGE_MTU); +} diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 072da2f6da37..0d5564a59a59 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h @@ -20,7 +20,8 @@ enum qede_rdma_event { QEDE_UP, QEDE_DOWN, QEDE_CHANGE_ADDR, - QEDE_CLOSE + QEDE_CLOSE, + QEDE_CHANGE_MTU, }; struct qede_rdma_event_work { @@ -54,6 +55,7 @@
void qede_rdma_dev_event_open(struct qede_dev *dev); void qede_rdma_dev_event_close(struct qede_dev *dev); void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery); void qede_rdma_event_changeaddr(struct qede_dev *edr); +void qede_rdma_event_change_mtu(struct qede_dev *edev); #else static inline int qede_rdma_dev_add(struct qede_dev *dev, -- cgit v1.2.3 From 639bf4415cadff4c18e13aa5cb0dba2d443e3aa7 Mon Sep 17 00:00:00 2001 From: Aharon Landau Date: Thu, 17 Sep 2020 12:02:21 +0300 Subject: net/mlx5: Refactor query port speed functions The functions mlx5_query_port_link_width_oper and mlx5_query_port_ib_proto_oper are always called together, so combine them into a new function, mlx5_query_ib_port_oper, to avoid duplication. Since mlx5i_get_port_settings duplicates the new function, remove it as well. According to the IB spec, link_width_oper and ib_proto_oper should be u16 rather than u8 as currently written, so perform casting as a preparation for a cross-RDMA patch that will fix the type for all drivers in the RDMA subsystem. Fixes: ada68c31ba9c ("net/mlx5: Introduce a new header file for physical port functions") Signed-off-by: Aharon Landau Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/main.c | 27 +++++++++++----------- .../ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 23 ++++-------------- drivers/net/ethernet/mellanox/mlx5/core/port.c | 23 ++++-------------- include/linux/mlx5/port.h | 7 +++--- 4 files changed, 25 insertions(+), 55 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index d60d63221b14..631ca360057f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -326,8 +326,8 @@ out: spin_unlock(&port->mp.mpi_lock); } -static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed, - u8 *active_width) +static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, + u16 *active_speed, u8 *active_width) { switch (eth_proto_oper) { case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII): @@ -384,7 +384,7 @@ static int translate_eth_legacy_proto_oper(u32 eth_proto_oper, u8 *active_speed, return 0; } -static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed, +static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed, u8 *active_width) { switch (eth_proto_oper) { @@ -436,7 +436,7 @@ static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u8 *active_speed, return 0; } -static int translate_eth_proto_oper(u32 eth_proto_oper, u8 *active_speed, +static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed, u8 *active_width, bool ext) { return ext ?
@@ -457,6 +457,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, bool put_mdev = true; u16 qkey_viol_cntr; u32 eth_prot_oper; + u16 active_speed; u8 mdev_port_num; bool ext; int err; @@ -490,9 +491,12 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num, props->active_width = IB_WIDTH_4X; props->active_speed = IB_SPEED_QDR; - translate_eth_proto_oper(eth_prot_oper, &props->active_speed, + translate_eth_proto_oper(eth_prot_oper, &active_speed, &props->active_width, ext); + WARN_ON_ONCE(active_speed & ~0xFF); + props->active_speed = (u8)active_speed; + props->port_cap_flags |= IB_PORT_CM_SUP; props->ip_gids = true; @@ -1183,8 +1187,8 @@ enum mlx5_ib_width { MLX5_IB_WIDTH_12X = 1 << 4 }; -static void translate_active_width(struct ib_device *ibdev, u8 active_width, - u8 *ib_width) +static void translate_active_width(struct ib_device *ibdev, u16 active_width, + u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); @@ -1277,7 +1281,7 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, u16 max_mtu; u16 oper_mtu; int err; - u8 ib_link_width_oper; + u16 ib_link_width_oper; u8 vl_hw_cap; rep = kzalloc(sizeof(*rep), GFP_KERNEL); @@ -1310,16 +1314,13 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port, if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) props->port_cap_flags2 = rep->cap_mask2; - err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port); + err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, + (u16 *)&props->active_speed, port); if (err) goto out; translate_active_width(ibdev, ib_link_width_oper, &props->active_width); - err = mlx5_query_port_ib_proto_oper(mdev, &props->active_speed, port); - if (err) - goto out; - mlx5_query_port_max_mtu(mdev, &max_mtu, port); props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 1eef66ee849e..17f5be801d2f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -174,24 +174,6 @@ static inline int mlx5_ptys_rate_enum_to_int(enum mlx5_ptys_rate rate) } } -static int mlx5i_get_port_settings(struct net_device *netdev, - u16 *ib_link_width_oper, u16 *ib_proto_oper) -{ - struct mlx5e_priv *priv = mlx5i_epriv(netdev); - struct mlx5_core_dev *mdev = priv->mdev; - u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; - int ret; - - ret = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_IB, 1); - if (ret) - return ret; - - *ib_link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); - *ib_proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); - - return 0; -} - static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) { int rate, width; @@ -209,11 +191,14 @@ static int mlx5i_get_speed_settings(u16 ib_link_width_oper, u16 ib_proto_oper) static int mlx5i_get_link_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *link_ksettings) { + struct mlx5e_priv *priv = mlx5i_epriv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; u16 ib_link_width_oper; u16 ib_proto_oper; int speed, ret; - ret = mlx5i_get_port_settings(netdev, &ib_link_width_oper, &ib_proto_oper); + ret = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, &ib_proto_oper, + 1); if (ret) return ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index e4186e84b3ff..4bb219565c58 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -154,24 +154,8 @@ int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration) sizeof(out), MLX5_REG_MLCR, 0, 1); } -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port) -{ - u32 out[MLX5_ST_SZ_DW(ptys_reg)]; - int err; - - err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_IB, local_port); - if (err) - return err; - - *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); - - return 0; -} -EXPORT_SYMBOL_GPL(mlx5_query_port_link_width_oper); - -int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, u8 local_port) +int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, + u16 *proto_oper, u8 local_port) { u32 out[MLX5_ST_SZ_DW(ptys_reg)]; int err; @@ -181,11 +165,12 @@ int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, if (err) return err; + *link_width_oper = MLX5_GET(ptys_reg, out, ib_link_width_oper); *proto_oper = MLX5_GET(ptys_reg, out, ib_proto_oper); return 0; } -EXPORT_SYMBOL(mlx5_query_port_ib_proto_oper); +EXPORT_SYMBOL(mlx5_query_ib_port_oper); /* This function should be used after setting a port register only */ void mlx5_toggle_port_link(struct mlx5_core_dev *dev) diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 2d45a6af52a4..4d33ae0c2d97 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -133,10 +133,9 @@ enum mlx5e_connector_type { int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, int ptys_size, int proto_mask, u8 local_port); -int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, - u8 *link_width_oper, u8 local_port); -int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, - u8 *proto_oper, u8 local_port); + +int mlx5_query_ib_port_oper(struct mlx5_core_dev *dev, u16 *link_width_oper, + u16 *proto_oper, u8 local_port); void mlx5_toggle_port_link(struct mlx5_core_dev *dev); int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, enum mlx5_port_status status); -- cgit v1.2.3 From e27014bdb47eb435f78573685f4196c07329f1f7 Mon Sep 17 00:00:00 2001 From: Aharon Landau Date: Thu, 17 Sep 2020 12:02:22 +0300 Subject: RDMA/mlx5: Delete duplicated mlx5_ptys_width enum The mlx5_ib_width and mlx5_ptys_width enums are identical, so combine them to avoid duplication.
Signed-off-by: Aharon Landau Reviewed-by: Michael Guralnik Signed-off-by: Leon Romanovsky --- drivers/infiniband/hw/mlx5/main.c | 20 ++++++-------------- .../net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 8 -------- include/linux/mlx5/port.h | 8 ++++++++ 3 files changed, 14 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 631ca360057f..2dbacf0ebda3 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1179,32 +1179,24 @@ static int mlx5_ib_query_device(struct ib_device *ibdev, return 0; } -enum mlx5_ib_width { - MLX5_IB_WIDTH_1X = 1 << 0, - MLX5_IB_WIDTH_2X = 1 << 1, - MLX5_IB_WIDTH_4X = 1 << 2, - MLX5_IB_WIDTH_8X = 1 << 3, - MLX5_IB_WIDTH_12X = 1 << 4 -}; - static void translate_active_width(struct ib_device *ibdev, u16 active_width, u8 *ib_width) { struct mlx5_ib_dev *dev = to_mdev(ibdev); - if (active_width & MLX5_IB_WIDTH_1X) + if (active_width & MLX5_PTYS_WIDTH_1X) *ib_width = IB_WIDTH_1X; - else if (active_width & MLX5_IB_WIDTH_2X) + else if (active_width & MLX5_PTYS_WIDTH_2X) *ib_width = IB_WIDTH_2X; - else if (active_width & MLX5_IB_WIDTH_4X) + else if (active_width & MLX5_PTYS_WIDTH_4X) *ib_width = IB_WIDTH_4X; - else if (active_width & MLX5_IB_WIDTH_8X) + else if (active_width & MLX5_PTYS_WIDTH_8X) *ib_width = IB_WIDTH_8X; - else if (active_width & MLX5_IB_WIDTH_12X) + else if (active_width & MLX5_PTYS_WIDTH_12X) *ib_width = IB_WIDTH_12X; else { mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n", - (int)active_width); + active_width); *ib_width = IB_WIDTH_4X; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 17f5be801d2f..cac8f085b16d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -130,14 +130,6 @@ static int mlx5i_flash_device(struct net_device *netdev, return mlx5e_ethtool_flash_device(priv, flash); } -enum mlx5_ptys_width { - MLX5_PTYS_WIDTH_1X = 1 << 0, - MLX5_PTYS_WIDTH_2X = 1 << 1, - MLX5_PTYS_WIDTH_4X = 1 << 2, - MLX5_PTYS_WIDTH_8X = 1 << 3, - MLX5_PTYS_WIDTH_12X = 1 << 4, -}; - static inline int mlx5_ptys_width_enum_to_int(enum mlx5_ptys_width width) { switch (width) { diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index 4d33ae0c2d97..23edd2db4803 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h @@ -125,6 +125,14 @@ enum mlx5e_connector_type { MLX5E_CONNECTOR_TYPE_NUMBER, }; +enum mlx5_ptys_width { + MLX5_PTYS_WIDTH_1X = 1 << 0, + MLX5_PTYS_WIDTH_2X = 1 << 1, + MLX5_PTYS_WIDTH_4X = 1 << 2, + MLX5_PTYS_WIDTH_8X = 1 << 3, + MLX5_PTYS_WIDTH_12X = 1 << 4, +}; + #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ (ext ? MLX5_GET(reg, out, ext_##field) : \ -- cgit v1.2.3 From 9d8feb460adb90e89e64101ce2c1cfcd548a6d83 Mon Sep 17 00:00:00 2001 From: Alex Vesker Date: Mon, 31 Aug 2020 10:01:48 +0300 Subject: RDMA/mlx5: Add sw_owner_v2 bit capability Add the sw_owner_v2 bit, which will be enabled for future devices, replacing the sw_owner bit.
Signed-off-by: Alex Vesker Signed-off-by: Leon Romanovsky --- include/linux/mlx5/mlx5_ifc.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index de1ffb4804d6..3061ceebbaf3 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -420,7 +420,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_1a[0x2]; u8 ipsec_encrypt[0x1]; u8 ipsec_decrypt[0x1]; - u8 reserved_at_1e[0x2]; + u8 sw_owner_v2[0x1]; + u8 reserved_at_1f[0x1]; u8 termination_table_raw_traffic[0x1]; u8 reserved_at_21[0x1]; -- cgit v1.2.3 From a4947e84f23474803b62a2759b5808147e4e15f9 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Sun, 13 Sep 2020 13:29:28 +0300 Subject: overflow: Include header file with SIZE_MAX declaration The various array_size functions use the SIZE_MAX define, but the missing include of limits.h causes code that needs overflow.h to fail to compile. In file included from drivers/infiniband/core/uverbs_std_types_device.c:6: ./include/linux/overflow.h: In function 'array_size': ./include/linux/overflow.h:258:10: error: 'SIZE_MAX' undeclared (first use in this function) 258 | return SIZE_MAX; | ^~~~~~~~ Fixes: 610b15c50e86 ("overflow.h: Add allocation size calculation helpers") Link: https://lore.kernel.org/r/20200913102928.134985-1-leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- include/linux/overflow.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/overflow.h b/include/linux/overflow.h index 93fcef105061..ff3c48f0abc5 100644 --- a/include/linux/overflow.h +++ b/include/linux/overflow.h @@ -3,6 +3,7 @@ #define __LINUX_OVERFLOW_H #include <linux/compiler.h> +#include <linux/limits.h> /* * In the fallback code below, we need to compute the minimum and -- cgit v1.2.3 From 07da1223ec939982497db3caccd6215b55acc35c Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Sun, 4 Oct 2020 18:43:37 +0300 Subject: lib/scatterlist: Add support in dynamic allocation of SG table from pages Extend __sg_alloc_table_from_pages to support dynamic allocation of an SG table from pages. It should be used by drivers that can't supply all the pages at one time. The function returns the last populated SGE in the table; callers should pass it back as an argument on the second and subsequent calls. As before, nents will be equal to the number of populated SGEs (chunks). With this new extension, drivers can benefit from the optimization of merging contiguous pages without needing to allocate all the pages in advance and hold them in a large buffer. E.g., the Infiniband driver can allocate a single page to hold the current batch of page pointers: for a 1TB memory registration, the temporary buffer would consume only 4KB instead of 2GB.
Link: https://lore.kernel.org/r/20201004154340.1080481-2-leon@kernel.org Signed-off-by: Maor Gottlieb Reviewed-by: Christoph Hellwig Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 12 +-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 15 ++-- include/linux/scatterlist.h | 38 +++++---- lib/scatterlist.c | 125 ++++++++++++++++++++++------ tools/testing/scatterlist/main.c | 9 +- 5 files changed, 142 insertions(+), 57 deletions(-) (limited to 'include/linux') diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 12b30075134a..f2eaed6aca3d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -403,6 +403,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; unsigned int sg_page_sizes; + struct scatterlist *sg; int ret; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -410,13 +411,12 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOMEM); alloc_table: - ret = __sg_alloc_table_from_pages(st, pvec, num_pages, - 0, num_pages << PAGE_SHIFT, - max_segment, - GFP_KERNEL); - if (ret) { + sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, + num_pages << PAGE_SHIFT, max_segment, + NULL, 0, GFP_KERNEL); + if (IS_ERR(sg)) { kfree(st); - return ERR_PTR(ret); + return ERR_CAST(sg); } ret = i915_gem_gtt_prepare_pages(obj, st); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index ab524ab3b0b4..f22acd398b1f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -419,6 +419,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) int ret = 0; static size_t sgl_size; static size_t sgt_size; + struct scatterlist *sg; if (vmw_tt->mapped) return 0; @@ -441,13 +442,15 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) return ret; - ret = __sg_alloc_table_from_pages - (&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, - (unsigned long) vsgt->num_pages << PAGE_SHIFT, - dma_get_max_seg_size(dev_priv->dev->dev), - GFP_KERNEL); - if (unlikely(ret != 0)) + sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, + vsgt->num_pages, 0, + (unsigned long) vsgt->num_pages << PAGE_SHIFT, + dma_get_max_seg_size(dev_priv->dev->dev), + NULL, 0, GFP_KERNEL); + if (IS_ERR(sg)) { + ret = PTR_ERR(sg); goto out_sg_alloc_fail; + } if (vsgt->num_pages > vmw_tt->sgt.nents) { uint64_t over_alloc = diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index 45cf7b69d852..36c47e7e66a2 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -165,6 +165,22 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, #define for_each_sgtable_dma_sg(sgt, sg, i) \ for_each_sg((sgt)->sgl, sg, (sgt)->nents, i) +static inline void __sg_chain(struct scatterlist *chain_sg, + struct scatterlist *sgl) +{ + /* + * offset and length are unused for chain entry. Clear them. + */ + chain_sg->offset = 0; + chain_sg->length = 0; + + /* + * Set lowest bit to indicate a link pointer, and make sure to clear + * the termination bit if it happens to be set. 
+ */ + chain_sg->page_link = ((unsigned long) sgl | SG_CHAIN) & ~SG_END; +} + /** * sg_chain - Chain two sglists together * @prv: First scatterlist @@ -178,18 +194,7 @@ static inline void sg_set_buf(struct scatterlist *sg, const void *buf, static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents, struct scatterlist *sgl) { - /* - * offset and length are unused for chain entry. Clear them. - */ - prv[prv_nents - 1].offset = 0; - prv[prv_nents - 1].length = 0; - - /* - * Set lowest bit to indicate a link pointer, and make sure to clear - * the termination bit if it happens to be set. - */ - prv[prv_nents - 1].page_link = ((unsigned long) sgl | SG_CHAIN) - & ~SG_END; + __sg_chain(&prv[prv_nents - 1], sgl); } /** @@ -286,10 +291,11 @@ void sg_free_table(struct sg_table *); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); -int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - gfp_t gfp_mask); +struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + struct scatterlist *prv, unsigned int left_pages, + gfp_t gfp_mask); int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask); diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5d63a8857f36..e102fdfaa75b 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -365,6 +365,37 @@ int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) } EXPORT_SYMBOL(sg_alloc_table); +static struct scatterlist *get_next_sg(struct sg_table *table, + struct scatterlist *cur, + unsigned long needed_sges, + gfp_t gfp_mask) +{ + struct scatterlist *new_sg, *next_sg; + unsigned int alloc_size; + + if (cur) { + next_sg = sg_next(cur); + /* Check if the last entry should be kept for chaining */ + if (!sg_is_last(next_sg) || needed_sges == 1) + return next_sg; + } + + alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC); + new_sg = sg_kmalloc(alloc_size, gfp_mask); + if (!new_sg) + return ERR_PTR(-ENOMEM); + sg_init_table(new_sg, alloc_size); + if (cur) { + __sg_chain(next_sg, new_sg); + table->orig_nents += alloc_size - 1; + } else { + table->sgl = new_sg; + table->orig_nents = alloc_size; + table->nents = 0; + } + return new_sg; +} + /** * __sg_alloc_table_from_pages - Allocate and initialize an sg table from * an array of pages @@ -374,29 +405,63 @@ EXPORT_SYMBOL(sg_alloc_table); * @offset: Offset from start of the first page to the start of a buffer * @size: Number of valid bytes in the buffer (after offset) * @max_segment: Maximum size of a scatterlist node in bytes (page aligned) + * @prv: Last populated SGE in sgt + * @left_pages: Number of pages the caller still has to add after this call + * @gfp_mask: GFP allocation mask * - * Description: - * Allocate and initialize an sg table from a list of pages. Contiguous - * ranges of the pages are squashed into a single scatterlist node up to the - * maximum size specified in @max_segment. An user may provide an offset at a - * start and a size of valid data in a buffer specified by the page array. - * The returned sg table is released by sg_free_table.
+ * Description: + * If @prv is NULL, allocate and initialize an sg table from a list of pages, + * else reuse the scatterlist passed in at @prv. + * Contiguous ranges of the pages are squashed into a single scatterlist + * entry up to the maximum size specified in @max_segment. A user may + * provide an offset at the start and a size of valid data in a buffer + * specified by the page array. * * Returns: - * 0 on success, negative error on failure + * Last SGE in sgt on success, an ERR_PTR value otherwise. + * The allocation in @sgt must be released by sg_free_table. + * + * Notes: + * If this function returns an error, the caller must call + * sg_free_table() to clean up any leftover allocations. */ -int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, - unsigned int n_pages, unsigned int offset, - unsigned long size, unsigned int max_segment, - gfp_t gfp_mask) +struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, + struct page **pages, unsigned int n_pages, unsigned int offset, + unsigned long size, unsigned int max_segment, + struct scatterlist *prv, unsigned int left_pages, + gfp_t gfp_mask) { - unsigned int chunks, cur_page, seg_len, i; - int ret; - struct scatterlist *s; + unsigned int chunks, cur_page, seg_len, i, prv_len = 0; + unsigned int added_nents = 0; + struct scatterlist *s = prv; if (WARN_ON(!max_segment || offset_in_page(max_segment))) - return -EINVAL; + return ERR_PTR(-EINVAL); + + if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv) + return ERR_PTR(-EOPNOTSUPP); + + if (prv) { + unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE + + prv->offset + prv->length) / + PAGE_SIZE; + + if (WARN_ON(offset)) + return ERR_PTR(-EINVAL); + + /* Merge contiguous pages into the last SG */ + prv_len = prv->length; + while (n_pages && page_to_pfn(pages[0]) == paddr) { + if (prv->length + PAGE_SIZE > max_segment) + break; + prv->length += PAGE_SIZE; + paddr++; + pages++; + n_pages--; + } + if (!n_pages) + goto out; + } /* compute number of contiguous chunks */ chunks = 1; @@ -410,13 +475,9 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, } } - ret = sg_alloc_table(sgt, chunks, gfp_mask); - if (unlikely(ret)) - return ret; - /* merging chunks and putting them into the scatterlist */ cur_page = 0; - for_each_sg(sgt->sgl, s, sgt->orig_nents, i) { + for (i = 0; i < chunks; i++) { unsigned int j, chunk_size; /* look for the end of the current chunk */ @@ -429,15 +490,30 @@ int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, break; } + /* Pass how many chunks might be left */ + s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask); + if (IS_ERR(s)) { + /* + * Adjust the entry length to what it was before this + * function was called.
+ */ + if (prv) + prv->length = prv_len; + return s; + } chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset; sg_set_page(s, pages[cur_page], min_t(unsigned long, size, chunk_size), offset); + added_nents++; size -= chunk_size; offset = 0; cur_page = j; } - - return 0; + sgt->nents += added_nents; +out: + if (!left_pages) + sg_mark_end(s); + return s; } EXPORT_SYMBOL(__sg_alloc_table_from_pages); @@ -465,8 +541,9 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask) { - return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size, - SCATTERLIST_MAX_SEGMENT, gfp_mask); + return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, + offset, size, SCATTERLIST_MAX_SEGMENT, + NULL, 0, gfp_mask)); } EXPORT_SYMBOL(sg_alloc_table_from_pages); diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c index c6db0a1989b2..b2c7e9f7b8d3 100644 --- a/tools/testing/scatterlist/main.c +++ b/tools/testing/scatterlist/main.c @@ -79,14 +79,13 @@ int main(void) for (i = 0, test = tests; test->expected_segments; test++, i++) { struct page *pages[MAX_PAGES]; struct sg_table st; - int ret; + struct scatterlist *sg; set_pages(pages, test->pfn, test->num_pages); - ret = __sg_alloc_table_from_pages(&st, pages, test->num_pages, - 0, test->size, test->max_seg, - GFP_KERNEL); - assert(ret == test->alloc_ret); + sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, + test->size, test->max_seg, NULL, 0, GFP_KERNEL); + assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); if (test->alloc_ret) continue; -- cgit v1.2.3
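
For illustration, here is a minimal sketch of how a caller might feed pages to the new chunked __sg_alloc_table_from_pages() in batches. The producer fetch_page_batch() and the batch size are hypothetical placeholders, not part of any commit above; the function signature, the prv/left_pages convention (prv == NULL on the first call, offset == 0 and the previously returned SGE on continuation calls) and the error-cleanup rule are taken from the lib/scatterlist patch. Continuation calls rely on scatterlist chaining, so this assumes CONFIG_ARCH_NO_SG_CHAIN is not set.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>

/* Hypothetical producer: fills @pages with the next @n page pointers. */
void fetch_page_batch(struct page **pages, unsigned int n);

/* @sgt must be zero-initialized; @total is the total number of pages. */
static int build_sgt_in_batches(struct sg_table *sgt, unsigned long total)
{
	struct page *pages[64];
	struct scatterlist *prv = NULL;	/* NULL selects the "first call" path */
	unsigned long left = total;

	while (left) {
		unsigned int n = min_t(unsigned long, left, ARRAY_SIZE(pages));

		fetch_page_batch(pages, n);
		left -= n;
		/*
		 * left > 0 keeps the table open for chaining; on the final
		 * call (left == 0) the helper marks the end of the table.
		 */
		prv = __sg_alloc_table_from_pages(sgt, pages, n, 0,
						  (unsigned long)n << PAGE_SHIFT,
						  SCATTERLIST_MAX_SEGMENT,
						  prv, left, GFP_KERNEL);
		if (IS_ERR(prv)) {
			sg_free_table(sgt);	/* free partial allocations */
			return PTR_ERR(prv);
		}
	}
	return 0;
}

Returning the last populated SGE rather than an int is what makes the incremental mode cheap: when the first page of a batch is physically contiguous with the tail of the previous one, it is merged into that SGE directly, without re-walking the table.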