From 8e1a28e8e6797449dfdfa4739002d1e5939355a8 Mon Sep 17 00:00:00 2001
From: Hadar Hen Zion
Date: Thu, 19 Dec 2013 21:20:12 +0200
Subject: net/mlx4_core: Expose physical port id as PF/VF capability

Add the infrastructure needed to support ndo_get_phys_port_id, which
allows users to identify the physical port to which a net-device is
connected by reading a unique port id. This works for both VFs and PFs.

The driver uses a new device capability, phys_port_id. The PF driver
reads the port's phys_port_id from firmware and stores it. The VF
driver reads the port's phys_port_id from the PF using the
QUERY_FUNC_CAP command.

Signed-off-by: Hadar Hen Zion
Signed-off-by: Amir Vadai
Signed-off-by: David S. Miller
---
 include/linux/mlx4/device.h | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7d3a523160ba..294b7c58fe95 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -454,6 +454,7 @@ struct mlx4_caps {
 	u32			userspace_caps; /* userspace must be aware of these */
 	u32			function_caps;	/* VFs must be aware of these */
 	u16			hca_core_clock;
+	u64			phys_port_id[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
@@ -1113,6 +1114,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 		   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 
-- 
cgit v1.2.3
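For context on how this capability gets consumed: a netdev driver can report
the stored id through the ndo_get_phys_port_id callback. The sketch below is
illustrative only and is not part of this patch; it assumes the mlx4_en
private-data layout (priv->mdev->dev, priv->port) and packs the u64 into the
byte array the networking stack expects.

	/*
	 * Hypothetical sketch: exposing the stored phys_port_id via
	 * ndo_get_phys_port_id. The mlx4_en field names are assumptions.
	 */
	static int mlx4_en_get_phys_port_id(struct net_device *dev,
					    struct netdev_phys_port_id *ppid)
	{
		struct mlx4_en_priv *priv = netdev_priv(dev);
		u64 phys_port_id = priv->mdev->dev->caps.phys_port_id[priv->port];
		int i;

		if (!phys_port_id)
			return -EOPNOTSUPP;

		/* serialize the u64 most-significant byte first */
		ppid->id_len = sizeof(phys_port_id);
		for (i = ppid->id_len - 1; i >= 0; --i) {
			ppid->id[i] = phys_port_id & 0xff;
			phys_port_id >>= 8;
		}
		return 0;
	}
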
From 7ffdf726cfe0d188907bdbb0e7729fb35a69c219 Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Mon, 23 Dec 2013 16:09:43 +0200
Subject: net/mlx4_core: Add basic support for TCP/IP offloads under tunneling

Add the low-level device commands and definitions used for TCP/IP HW
offloads of tunneled/VXLAN traffic, which are supported by the
ConnectX-3 Pro NIC. This is done through the following elements:

- read tunneling device caps in QUERY_DEV_CAP
- add a helper function to do SET_PORT for tunneling
- add DMFS VXLAN steering rule definitions
- add CQE and WQE checksum offload field definitions

Signed-off-by: Or Gerlitz
Signed-off-by: David S. Miller
---
 drivers/net/ethernet/mellanox/mlx4/fw.c   | 19 +++++++++++++-
 drivers/net/ethernet/mellanox/mlx4/main.c | 14 +++++++++++
 drivers/net/ethernet/mellanox/mlx4/mcg.c  | 14 +++++++++--
 drivers/net/ethernet/mellanox/mlx4/port.c | 41 +++++++++++++++++++++++++++++++
 include/linux/mlx4/cmd.h                  |  1 +
 include/linux/mlx4/cq.h                   |  5 ++++
 include/linux/mlx4/device.h               | 37 +++++++++++++++++++++++++++-
 include/linux/mlx4/qp.h                   |  6 +++++
 8 files changed, 133 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 27a84342525f..55c4ea740258 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -134,7 +134,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
 		[5] = "Time stamping support",
 		[6] = "VST (control vlan insertion/stripping) support",
 		[7] = "FSM (MAC anti-spoofing) support",
-		[8] = "Dynamic QP updates support"
+		[8] = "Dynamic QP updates support",
+		[9] = "TCP/IP offloads/flow-steering for VXLAN support"
 	};
 	int i;
 
@@ -536,6 +537,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_RSVD_LKEY_OFFSET		0x98
 #define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET		0xa0
 #define QUERY_DEV_CAP_FW_REASSIGN_MAC		0x9d
+#define QUERY_DEV_CAP_VXLAN			0x9e
 
 	dev_cap->flags2 = 0;
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -701,6 +703,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
 	if (field & 1<<6)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+	if (field & 1<<3)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS;
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
 	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
@@ -849,6 +854,11 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 	field &= 0x7f;
 	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
 
+	/* For guests, disable vxlan tunneling */
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+	field &= 0xf7;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
+
 	/* For guests, report Blueflame disabled */
 	MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
 	field &= 0x7f;
@@ -1274,6 +1284,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 #define INIT_HCA_IN_SIZE		 0x200
 #define INIT_HCA_VERSION_OFFSET		 0x000
 #define	 INIT_HCA_VERSION		 2
+#define INIT_HCA_VXLAN_OFFSET		 0x0c
 #define INIT_HCA_CACHELINE_SZ_OFFSET	 0x0e
 #define INIT_HCA_FLAGS_OFFSET		 0x014
 #define INIT_HCA_QPC_OFFSET		 0x020
@@ -1432,6 +1443,12 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->uar_page_sz,	INIT_HCA_UAR_PAGE_SZ_OFFSET);
 	MLX4_PUT(inbox, param->log_uar_sz,	INIT_HCA_LOG_UAR_SZ_OFFSET);
 
+	/* set parser VXLAN attributes */
+	if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) {
+		u8 parser_params = 0;
+		MLX4_PUT(inbox, parser_params,	INIT_HCA_VXLAN_OFFSET);
+	}
+
 	err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
 		       MLX4_CMD_NATIVE);
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index fbebfb0a6436..d2b8b3918b45 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1444,6 +1444,19 @@ static void choose_steering_mode(struct mlx4_dev *dev,
 			 mlx4_log_num_mgm_entry_size);
 }
 
+static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
+				       struct mlx4_dev_cap *dev_cap)
+{
+	if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+	    dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
+		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
+	else
+		dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
+
+	mlx4_dbg(dev, "Tunneling offload mode is: %s\n",
+		 (dev->caps.tunnel_offload_mode ==
+		  MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) ? "vxlan" : "none");
+}
+
 static int mlx4_init_hca(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1484,6 +1497,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
 		}
 
 		choose_steering_mode(dev, &dev_cap);
+		choose_tunnel_offload_mode(dev, &dev_cap);
 
 		err = mlx4_get_phys_port_id(dev);
 		if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 7c83e6ccc9da..bfe65f7be91c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -697,7 +697,8 @@ const u16 __sw_id_hw[] = {
 	[MLX4_NET_TRANS_RULE_ID_IPV6]	 = 0xE003,
 	[MLX4_NET_TRANS_RULE_ID_IPV4]	 = 0xE002,
 	[MLX4_NET_TRANS_RULE_ID_TCP]	 = 0xE004,
-	[MLX4_NET_TRANS_RULE_ID_UDP]	 = 0xE006
+	[MLX4_NET_TRANS_RULE_ID_UDP]	 = 0xE006,
+	[MLX4_NET_TRANS_RULE_ID_VXLAN]	 = 0xE008
 };
 
 int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
@@ -722,7 +723,9 @@ static const int __rule_hw_sz[] = {
 	[MLX4_NET_TRANS_RULE_ID_TCP] =
 		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
 	[MLX4_NET_TRANS_RULE_ID_UDP] =
-		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
+		sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
+	[MLX4_NET_TRANS_RULE_ID_VXLAN] =
+		sizeof(struct mlx4_net_trans_rule_hw_vxlan)
 };
 
 int mlx4_hw_rule_sz(struct mlx4_dev *dev,
@@ -787,6 +790,13 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
 		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
 		break;
 
+	case MLX4_NET_TRANS_RULE_ID_VXLAN:
+		rule_hw->vxlan.vni =
+			cpu_to_be32(be32_to_cpu(spec->vxlan.vni) << 8);
+		rule_hw->vxlan.vni_mask =
+			cpu_to_be32(be32_to_cpu(spec->vxlan.vni_mask) << 8);
+		break;
+
 	default:
 		return -EINVAL;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..93f75ec78c82 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -800,6 +800,47 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 }
 EXPORT_SYMBOL(mlx4_SET_PORT_SCHEDULER);
 
+enum {
+	VXLAN_ENABLE_MODIFY	= 1 << 7,
+	VXLAN_STEERING_MODIFY	= 1 << 6,
+
+	VXLAN_ENABLE		= 1 << 7,
+};
+
+struct mlx4_set_port_vxlan_context {
+	u32	reserved1;
+	u8	modify_flags;
+	u8	reserved2;
+	u8	enable_flags;
+	u8	steering;
+};
+
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering)
+{
+	int err;
+	u32 in_mod;
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_set_port_vxlan_context *context;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox))
+		return PTR_ERR(mailbox);
+	context = mailbox->buf;
+	memset(context, 0, sizeof(*context));
+
+	context->modify_flags = VXLAN_ENABLE_MODIFY | VXLAN_STEERING_MODIFY;
+	context->enable_flags = VXLAN_ENABLE;
+	context->steering = steering;
+
+	in_mod = MLX4_SET_PORT_VXLAN << 8 | port;
+	err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_VXLAN);
+
 int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
 				struct mlx4_vhcr *vhcr,
 				struct mlx4_cmd_mailbox *inbox,
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 8df61bc5da00..b0ec0fe035c0 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -180,6 +180,7 @@ enum {
 	MLX4_SET_PORT_GID_TABLE = 0x5,
 	MLX4_SET_PORT_PRIO2TC	= 0x8,
 	MLX4_SET_PORT_SCHEDULER = 0x9,
+	MLX4_SET_PORT_VXLAN	= 0xB
 };
 
 enum {
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 98fa492cf406..5dd0d70a0689 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -81,7 +81,12 @@ struct mlx4_ts_cqe {
 } __packed;
 
 enum {
+	MLX4_CQE_L2_TUNNEL_IPOK		= 1 << 31,
 	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
+	MLX4_CQE_L2_TUNNEL		= 1 << 27,
+	MLX4_CQE_L2_TUNNEL_CSUM		= 1 << 26,
+	MLX4_CQE_L2_TUNNEL_IPV4		= 1 << 25,
+
 	MLX4_CQE_QPN_MASK		= 0xffffff,
 };
 
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 294b7c58fe95..c99ecf6d2c67 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -118,6 +118,11 @@ static inline const char *mlx4_steering_mode_str(int steering_mode)
 	}
 }
 
+enum {
+	MLX4_TUNNEL_OFFLOAD_MODE_NONE,
+	MLX4_TUNNEL_OFFLOAD_MODE_VXLAN
+};
+
 enum {
 	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
 	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
@@ -160,7 +165,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_TS			= 1LL <<  5,
 	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL		= 1LL <<  6,
 	MLX4_DEV_CAP_FLAG2_FSM			= 1LL <<  7,
-	MLX4_DEV_CAP_FLAG2_UPDATE_QP		= 1LL <<  8
+	MLX4_DEV_CAP_FLAG2_UPDATE_QP		= 1LL <<  8,
+	MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS	= 1LL <<  9
 };
 
 enum {
@@ -455,6 +461,7 @@ struct mlx4_caps {
 	u32			function_caps; /* VFs must be aware of these */
 	u16			hca_core_clock;
 	u64			phys_port_id[MLX4_MAX_PORTS + 1];
+	int			tunnel_offload_mode;
 };
 
 struct mlx4_buf_list {
@@ -909,6 +916,7 @@ enum mlx4_net_trans_rule_id {
 	MLX4_NET_TRANS_RULE_ID_IPV4,
 	MLX4_NET_TRANS_RULE_ID_TCP,
 	MLX4_NET_TRANS_RULE_ID_UDP,
+	MLX4_NET_TRANS_RULE_ID_VXLAN,
 	MLX4_NET_TRANS_RULE_NUM, /* should be last */
 };
 
@@ -966,6 +974,12 @@ struct mlx4_spec_ib {
 	u8	dst_gid_msk[16];
 };
 
+struct mlx4_spec_vxlan {
+	__be32 vni;
+	__be32 vni_mask;
+
+};
+
 struct mlx4_spec_list {
 	struct list_head list;
 	enum mlx4_net_trans_rule_id id;
@@ -974,6 +988,7 @@ struct mlx4_spec_list {
 		struct mlx4_spec_ib ib;
 		struct mlx4_spec_ipv4 ipv4;
 		struct mlx4_spec_tcp_udp tcp_udp;
+		struct mlx4_spec_vxlan vxlan;
 	};
 };
 
@@ -1060,6 +1075,15 @@ struct mlx4_net_trans_rule_hw_ipv4 {
 	__be32	src_ip_msk;
 } __packed;
 
+struct mlx4_net_trans_rule_hw_vxlan {
+	u8	size;
+	u8	rsvd;
+	__be16	id;
+	__be32	rsvd1;
+	__be32	vni;
+	__be32	vni_mask;
+} __packed;
+
 struct _rule_hw {
 	union {
 		struct {
@@ -1071,9 +1095,19 @@ struct _rule_hw {
 		struct mlx4_net_trans_rule_hw_ib ib;
 		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
 		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
+		struct mlx4_net_trans_rule_hw_vxlan vxlan;
 	};
 };
 
+enum {
+	VXLAN_STEER_BY_OUTER_MAC	= 1 << 0,
+	VXLAN_STEER_BY_OUTER_VLAN	= 1 << 1,
+	VXLAN_STEER_BY_VSID_VNI		= 1 << 2,
+	VXLAN_STEER_BY_INNER_MAC	= 1 << 3,
+	VXLAN_STEER_BY_INNER_VLAN	= 1 << 4,
+};
+
+
 int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u32 qpn,
 				enum mlx4_net_trans_promisc_mode mode);
 int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
@@ -1096,6 +1130,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 			    u8 *pg, u16 *ratelimit);
+int mlx4_SET_PORT_VXLAN(struct mlx4_dev *dev, u8 port, u8 steering);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 6d351473c292..59f8ba84568b 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -109,6 +109,10 @@ enum {
 	MLX4_RSS_TCP_IPV4			= 1 << 4,
 	MLX4_RSS_IPV4				= 1 << 5,
 
+	MLX4_RSS_BY_OUTER_HEADERS		= 0 << 6,
+	MLX4_RSS_BY_INNER_HEADERS		= 2 << 6,
+	MLX4_RSS_BY_INNER_HEADERS_IPONLY	= 3 << 6,
+
 	/* offset of mlx4_rss_context within mlx4_qp_context.pri_path */
 	MLX4_RSS_OFFSET_IN_QPC_PRI_PATH		= 0x24,
 	/* offset of being RSS indirection QP within mlx4_qp_context.flags */
@@ -252,6 +256,8 @@ enum { /* param3 */
 enum {
 	MLX4_WQE_CTRL_NEC		= 1 << 29,
+	MLX4_WQE_CTRL_IIP		= 1 << 28,
+	MLX4_WQE_CTRL_ILP		= 1 << 27,
 	MLX4_WQE_CTRL_FENCE		= 1 << 6,
 	MLX4_WQE_CTRL_CQ_UPDATE		= 3 << 2,
 	MLX4_WQE_CTRL_SOLICITED		= 1 << 1,
-- 
cgit v1.2.3
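To make the intended use of these definitions concrete, here is a hedged
usage sketch, not part of this patch set: it enables VXLAN steering on a
port and attaches a DMFS rule that steers traffic with a given VNI to a QP.
The helper name and the choice of VXLAN_STEER_BY_VSID_VNI are assumptions
for illustration; mlx4_SET_PORT_VXLAN, mlx4_flow_attach and the spec/rule
structures are the APIs shown above.

	/*
	 * Illustrative sketch only: steer tunneled traffic carrying a given
	 * VXLAN VNI to a QP, assuming device-managed flow steering is active.
	 */
	static int example_vxlan_steer(struct mlx4_dev *dev, u8 port, u32 qpn,
				       u32 vni, u64 *reg_id)
	{
		struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_VXLAN };
		struct mlx4_net_trans_rule rule = {
			.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
			.exclusive	= 0,
			.allow_loopback	= 1,
			.promisc_mode	= MLX4_FS_REGULAR,
			.port		= port,
			.priority	= MLX4_DOMAIN_NIC,
			.qpn		= qpn,
		};
		int err;

		/* assumed mode: ask firmware to steer by VNI on this port */
		err = mlx4_SET_PORT_VXLAN(dev, port, VXLAN_STEER_BY_VSID_VNI);
		if (err)
			return err;

		/* 24-bit VNI; parse_trans_rule() above shifts it into place */
		spec.vxlan.vni = cpu_to_be32(vni);
		spec.vxlan.vni_mask = cpu_to_be32(0xffffff);

		INIT_LIST_HEAD(&rule.list);
		list_add_tail(&spec.list, &rule.list);

		return mlx4_flow_attach(dev, &rule, reg_id);
	}
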
From 4de6580360867d44adecb2d05febed1c8d186c82 Mon Sep 17 00:00:00 2001
From: Matan Barak
Date: Thu, 7 Nov 2013 15:25:14 +0200
Subject: mlx4_core: Add support for steerable IB UD QPs

This patch adds support for allocating IB UD QPs that we can steer
traffic from. We introduce a new firmware command
FLOW_STEERING_IB_UC_QP_RANGE and a capability bit. This command isn't
supported for VFs.

Signed-off-by: Matan Barak
Signed-off-by: Or Gerlitz
Signed-off-by: Roland Dreier
---
 drivers/net/ethernet/mellanox/mlx4/cmd.c              |  9 +++++++++
 drivers/net/ethernet/mellanox/mlx4/fw.c               | 10 ++++++++++
 drivers/net/ethernet/mellanox/mlx4/mcg.c              | 17 +++++++++++++++++
 drivers/net/ethernet/mellanox/mlx4/mlx4.h             |  5 +++++
 drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 10 ++++++++++
 include/linux/mlx4/cmd.h                              |  1 +
 include/linux/mlx4/device.h                           |  6 +++++-
 7 files changed, 57 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1e9970d2f0f3..0d02fba94536 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -1371,6 +1371,15 @@ static struct mlx4_cmd_info cmd_info[] = {
 		.verify = NULL,
 		.wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper
 	},
+	{
+		.opcode = MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+		.has_inbox = false,
+		.has_outbox = false,
+		.out_is_imm = false,
+		.encode_slave_id = false,
+		.verify = NULL,
+		.wrapper = mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper
+	},
 };
 
 static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 194928214606..4bd2d80d065e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -513,6 +513,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_XRC_OFFSET		0x67
 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
 #define QUERY_DEV_CAP_EXT_2_FLAGS_OFFSET	0x70
+#define QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET	0x74
 #define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET	0x76
 #define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET	0x77
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
@@ -603,6 +604,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	if (field & 0x80)
 		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN;
 	dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f;
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+	if (field & 0x80)
+		dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DMFS_IPOIB;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET);
 	dev_cap->fs_max_num_qp_per_entry = field;
 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
@@ -860,6 +864,12 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
 		MLX4_PUT(outbox->buf, field,
 			 QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET);
 	}
+
+	/* turn off ipoib managed steering for guests */
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+	field &= ~0x80;
+	MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index acf9d5f1f922..34dffcf61bff 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -895,6 +895,23 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
 }
 EXPORT_SYMBOL_GPL(mlx4_flow_detach);
 
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+				      u32 max_range_qpn)
+{
+	int err;
+	u64 in_param;
+
+	in_param = ((u64) min_range_qpn) << 32;
+	in_param |= ((u64) max_range_qpn) & 0xFFFFFFFF;
+
+	err = mlx4_cmd(dev, in_param, 0, 0,
+			MLX4_FLOW_STEERING_IB_UC_QP_RANGE,
+			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_FLOW_STEERING_IB_UC_QP_RANGE);
+
 int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 			  int block_mcast_loopback, enum mlx4_protocol prot,
 			  enum mlx4_steer_type steer)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e582a41a802b..59f67f9086dc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1236,6 +1236,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
 					 struct mlx4_cmd_mailbox *inbox,
 					 struct mlx4_cmd_mailbox *outbox,
 					 struct mlx4_cmd_info *cmd);
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+					      struct mlx4_vhcr *vhcr,
+					      struct mlx4_cmd_mailbox *inbox,
+					      struct mlx4_cmd_mailbox *outbox,
+					      struct mlx4_cmd_info *cmd);
 
 int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
 int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2f3f2bc7f283..663510325c22 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3844,6 +3844,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
 	return err;
 }
 
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE_wrapper(struct mlx4_dev *dev, int slave,
+					      struct mlx4_vhcr *vhcr,
+					      struct mlx4_cmd_mailbox *inbox,
+					      struct mlx4_cmd_mailbox *outbox,
+					      struct mlx4_cmd_info *cmd)
+{
+	return -EPERM;
+}
+
+
 static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
 {
 	struct res_gid *rgid;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 8df61bc5da00..ff36620f88a7 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -157,6 +157,7 @@ enum {
 	/* register/delete flow steering network rules */
 	MLX4_QP_FLOW_STEERING_ATTACH = 0x65,
 	MLX4_QP_FLOW_STEERING_DETACH = 0x66,
+	MLX4_FLOW_STEERING_IB_UC_QP_RANGE = 0x64,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7d3a523160ba..7de9fde3a9dd 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -160,7 +160,8 @@ enum {
 	MLX4_DEV_CAP_FLAG2_TS			= 1LL <<  5,
 	MLX4_DEV_CAP_FLAG2_VLAN_CONTROL		= 1LL <<  6,
 	MLX4_DEV_CAP_FLAG2_FSM			= 1LL <<  7,
-	MLX4_DEV_CAP_FLAG2_UPDATE_QP		= 1LL <<  8
+	MLX4_DEV_CAP_FLAG2_UPDATE_QP		= 1LL <<  8,
+	MLX4_DEV_CAP_FLAG2_DMFS_IPOIB		= 1LL <<  9
 };
 
 enum {
@@ -1144,6 +1145,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave, u8 port, int
 void mlx4_put_slave_node_guid(struct mlx4_dev *dev, int slave, __be64 guid);
 __be64 mlx4_get_slave_node_guid(struct mlx4_dev *dev, int slave);
 
+int mlx4_FLOW_STEERING_IB_UC_QP_RANGE(struct mlx4_dev *dev, u32 min_range_qpn,
+				      u32 max_range_qpn);
+
 cycle_t mlx4_read_clock(struct mlx4_dev *dev);
 
 #endif /* MLX4_DEVICE_H */
-- 
cgit v1.2.3
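A hedged usage sketch, not from this patch: a PF-side consumer would
typically reserve a QP range first and then report that range to firmware as
legal IB UD flow-steering targets. The caller shown is an assumption; on a
VF the wrapper above makes the same command fail with -EPERM.

	/* Illustrative sketch only: reserve QPs and mark them steerable. */
	static int example_alloc_steerable_ud_range(struct mlx4_dev *dev,
						    int count, int align,
						    int *base_qpn)
	{
		int err;

		err = mlx4_qp_reserve_range(dev, count, align, base_qpn);
		if (err)
			return err;

		/* PF only; a VF gets -EPERM from the wrapper above */
		err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(dev, *base_qpn,
							*base_qpn + count - 1);
		if (err)
			mlx4_qp_release_range(dev, *base_qpn, count);
		return err;
	}
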
From dd5f03beb4f76ae65d76d8c22a8815e424fc607c Mon Sep 17 00:00:00 2001
From: Matan Barak
Date: Thu, 12 Dec 2013 18:03:11 +0200
Subject: IB/core: Ethernet L2 attributes in verbs/cm structures

This patch adds support for Ethernet L2 attributes in the verbs/cm/cma
structures. When dealing with L2 Ethernet, we should use smac, dmac,
vlan ID and priority in a similar manner that the IB L2 (and the L4
PKEY) attributes are used. Thus, those attributes were added to the
following structures:

* ib_ah_attr - added dmac
* ib_qp_attr - added smac and vlan_id (sl remains vlan priority)
* ib_wc - added smac, vlan_id
* ib_sa_path_rec - added smac, dmac, vlan_id
* cm_av - added smac and vlan_id

For the path record structure, extra care was taken to avoid the new
fields when packing it into wire format, so we don't break the IB CM
and SA wire protocol.

On the active side, the CM fills its internal structures from the path
provided by the ULP; we add code there that takes the ETH L2 attributes
and places them in the CM address handle (struct cm_av).

On the passive side, the CM fills its internal structures from the WC
associated with the REQ message; we add code there that takes the ETH
L2 attributes from the WC.

When the HW driver provides the required ETH L2 attributes in the WC,
it sets the IB_WC_WITH_SMAC and IB_WC_WITH_VLAN flags. The IB core code
checks for the presence of these flags, and in their absence does
address resolution from the ib_init_ah_from_wc() helper function.

ib_modify_qp_is_ok is also updated to consider the link layer. Some
parameters are mandatory for the Ethernet link layer, while they are
irrelevant for IB. Vendor drivers are modified to support the new
function signature.

Signed-off-by: Matan Barak
Signed-off-by: Or Gerlitz
Signed-off-by: Roland Dreier
---
 drivers/infiniband/core/addr.c              | 97 ++++++++++++++++++++++++++-
 drivers/infiniband/core/cm.c                | 50 +++++++++++++++
 drivers/infiniband/core/cma.c               | 60 +++++++++++++++---
 drivers/infiniband/core/sa_query.c          | 12 +++-
 drivers/infiniband/core/verbs.c             | 55 +++++++++++++++-
 drivers/infiniband/hw/ehca/ehca_qp.c        |  2 +-
 drivers/infiniband/hw/ipath/ipath_qp.c      |  2 +-
 drivers/infiniband/hw/mlx4/qp.c             |  9 ++-
 drivers/infiniband/hw/mlx5/qp.c             |  3 +-
 drivers/infiniband/hw/mthca/mthca_qp.c      |  3 +-
 drivers/infiniband/hw/ocrdma/ocrdma_verbs.c |  3 +-
 drivers/infiniband/hw/qib/qib_qp.c          |  2 +-
 include/linux/mlx4/device.h                 |  1 +
 include/rdma/ib_addr.h                      | 42 +++++++++++-
 include/rdma/ib_cm.h                        |  1 +
 include/rdma/ib_pack.h                      |  1 +
 include/rdma/ib_sa.h                        |  3 +
 include/rdma/ib_verbs.h                     | 21 ++++++-
 18 files changed, 343 insertions(+), 24 deletions(-)

diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e90f2b2eabd7..8172d37f9add 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -86,6 +86,8 @@ int rdma_addr_size(struct sockaddr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_size);
 
+static struct rdma_addr_client self;
+
 void rdma_addr_register_client(struct rdma_addr_client *client)
 {
 	atomic_set(&client->refcount, 1);
@@ -119,7 +121,8 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 }
 EXPORT_SYMBOL(rdma_copy_addr);
 
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+		      u16 *vlan_id)
 {
 	struct net_device *dev;
 	int ret = -EADDRNOTAVAIL;
@@ -142,6 +145,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 			return ret;
 
 		ret = rdma_copy_addr(dev_addr, dev, NULL);
+		if (vlan_id)
+			*vlan_id = rdma_vlan_dev_vlan_id(dev);
 		dev_put(dev);
 		break;
 
@@ -153,6 +158,8 @@ int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 					  &((struct sockaddr_in6 *) addr)->sin6_addr,
 					  dev, 1)) {
 				ret = rdma_copy_addr(dev_addr, dev, NULL);
+				if (vlan_id)
+					*vlan_id = rdma_vlan_dev_vlan_id(dev);
 				break;
 			}
 		}
@@ -238,7 +245,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
 	src_in->sin_addr.s_addr = fl4.saddr;
 
 	if (rt->dst.dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -286,7 +293,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	}
 
 	if (dst->dev->flags & IFF_LOOPBACK) {
-		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
+		ret = rdma_translate_ip((struct sockaddr *)dst_in, addr, NULL);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
 		goto put;
@@ -437,6 +444,88 @@ void rdma_addr_cancel(struct rdma_dev_addr *addr)
 }
 EXPORT_SYMBOL(rdma_addr_cancel);
 
+struct resolve_cb_context {
+	struct rdma_dev_addr *addr;
+	struct completion comp;
+};
+
+static void resolve_cb(int status, struct sockaddr *src_addr,
+	     struct rdma_dev_addr *addr, void *context)
+{
+	memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct
+				rdma_dev_addr));
+	complete(&((struct resolve_cb_context *)context)->comp);
+}
+
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
+			       u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	struct resolve_cb_context ctx;
+	struct net_device *dev;
+
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} sgid_addr, dgid_addr;
+
+
+	ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+	if (ret)
+		return ret;
+
+	ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
+	if (ret)
+		return ret;
+
+	memset(&dev_addr, 0, sizeof(dev_addr));
+
+	ctx.addr = &dev_addr;
+	init_completion(&ctx.comp);
+	ret = rdma_resolve_ip(&self, &sgid_addr._sockaddr, &dgid_addr._sockaddr,
+			&dev_addr, 1000, resolve_cb, &ctx);
+	if (ret)
+		return ret;
+
+	wait_for_completion(&ctx.comp);
+
+	memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN);
+	dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if);
+	if (!dev)
+		return -ENODEV;
+	if (vlan_id)
+		*vlan_id = rdma_vlan_dev_vlan_id(dev);
+	dev_put(dev);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_dmac_by_grh);
+
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
+{
+	int ret = 0;
+	struct rdma_dev_addr dev_addr;
+	union {
+		struct sockaddr     _sockaddr;
+		struct sockaddr_in  _sockaddr_in;
+		struct sockaddr_in6 _sockaddr_in6;
+	} gid_addr;
+
+	ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+
+	if (ret)
+		return ret;
+	memset(&dev_addr, 0, sizeof(dev_addr));
+	ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
+	if (ret)
+		return ret;
+
+	memcpy(smac, dev_addr.src_dev_addr, ETH_ALEN);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_addr_find_smac_by_sgid);
+
 static int netevent_callback(struct notifier_block *self, unsigned long event,
 	void *ctx)
 {
@@ -461,11 +550,13 @@ static int __init addr_init(void)
 		return -ENOMEM;
 
 	register_netevent_notifier(&nb);
+	rdma_addr_register_client(&self);
 
 	return 0;
 }
 
 static void __exit addr_cleanup(void)
 {
+	rdma_addr_unregister_client(&self);
 	unregister_netevent_notifier(&nb);
 	destroy_workqueue(addr_wq);
 }
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index f2ef7ef0f36f..c6d543c3ef52 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -47,6 +47,7 @@
 #include <linux/sysfs.h>
 #include <linux/workqueue.h>
 #include <linux/kdev_t.h>
+#include <linux/etherdevice.h>
 
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
@@ -177,6 +178,8 @@ struct cm_av {
 	struct ib_ah_attr ah_attr;
 	u16 pkey_index;
 	u8 timeout;
+	u8  valid;
+	u8  smac[ETH_ALEN];
 };
 
 struct cm_work {
@@ -346,6 +349,23 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
 			   grh, &av->ah_attr);
 }
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac)
+{
+	struct cm_id_private *cm_id_priv;
+
+	cm_id_priv = container_of(id, struct cm_id_private, id);
+
+	if (smac != NULL)
+		memcpy(cm_id_priv->av.smac, smac, sizeof(cm_id_priv->av.smac));
+
+	if (alt_smac != NULL)
+		memcpy(cm_id_priv->alt_av.smac, alt_smac,
+		       sizeof(cm_id_priv->alt_av.smac));
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_update_cm_av);
+
 static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 {
 	struct cm_device *cm_dev;
@@ -376,6 +396,9 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
 	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
 			     &av->ah_attr);
 	av->timeout = path->packet_life_time + 1;
+	memcpy(av->smac, path->smac, sizeof(av->smac));
+
+	av->valid = 1;
 	return 0;
 }
 
@@ -1554,6 +1577,9 @@ static int cm_req_handler(struct cm_work *work)
 
 	cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
 	cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
+
+	memcpy(work->path[0].dmac, cm_id_priv->av.ah_attr.dmac, ETH_ALEN);
+	work->path[0].vlan_id = cm_id_priv->av.ah_attr.vlan_id;
 	ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
 	if (ret) {
 		ib_get_cached_gid(work->port->cm_dev->ib_device,
@@ -3500,6 +3526,30 @@ static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
 		*qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
 				IB_QP_DEST_QPN | IB_QP_RQ_PSN;
 		qp_attr->ah_attr = cm_id_priv->av.ah_attr;
+		if (!cm_id_priv->av.valid)
+			return -EINVAL;
+		if (cm_id_priv->av.ah_attr.vlan_id != 0xffff) {
+			qp_attr->vlan_id = cm_id_priv->av.ah_attr.vlan_id;
+			*qp_attr_mask |= IB_QP_VID;
+		}
+		if (!is_zero_ether_addr(cm_id_priv->av.smac)) {
+			memcpy(qp_attr->smac, cm_id_priv->av.smac,
+			       sizeof(qp_attr->smac));
+			*qp_attr_mask |= IB_QP_SMAC;
+		}
+		if (cm_id_priv->alt_av.valid) {
+			if (cm_id_priv->alt_av.ah_attr.vlan_id != 0xffff) {
+				qp_attr->alt_vlan_id =
+					cm_id_priv->alt_av.ah_attr.vlan_id;
+				*qp_attr_mask |= IB_QP_ALT_VID;
+			}
+			if (!is_zero_ether_addr(cm_id_priv->alt_av.smac)) {
+				memcpy(qp_attr->alt_smac,
+				       cm_id_priv->alt_av.smac,
+				       sizeof(qp_attr->alt_smac));
+				*qp_attr_mask |= IB_QP_ALT_SMAC;
+			}
+		}
 		qp_attr->path_mtu = cm_id_priv->path_mtu;
 		qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
 		qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 8e49db690f33..0caf465f317c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -340,7 +340,7 @@ static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
 	int ret;
 
 	if (addr->sa_family != AF_IB) {
-		ret = rdma_translate_ip(addr, dev_addr);
+		ret = rdma_translate_ip(addr, dev_addr, NULL);
 	} else {
 		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
 		ret = 0;
@@ -603,6 +603,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
+	union ib_gid sgid;
 
 	mutex_lock(&id_priv->qp_mutex);
 	if (!id_priv->id.qp) {
@@ -625,6 +626,20 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
+	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
+			   qp_attr.ah_attr.grh.sgid_index, &sgid);
+	if (ret)
+		goto out;
+
+	if (rdma_node_get_transport(id_priv->cma_dev->device->node_type)
+	    == RDMA_TRANSPORT_IB &&
+	    rdma_port_get_link_layer(id_priv->id.device, id_priv->id.port_num)
+	    == IB_LINK_LAYER_ETHERNET) {
+		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
+
+		if (ret)
+			goto out;
+	}
 	if (conn_param)
 		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
 	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
@@ -725,6 +740,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
 						 qp_attr_mask);
+
 		if (qp_attr->qp_state == IB_QPS_RTR)
 			qp_attr->rq_psn = id_priv->seq_num;
 		break;
@@ -1266,6 +1282,15 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_id_private *listen_id, *conn_id;
 	struct rdma_cm_event event;
 	int offset, ret;
+	u8 smac[ETH_ALEN];
+	u8 alt_smac[ETH_ALEN];
+	u8 *psmac = smac;
+	u8 *palt_smac = alt_smac;
+	int is_iboe = ((rdma_node_get_transport(cm_id->device->node_type) ==
+			RDMA_TRANSPORT_IB) &&
+		       (rdma_port_get_link_layer(cm_id->device,
+			ib_event->param.req_rcvd.port) ==
+			IB_LINK_LAYER_ETHERNET));
 
 	listen_id = cm_id->context;
 	if (!cma_check_req_qp_type(&listen_id->id, ib_event))
@@ -1310,12 +1335,29 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret)
 		goto err3;
 
+	if (is_iboe) {
+		if (ib_event->param.req_rcvd.primary_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.primary_path->sgid,
+				psmac, NULL);
+		else
+			psmac = NULL;
+		if (ib_event->param.req_rcvd.alternate_path != NULL)
+			rdma_addr_find_smac_by_sgid(
+				&ib_event->param.req_rcvd.alternate_path->sgid,
+				palt_smac, NULL);
+		else
+			palt_smac = NULL;
+	}
 	/*
 	 * Acquire mutex to prevent user executing rdma_destroy_id()
 	 * while we're accessing the cm_id.
 	 */
 	mutex_lock(&lock);
-	if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
+	if (is_iboe)
+		ib_update_cm_av(cm_id, psmac, palt_smac);
+	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
+	    (conn_id->id.qp_type != IB_QPT_UD))
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 	mutex_unlock(&lock);
 	mutex_unlock(&conn_id->handler_mutex);
@@ -1474,7 +1516,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
 	conn_id->state = RDMA_CM_CONNECT;
 
-	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
+	ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr, NULL);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1873,7 +1915,7 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 	struct cma_work *work;
 	int ret;
 	struct net_device *ndev = NULL;
-	u16 vid;
+
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (!work)
@@ -1897,10 +1939,14 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	vid = rdma_vlan_dev_vlan_id(ndev);
+	route->path_rec->vlan_id = rdma_vlan_dev_vlan_id(ndev);
+	memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
+	memcpy(route->path_rec->smac, ndev->dev_addr, ndev->addr_len);
 
-	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr, vid);
-	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr, vid);
+	iboe_mac_vlan_to_ll(&route->path_rec->sgid, addr->dev_addr.src_dev_addr,
+			    route->path_rec->vlan_id);
+	iboe_mac_vlan_to_ll(&route->path_rec->dgid, addr->dev_addr.dst_dev_addr,
+			    route->path_rec->vlan_id);
 
 	route->path_rec->hop_limit = 1;
 	route->path_rec->reversible = 1;
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9838ca484389..f820958e4047 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -42,7 +42,7 @@
 #include <linux/kref.h>
 #include <linux/idr.h>
 #include <linux/workqueue.h>
-
+#include <uapi/linux/if_ether.h>
 #include <rdma/ib_pack.h>
 #include <rdma/ib_cache.h>
 #include "sa.h"
@@ -556,6 +556,13 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
 		ah_attr->grh.hop_limit     = rec->hop_limit;
 		ah_attr->grh.traffic_class = rec->traffic_class;
 	}
+	if (force_grh) {
+		memcpy(ah_attr->dmac, rec->dmac, ETH_ALEN);
+		ah_attr->vlan_id = rec->vlan_id;
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(ib_init_ah_from_path);
@@ -670,6 +677,9 @@ static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
 
 		ib_unpack(path_rec_table, ARRAY_SIZE(path_rec_table),
 			  mad->data, &rec);
+		rec.vlan_id = 0xffff;
+		memset(rec.dmac, 0, ETH_ALEN);
+		memset(rec.smac, 0, ETH_ALEN);
 		query->callback(status, &rec, query->context);
 	} else
 		query->callback(status, NULL, query->context);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index d4f6ddf72ffa..7978394738a3 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -44,6 +44,7 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/ib_cache.h>
+#include <rdma/ib_addr.h>
 
 int ib_rate_to_mult(enum ib_rate rate)
 {
@@ -192,8 +193,28 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
 	u32 flow_class;
 	u16 gid_index;
 	int ret;
+	int is_eth = (rdma_port_get_link_layer(device, port_num) ==
+			IB_LINK_LAYER_ETHERNET);
 
 	memset(ah_attr, 0, sizeof *ah_attr);
+	if (is_eth) {
+		if (!(wc->wc_flags & IB_WC_GRH))
+			return -EPROTOTYPE;
+
+		if (wc->wc_flags & IB_WC_WITH_SMAC &&
+		    wc->wc_flags & IB_WC_WITH_VLAN) {
+			memcpy(ah_attr->dmac, wc->smac, ETH_ALEN);
+			ah_attr->vlan_id = wc->vlan_id;
+		} else {
+			ret = rdma_addr_find_dmac_by_grh(&grh->dgid, &grh->sgid,
+					ah_attr->dmac, &ah_attr->vlan_id);
+			if (ret)
+				return ret;
+		}
+	} else {
+		ah_attr->vlan_id = 0xffff;
+	}
+
 	ah_attr->dlid = wc->slid;
 	ah_attr->sl = wc->sl;
 	ah_attr->src_path_bits = wc->dlid_path_bits;
@@ -476,7 +497,9 @@ EXPORT_SYMBOL(ib_create_qp);
 static const struct {
 	int			valid;
 	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	req_param_add_eth[IB_QPT_MAX];
 	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
+	enum ib_qp_attr_mask	opt_param_add_eth[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
 	[IB_QPS_RESET] = {
 		[IB_QPS_RESET] = { .valid = 1 },
@@ -557,6 +580,12 @@ static const struct {
 						IB_QP_MAX_DEST_RD_ATOMIC	|
 						IB_QP_MIN_RNR_TIMER),
 		},
+		.req_param_add_eth = {
+			[IB_QPT_RC]  = (IB_QP_SMAC),
+			[IB_QPT_UC]  = (IB_QP_SMAC),
+			[IB_QPT_XRC_INI] = (IB_QP_SMAC),
+			[IB_QPT_XRC_TGT] = (IB_QP_SMAC)
+		},
 		.opt_param = {
 			 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX		|
 					 IB_QP_QKEY),
@@ -576,7 +605,21 @@ static const struct {
 					 IB_QP_QKEY),
 			 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX		|
 					 IB_QP_QKEY),
-		 }
+		 },
+		.opt_param_add_eth = {
+			[IB_QPT_RC]  = (IB_QP_ALT_SMAC			|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_UC]  = (IB_QP_ALT_SMAC			|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_XRC_INI] = (IB_QP_ALT_SMAC		|
+					IB_QP_VID			|
+					IB_QP_ALT_VID),
+			[IB_QPT_XRC_TGT] = (IB_QP_ALT_SMAC		|
+					IB_QP_VID			|
+					IB_QP_ALT_VID)
+		}
 	},
 	[IB_QPS_RTR] = {
@@ -779,7 +822,8 @@ static const struct {
 };
 
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+		       enum rdma_link_layer ll)
 {
 	enum ib_qp_attr_mask req_param, opt_param;
 
@@ -798,6 +842,13 @@ int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
 	req_param = qp_state_table[cur_state][next_state].req_param[type];
 	opt_param = qp_state_table[cur_state][next_state].opt_param[type];
 
+	if (ll == IB_LINK_LAYER_ETHERNET) {
+		req_param |= qp_state_table[cur_state][next_state].
+			req_param_add_eth[type];
+		opt_param |= qp_state_table[cur_state][next_state].
+			opt_param_add_eth[type];
+	}
+
 	if ((mask & req_param) != req_param)
 		return 0;
 
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00d6861a6a18..2e89356c46fa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -1329,7 +1329,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 	qp_new_state = attr_mask & IB_QP_STATE ? attr->qp_state : qp_cur_state;
 	if (!smi_reset2init &&
 	    !ib_modify_qp_is_ok(qp_cur_state, qp_new_state, ibqp->qp_type,
-				attr_mask)) {
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED)) {
 		ret = -EINVAL;
 		ehca_err(ibqp->device,
 			 "Invalid qp transition new_state=%x cur_state=%x "
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index 0857a9c3cd3d..face87602dc1 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -463,7 +463,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask))
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 		goto inval;
 
 	if (attr_mask & IB_QP_AV) {
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4f10af2905b5..da6f5fa0c328 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1561,13 +1561,18 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-
+	int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+	if (cur_state == new_state && cur_state == IB_QPS_RESET)
+		p = IB_LINK_LAYER_UNSPECIFIED;
+
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
+				attr_mask,
+				rdma_port_get_link_layer(&dev->ib_dev, p))) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 7c6b4ba49bec..ca2936233713 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1616,7 +1616,8 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
-	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask))
+	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED))
 		goto out;
 
 	if ((attr_mask & IB_QP_PORT) &&
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 26a684536109..e354b2f04ad9 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -860,7 +860,8 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
 
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
+	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED)) {
 		mthca_dbg(dev, "Bad QP transition (transport %d) "
 			  "%d->%d with attr 0x%08x\n",
 			  qp->transport, cur_state, new_state,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 7686dceadd29..a0f1c47c333f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1326,7 +1326,8 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		new_qps = old_qps;
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
-	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
+	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask,
+				IB_LINK_LAYER_UNSPECIFIED)) {
 		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
 		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
 		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3cca55b51e54..0cad0c40d742 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -585,7 +585,7 @@ int qib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask))
+				attr_mask, IB_LINK_LAYER_UNSPECIFIED))
 		goto inval;
 
 	if (attr_mask & IB_QP_AV) {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 7d3a523160ba..9347a78f7872 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1095,6 +1095,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 int mlx4_SET_PORT_PRIO2TC(struct mlx4_dev *dev, u8 port, u8 *prio2tc);
 int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 			    u8 *pg, u16 *ratelimit);
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
 void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index f3ac0f2c4c66..a0715606ebb2 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -42,6 +42,7 @@
 #include <linux/netdevice.h>
 #include <linux/socket.h>
 #include <rdma/ib_verbs.h>
+#include <net/ipv6.h>
 
 struct rdma_addr_client {
 	atomic_t refcount;
@@ -72,7 +73,8 @@ struct rdma_dev_addr {
  * rdma_translate_ip - Translate a local IP address to an RDMA hardware
  *   address.
  */
-int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr);
+int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr,
+		      u16 *vlan_id);
 
 /**
  * rdma_resolve_ip - Resolve source and destination IP addresses to
@@ -104,6 +106,10 @@ int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
 
 int rdma_addr_size(struct sockaddr *addr);
 
+int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id);
+int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *smac,
+			       u16 *vlan_id);
+
 static inline u16 ib_addr_get_pkey(struct rdma_dev_addr *dev_addr)
 {
 	return ((u16)dev_addr->broadcast[8] << 8) | (u16)dev_addr->broadcast[9];
@@ -142,6 +148,40 @@ static inline void iboe_mac_vlan_to_ll(union ib_gid *gid, u8 *mac, u16 vid)
 	gid->raw[8] ^= 2;
 }
 
+static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
+{
+	switch (addr->sa_family) {
+	case AF_INET:
+		ipv6_addr_set_v4mapped(((struct sockaddr_in *)
+					addr)->sin_addr.s_addr,
+				       (struct in6_addr *)gid);
+		break;
+	case AF_INET6:
+		memcpy(gid->raw, &((struct sockaddr_in6 *)addr)->sin6_addr, 16);
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
+static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+{
+	if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
+		struct sockaddr_in *out_in = (struct sockaddr_in *)out;
+		memset(out_in, 0, sizeof(*out_in));
+		out_in->sin_family = AF_INET;
+		memcpy(&out_in->sin_addr.s_addr, gid->raw + 12, 4);
+	} else {
+		struct sockaddr_in6 *out_in = (struct sockaddr_in6 *)out;
+		memset(out_in, 0, sizeof(*out_in));
+		out_in->sin6_family = AF_INET6;
+		memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
+	}
+	return 0;
+}
+
 static inline u16 rdma_vlan_dev_vlan_id(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_802_1Q_VLAN ?
 		vlan_dev_vlan_id(dev) : 0xffff;
 }
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30647d5..f29e3a27c2cc 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -601,4 +601,5 @@ struct ib_cm_sidr_rep_param {
 int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
 			struct ib_cm_sidr_rep_param *param);
 
+int ib_update_cm_av(struct ib_cm_id *id, const u8 *smac, const u8 *alt_smac);
 #endif /* IB_CM_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b37fe3b10a9d..b1f7592e02e4 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -34,6 +34,7 @@
 #define IB_PACK_H
 
 #include <rdma/ib_verbs.h>
+#include <uapi/linux/if_ether.h>
 
 enum {
 	IB_LRH_BYTES  = 8,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index 125f8714301d..7e071a6abb34 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -154,6 +154,9 @@ struct ib_sa_path_rec {
 	u8           packet_life_time_selector;
 	u8           packet_life_time;
 	u8           preference;
+	u8           smac[ETH_ALEN];
+	u8           dmac[ETH_ALEN];
+	u16	     vlan_id;
 };
 
 #define IB_SA_MCMEMBER_REC_MGID				IB_SA_COMP_MASK( 0)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 61e1935c91b1..ea0f6eed7863 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -48,6 +48,7 @@
 #include <linux/rwsem.h>
 #include <linux/scatterlist.h>
 #include <linux/workqueue.h>
+#include <uapi/linux/if_ether.h>
 
 #include <linux/atomic.h>
 #include <asm/uaccess.h>
@@ -472,6 +473,8 @@ struct ib_ah_attr {
 	u8			static_rate;
 	u8			ah_flags;
 	u8			port_num;
+	u8			dmac[ETH_ALEN];
+	u16			vlan_id;
 };
 
 enum ib_wc_status {
@@ -524,6 +527,8 @@ enum ib_wc_flags {
 	IB_WC_WITH_IMM		= (1<<1),
 	IB_WC_WITH_INVALIDATE	= (1<<2),
 	IB_WC_IP_CSUM_OK	= (1<<3),
+	IB_WC_WITH_SMAC		= (1<<4),
+	IB_WC_WITH_VLAN		= (1<<5),
 };
 
 struct ib_wc {
@@ -544,6 +549,8 @@ struct ib_wc {
 	u8			sl;
 	u8			dlid_path_bits;
 	u8			port_num;	/* valid only for DR SMPs on switches */
+	u8			smac[ETH_ALEN];
+	u16			vlan_id;
 };
 
 enum ib_cq_notify_flags {
@@ -721,7 +728,11 @@ enum ib_qp_attr_mask {
 	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
 	IB_QP_PATH_MIG_STATE		= (1<<18),
 	IB_QP_CAP			= (1<<19),
-	IB_QP_DEST_QPN			= (1<<20)
+	IB_QP_DEST_QPN			= (1<<20),
+	IB_QP_SMAC			= (1<<21),
+	IB_QP_ALT_SMAC			= (1<<22),
+	IB_QP_VID			= (1<<23),
+	IB_QP_ALT_VID			= (1<<24),
 };
 
 enum ib_qp_state {
@@ -771,6 +782,10 @@ struct ib_qp_attr {
 	u8			rnr_retry;
 	u8			alt_port_num;
 	u8			alt_timeout;
+	u8			smac[ETH_ALEN];
+	u8			alt_smac[ETH_ALEN];
+	u16			vlan_id;
+	u16			alt_vlan_id;
 };
 
 enum ib_wr_opcode {
@@ -1488,6 +1503,7 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
 * @next_state: Next QP state
 * @type: QP type
 * @mask: Mask of supplied QP attributes
+ * @ll : link layer of port
 *
 * This function is a helper function that a low-level driver's
 * modify_qp method can use to validate the consumer's input.  It
@@ -1496,7 +1512,8 @@ static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len
 * and that the attribute mask supplied is allowed for the transition.
 */
 int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
-		       enum ib_qp_type type, enum ib_qp_attr_mask mask);
+		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
+		       enum rdma_link_layer ll);
 
 int ib_register_event_handler  (struct ib_event_handler *event_handler);
 int ib_unregister_event_handler(struct ib_event_handler *event_handler);
-- 
cgit v1.2.3
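To illustrate the contract described above, here is a hedged provider-side
sketch, not part of this patch; the "hw_" parameters stand for
driver-private completion state and are assumptions. A low-level driver that
can parse the received Ethernet header reports the L2 attributes in the work
completion, so ib_init_ah_from_wc() does not have to fall back to address
resolution.

	/*
	 * Illustrative sketch only: a provider driver reporting ETH L2
	 * info in the work completion.
	 */
	static void example_fill_wc_l2(struct ib_wc *wc, const u8 *hw_smac,
				       bool hw_vlan_present, u16 hw_vid)
	{
		memcpy(wc->smac, hw_smac, ETH_ALEN);
		wc->wc_flags |= IB_WC_WITH_SMAC;

		/* 0xffff means "no VLAN" throughout this series */
		wc->vlan_id = hw_vlan_present ? hw_vid : 0xffff;
		wc->wc_flags |= IB_WC_WITH_VLAN;
	}
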
From 297e0dad720664dad44baa2cdd13f871979fb58c Mon Sep 17 00:00:00 2001
From: Moni Shoua
Date: Thu, 12 Dec 2013 18:03:14 +0200
Subject: IB/mlx4: Handle Ethernet L2 parameters for IP based GID addressing

IP based RoCE gids don't store Ethernet L2 parameters, MAC and VLAN.
Therefore, we need to extract them from the CQE and place them in
struct ib_wc (to be used for cases where they were taken from the gid).

Also, when modifying a QP or building an address handle, instead of
parsing the dgid to get the MAC and VLAN, take them from the address
handle attributes.

Signed-off-by: Moni Shoua
Signed-off-by: Or Gerlitz
Signed-off-by: Roland Dreier
---
 drivers/infiniband/hw/mlx4/ah.c           |  40 +++---------
 drivers/infiniband/hw/mlx4/cq.c           |   9 +++
 drivers/infiniband/hw/mlx4/mlx4_ib.h      |   3 -
 drivers/infiniband/hw/mlx4/qp.c           | 105 +++++++++++++++++++++-------
 drivers/net/ethernet/mellanox/mlx4/port.c |  20 ++++++
 include/linux/mlx4/cq.h                   |  15 +++--
 6 files changed, 130 insertions(+), 62 deletions(-)

diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index a251becdaa98..170dca608042 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -39,25 +39,6 @@
 
 #include "mlx4_ib.h"
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port)
-{
-	struct in6_addr in6;
-
-	*is_mcast = 0;
-
-	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof in6);
-	if (rdma_link_local_addr(&in6))
-		rdma_get_ll_mac(&in6, mac);
-	else if (rdma_is_multicast_addr(&in6)) {
-		rdma_get_mcast_mac(&in6, mac);
-		*is_mcast = 1;
-	} else
-		return -EINVAL;
-
-	return 0;
-}
-
 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr,
 				  struct mlx4_ib_ah *ah)
 {
@@ -92,21 +73,18 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
 {
 	struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
 	struct mlx4_dev *dev = ibdev->dev;
-	union ib_gid sgid;
-	u8 mac[6];
-	int err;
 	int is_mcast;
+	struct in6_addr in6;
 	u16 vlan_tag;
 
-	err = mlx4_ib_resolve_grh(ibdev, ah_attr, mac, &is_mcast, ah_attr->port_num);
-	if (err)
-		return ERR_PTR(err);
-
-	memcpy(ah->av.eth.mac, mac, 6);
-	err = ib_get_cached_gid(pd->device, ah_attr->port_num, ah_attr->grh.sgid_index, &sgid);
-	if (err)
-		return ERR_PTR(err);
-	vlan_tag = rdma_get_vlan_id(&sgid);
+	memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
+	if (rdma_is_multicast_addr(&in6)) {
+		is_mcast = 1;
+		rdma_get_mcast_mac(&in6, ah->av.eth.mac);
+	} else {
+		memcpy(ah->av.eth.mac, ah_attr->dmac, ETH_ALEN);
+	}
+	vlan_tag = ah_attr->vlan_id;
 	if (vlan_tag < 0x1000)
 		vlan_tag |= (ah_attr->sl & 7) << 13;
 	ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 66dbf8062374..cc40f08ca8f1 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -798,6 +798,15 @@ repoll:
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 13;
 		else
 			wc->sl  = be16_to_cpu(cqe->sl_vid) >> 12;
+		if (be32_to_cpu(cqe->vlan_my_qpn) & MLX4_CQE_VLAN_PRESENT_MASK) {
+			wc->vlan_id = be16_to_cpu(cqe->sl_vid) &
+				MLX4_CQE_VID_MASK;
+		} else {
+			wc->vlan_id = 0xffff;
+		}
+		wc->wc_flags |= IB_WC_WITH_VLAN;
+		memcpy(wc->smac, cqe->smac, ETH_ALEN);
+		wc->wc_flags |= IB_WC_WITH_SMAC;
 	}
 
 	return 0;
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 133f41f42194..c06f571619df 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -678,9 +678,6 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
 			union ib_gid *gid, int netw_view);
 
-int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,
-			u8 *mac, int *is_mcast, u8 port);
-
 static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)
 {
 	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index da6f5fa0c328..e0c2186529ff 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -90,6 +90,21 @@ enum {
 	MLX4_RAW_QP_MSGMAX	= 31,
 };
 
+#ifndef ETH_ALEN
+#define ETH_ALEN        6
+#endif
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+	u64 mac = 0;
+	int i;
+
+	for (i = 0; i < ETH_ALEN; i++) {
+		mac <<= 8;
+		mac |= addr[i];
+	}
+	return mac;
+}
+
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]				= cpu_to_be32(MLX4_OPCODE_SEND),
 	[IB_WR_LSO]				= cpu_to_be32(MLX4_OPCODE_LSO),
@@ -1144,16 +1159,15 @@ static void mlx4_set_sched(struct mlx4_qp_path *path, u8 port)
 	path->sched_queue = (path->sched_queue & 0xbf) | ((port - 1) << 6);
 }
 
-static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
-			 struct mlx4_qp_path *path, u8 port)
+static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
+			  u64 smac, u16 vlan_tag, struct mlx4_qp_path *path,
+			  u8 port)
 {
-	int err;
 	int is_eth = rdma_port_get_link_layer(&dev->ib_dev, port) ==
 		IB_LINK_LAYER_ETHERNET;
-	u8 mac[6];
-	int is_mcast;
-	u16 vlan_tag;
 	int vidx;
+	int smac_index;
+
 
 	path->grh_mylmc     = ah->src_path_bits & 0x7f;
 	path->rlid	    = cpu_to_be16(ah->dlid);
@@ -1188,22 +1202,27 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 		if (!(ah->ah_flags & IB_AH_GRH))
 			return -1;
 
-		err = mlx4_ib_resolve_grh(dev, ah, mac, &is_mcast, port);
-		if (err)
-			return err;
-
-		memcpy(path->dmac, mac, 6);
+		memcpy(path->dmac, ah->dmac, ETH_ALEN);
 		path->ackto = MLX4_IB_LINK_TYPE_ETH;
-		/* use index 0 into MAC table for IBoE */
-		path->grh_mylmc &= 0x80;
+		/* find the index into MAC table for IBoE */
+		if (!is_zero_ether_addr((const u8 *)&smac)) {
+			if (mlx4_find_cached_mac(dev->dev, port, smac,
+						 &smac_index))
+				return -ENOENT;
+		} else {
+			smac_index = 0;
+		}
+
+		path->grh_mylmc &= 0x80 | smac_index;
 
-		vlan_tag = rdma_get_vlan_id(&dev->iboe.gid_table[port - 1][ah->grh.sgid_index]);
+		path->feup |= MLX4_FEUP_FORCE_ETH_UP;
 		if (vlan_tag < 0x1000) {
 			if (mlx4_find_cached_vlan(dev->dev, port, vlan_tag, &vidx))
 				return -ENOENT;
 			path->vlan_index = vidx;
 			path->fl = 1 << 6;
+			path->feup |= MLX4_FVL_FORCE_ETH_VLAN;
 		}
 	} else
 		path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
@@ -1212,6 +1231,28 @@ static int _mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 	return 0;
 }
 
+static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
+			 enum ib_qp_attr_mask qp_attr_mask,
+			 struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->smac),
+			      (qp_attr_mask & IB_QP_VID) ? qp->vlan_id : 0xffff,
+			      path, port);
+}
+
+static int mlx4_set_alt_path(struct mlx4_ib_dev *dev,
+			     const struct ib_qp_attr *qp,
+			     enum ib_qp_attr_mask qp_attr_mask,
+			     struct mlx4_qp_path *path, u8 port)
+{
+	return _mlx4_set_path(dev, &qp->alt_ah_attr,
+			      mlx4_mac_to_u64((u8 *)qp->alt_smac),
+			      (qp_attr_mask & IB_QP_ALT_VID) ?
+			      qp->alt_vlan_id : 0xffff,
+			      path, port);
+}
+
 static void update_mcg_macs(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 {
 	struct mlx4_ib_gid_entry *ge, *tmp;
@@ -1329,7 +1370,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 	}
 
 	if (attr_mask & IB_QP_AV) {
-		if (mlx4_set_path(dev, &attr->ah_attr, &context->pri_path,
+		if (mlx4_set_path(dev, attr, attr_mask, &context->pri_path,
 				  attr_mask & IB_QP_PORT ?
 				  attr->port_num : qp->port))
 			goto out;
@@ -1352,8 +1393,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		    dev->dev->caps.pkey_table_len[attr->alt_port_num])
 			goto out;
 
-		if (mlx4_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
-				  attr->alt_port_num))
+		if (mlx4_set_alt_path(dev, attr, attr_mask, &context->alt_path,
+				      attr->alt_port_num))
 			goto out;
 
 		context->alt_path.pkey_index = attr->alt_pkey_index;
@@ -1464,6 +1505,17 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			context->pri_path.ackto = (context->pri_path.ackto & 0xf8) |
 						  MLX4_IB_LINK_TYPE_ETH;
 
+	if (ibqp->qp_type == IB_QPT_UD && (new_state == IB_QPS_RTR)) {
+		int is_eth = rdma_port_get_link_layer(
+				&dev->ib_dev, qp->port) ==
+				IB_LINK_LAYER_ETHERNET;
+		if (is_eth) {
+			context->pri_path.ackto = MLX4_IB_LINK_TYPE_ETH;
+			optpar |= MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH;
+		}
+	}
+
+
 	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD	&&
 	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
 		sqd_event = 1;
@@ -1561,18 +1613,21 @@ int mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	struct mlx4_ib_qp *qp = to_mqp(ibqp);
 	enum ib_qp_state cur_state, new_state;
 	int err = -EINVAL;
-	int p = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+	int ll;
 	mutex_lock(&qp->mutex);
 
 	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
 	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 
-	if (cur_state == new_state && cur_state == IB_QPS_RESET)
-		p = IB_LINK_LAYER_UNSPECIFIED;
+	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
+		ll = IB_LINK_LAYER_UNSPECIFIED;
+	} else {
+		int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
+		ll = rdma_port_get_link_layer(&dev->ib_dev, port);
+	}
 
 	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
-				attr_mask,
-				rdma_port_get_link_layer(&dev->ib_dev, p))) {
+				attr_mask, ll)) {
 		pr_debug("qpn 0x%x: invalid attribute mask specified "
 			 "for transition %d to %d. qp_type %d,"
 			 " attr_mask 0x%x\n",
@@ -1789,8 +1844,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
 			return err;
 		}
 
-		vlan = rdma_get_vlan_id(&sgid);
-		is_vlan = vlan < 0x1000;
+		if (ah->av.eth.vlan != 0xffff) {
+			vlan = be16_to_cpu(ah->av.eth.vlan) & 0x0fff;
+			is_vlan = 1;
+		}
 	}
 	ib_ud_header_init(send_size, !is_eth, is_eth, is_vlan, is_grh, 0, &sqp->ud_header);
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 97d342fa5032..f50ef6a5ee5e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -123,6 +123,26 @@ static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
 	return err;
 }
 
+int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx)
+{
+	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+	struct mlx4_mac_table *table = &info->mac_table;
+	int i;
+
+	for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+		if (!table->refs[i])
+			continue;
+
+		if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
+			*idx = i;
+			return 0;
+		}
+	}
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(mlx4_find_cached_mac);
+
 int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
 	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 98fa492cf406..e1862997f933 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -34,6 +34,7 @@
 #define MLX4_CQ_H
 
 #include <linux/types.h>
+#include <uapi/linux/if_ether.h>
 
 #include <linux/mlx4/device.h>
 #include <linux/mlx4/doorbell.h>
@@ -43,10 +44,15 @@ struct mlx4_cqe {
 	__be32			immed_rss_invalid;
 	__be32			g_mlpath_rqpn;
 	__be16			sl_vid;
-	__be16			rlid;
-	__be16			status;
-	u8			ipv6_ext_mask;
-	u8			badfcs_enc;
+	union {
+		struct {
+			__be16	rlid;
+			__be16  status;
+			u8      ipv6_ext_mask;
+			u8      badfcs_enc;
+		};
+		u8  smac[ETH_ALEN];
+	};
 	__be32			byte_cnt;
 	__be16			wqe_index;
 	__be16			checksum;
@@ -83,6 +89,7 @@ struct mlx4_ts_cqe {
 enum {
 	MLX4_CQE_VLAN_PRESENT_MASK	= 1 << 29,
 	MLX4_CQE_QPN_MASK		= 0xffffff,
+	MLX4_CQE_VID_MASK		= 0xfff,
 };
 
 enum {
-- 
cgit v1.2.3
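Taken together with the previous patch, the receive-side flow becomes: the
mlx4 CQ poller fills wc->smac and wc->vlan_id from the CQE, and a ULP can
then build a reply address handle with no address resolution. A hedged
consumer sketch follows; the helper name is an assumption, and the core's
existing ib_create_ah_from_wc() wraps the same two steps.

	/*
	 * Illustrative sketch only: replying to a received message. Because
	 * the mlx4 poll code above sets IB_WC_WITH_SMAC/IB_WC_WITH_VLAN, the
	 * core helper fills ah_attr->dmac/vlan_id straight from the WC.
	 */
	static struct ib_ah *example_create_reply_ah(struct ib_pd *pd,
						     struct ib_device *device,
						     u8 port_num,
						     struct ib_wc *wc,
						     struct ib_grh *grh)
	{
		struct ib_ah_attr ah_attr;
		int ret;

		ret = ib_init_ah_from_wc(device, port_num, wc, grh, &ah_attr);
		if (ret)
			return ERR_PTR(ret);

		return ib_create_ah(pd, &ah_attr);
	}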