Diffstat (limited to 'drivers/net/ethernet/microsoft')
 -rw-r--r--  drivers/net/ethernet/microsoft/mana/gdma_main.c    | 183
 -rw-r--r--  drivers/net/ethernet/microsoft/mana/hw_channel.c   |  12
 -rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c      | 204
 -rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_ethtool.c |  87
4 files changed, 378 insertions(+), 108 deletions(-)
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index 43f034e180c4..efb4e412ec7e 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -15,6 +15,20 @@
 
 struct dentry *mana_debugfs_root;
 
+struct mana_dev_recovery {
+	struct list_head list;
+	struct pci_dev *pdev;
+	enum gdma_eqe_type type;
+};
+
+static struct mana_dev_recovery_work {
+	struct list_head dev_list;
+	struct delayed_work work;
+
+	/* Lock for dev_list above */
+	spinlock_t lock;
+} mana_dev_recovery_work;
+
 static u32 mana_gd_r32(struct gdma_context *g, u64 offset)
 {
 	return readl(g->bar0_va + offset);
@@ -387,6 +401,25 @@ EXPORT_SYMBOL_NS(mana_gd_ring_cq, "NET_MANA");
 
 #define MANA_SERVICE_PERIOD 10
 
+static void mana_serv_rescan(struct pci_dev *pdev)
+{
+	struct pci_bus *parent;
+
+	pci_lock_rescan_remove();
+
+	parent = pdev->bus;
+	if (!parent) {
+		dev_err(&pdev->dev, "MANA service: no parent bus\n");
+		goto out;
+	}
+
+	pci_stop_and_remove_bus_device(pdev);
+	pci_rescan_bus(parent);
+
+out:
+	pci_unlock_rescan_remove();
+}
+
 static void mana_serv_fpga(struct pci_dev *pdev)
 {
 	struct pci_bus *bus, *parent;
@@ -419,9 +452,12 @@ static void mana_serv_reset(struct pci_dev *pdev)
 {
 	struct gdma_context *gc = pci_get_drvdata(pdev);
 	struct hw_channel_context *hwc;
+	int ret;
 
 	if (!gc) {
-		dev_err(&pdev->dev, "MANA service: no GC\n");
+		/* Perform PCI rescan on device if GC is not set up */
+		dev_err(&pdev->dev, "MANA service: GC not setup, rescanning\n");
+		mana_serv_rescan(pdev);
 		return;
 	}
 
@@ -440,9 +476,18 @@ static void mana_serv_reset(struct pci_dev *pdev)
 
 	msleep(MANA_SERVICE_PERIOD * 1000);
 
-	mana_gd_resume(pdev);
+	ret = mana_gd_resume(pdev);
+	if (ret == -ETIMEDOUT || ret == -EPROTO) {
+		/* Perform PCI rescan on device if we failed on HWC */
+		dev_err(&pdev->dev, "MANA service: resume failed, rescanning\n");
+		mana_serv_rescan(pdev);
+		goto out;
+	}
 
-	dev_info(&pdev->dev, "MANA reset cycle completed\n");
+	if (ret)
+		dev_info(&pdev->dev, "MANA reset cycle failed err %d\n", ret);
+	else
+		dev_info(&pdev->dev, "MANA reset cycle completed\n");
 
 out:
 	gc->in_service = false;
@@ -454,18 +499,9 @@ struct mana_serv_work {
 	enum gdma_eqe_type type;
 };
 
-static void mana_serv_func(struct work_struct *w)
+static void mana_do_service(enum gdma_eqe_type type, struct pci_dev *pdev)
 {
-	struct mana_serv_work *mns_wk;
-	struct pci_dev *pdev;
-
-	mns_wk = container_of(w, struct mana_serv_work, serv_work);
-	pdev = mns_wk->pdev;
-
-	if (!pdev)
-		goto out;
-
-	switch (mns_wk->type) {
+	switch (type) {
 	case GDMA_EQE_HWC_FPGA_RECONFIG:
 		mana_serv_fpga(pdev);
 		break;
@@ -475,12 +511,48 @@
 		break;
 
 	default:
-		dev_err(&pdev->dev, "MANA service: unknown type %d\n",
-			mns_wk->type);
+		dev_err(&pdev->dev, "MANA service: unknown type %d\n", type);
 		break;
 	}
+}
+
+static void mana_recovery_delayed_func(struct work_struct *w)
+{
+	struct mana_dev_recovery_work *work;
+	struct mana_dev_recovery *dev;
+	unsigned long flags;
+
+	work = container_of(w, struct mana_dev_recovery_work, work.work);
+
+	spin_lock_irqsave(&work->lock, flags);
+
+	while (!list_empty(&work->dev_list)) {
+		dev = list_first_entry(&work->dev_list,
+				       struct mana_dev_recovery, list);
+		list_del(&dev->list);
+		spin_unlock_irqrestore(&work->lock, flags);
+
+		mana_do_service(dev->type, dev->pdev);
+		pci_dev_put(dev->pdev);
+		kfree(dev);
+
+		spin_lock_irqsave(&work->lock, flags);
+	}
+
+	spin_unlock_irqrestore(&work->lock, flags);
+}
+
+static void mana_serv_func(struct work_struct *w)
+{
+	struct mana_serv_work *mns_wk;
+	struct pci_dev *pdev;
+
+	mns_wk = container_of(w, struct mana_serv_work, serv_work);
+	pdev = mns_wk->pdev;
+
+	if (pdev)
+		mana_do_service(mns_wk->type, pdev);
 
-out:
 	pci_dev_put(pdev);
 	kfree(mns_wk);
 	module_put(THIS_MODULE);
@@ -528,6 +600,7 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
 	case GDMA_EQE_HWC_INIT_DONE:
 	case GDMA_EQE_HWC_SOC_SERVICE:
 	case GDMA_EQE_RNIC_QP_FATAL:
+	case GDMA_EQE_HWC_SOC_RECONFIG_DATA:
 		if (!eq->eq.callback)
 			break;
 
@@ -540,6 +613,17 @@ static void mana_gd_process_eqe(struct gdma_queue *eq)
 	case GDMA_EQE_HWC_RESET_REQUEST:
 		dev_info(gc->dev, "Recv MANA service type:%d\n", type);
 
+		if (!test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+			/*
+			 * Device is in probe and we received a hardware reset
+			 * event, the probe function will detect that the flag
+			 * has changed and perform service procedure.
+			 */
+			dev_info(gc->dev,
+				 "Service is to be processed in probe\n");
+			break;
+		}
+
 		if (gc->in_service) {
 			dev_info(gc->dev, "Already in service\n");
 			break;
@@ -1299,7 +1383,6 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 			      struct gdma_posted_wqe_info *wqe_info)
 {
 	u32 client_oob_size = wqe_req->inline_oob_size;
-	struct gdma_context *gc;
 	u32 sgl_data_size;
 	u32 max_wqe_size;
 	u32 wqe_size;
@@ -1329,11 +1412,8 @@ int mana_gd_post_work_request(struct gdma_queue *wq,
 	if (wqe_size > max_wqe_size)
 		return -EINVAL;
 
-	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq)) {
-		gc = wq->gdma_dev->gdma_context;
-		dev_err(gc->dev, "unsuccessful flow control!\n");
+	if (wq->monitor_avl_buf && wqe_size > mana_gd_wq_avail_space(wq))
 		return -ENOSPC;
-	}
 
 	if (wqe_info)
 		wqe_info->wqe_size_in_bu = wqe_size / GDMA_WQE_BU_SIZE;
@@ -1941,8 +2021,19 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto cleanup_mana;
 
+	/*
+	 * If a hardware reset event has occurred over HWC during probe,
+	 * rollback and perform hardware reset procedure.
+	 */
+	if (test_and_set_bit(GC_PROBE_SUCCEEDED, &gc->flags)) {
+		err = -EPROTO;
+		goto cleanup_mana_rdma;
+	}
+
 	return 0;
 
+cleanup_mana_rdma:
+	mana_rdma_remove(&gc->mana_ib);
cleanup_mana:
 	mana_remove(&gc->mana, false);
cleanup_gd:
@@ -1966,6 +2057,35 @@ release_region:
disable_dev:
 	pci_disable_device(pdev);
 	dev_err(&pdev->dev, "gdma probe failed: err = %d\n", err);
+
+	/*
+	 * Hardware could be in recovery mode and the HWC returns TIMEDOUT or
+	 * EPROTO from mana_gd_setup(), mana_probe() or mana_rdma_probe(), or
+	 * we received a hardware reset event over HWC interrupt. In this case,
+	 * perform the device recovery procedure after MANA_SERVICE_PERIOD
+	 * seconds.
+	 */
+	if (err == -ETIMEDOUT || err == -EPROTO) {
+		struct mana_dev_recovery *dev;
+		unsigned long flags;
+
+		dev_info(&pdev->dev, "Start MANA recovery mode\n");
+
+		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+		if (!dev)
+			return err;
+
+		dev->pdev = pci_dev_get(pdev);
+		dev->type = GDMA_EQE_HWC_RESET_REQUEST;
+
+		spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+		list_add_tail(&dev->list, &mana_dev_recovery_work.dev_list);
+		spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
+
+		schedule_delayed_work(&mana_dev_recovery_work.work,
+				      secs_to_jiffies(MANA_SERVICE_PERIOD));
+	}
+
 	return err;
 }
 
@@ -2070,6 +2190,10 @@ static int __init mana_driver_init(void)
 {
 	int err;
 
+	INIT_LIST_HEAD(&mana_dev_recovery_work.dev_list);
+	spin_lock_init(&mana_dev_recovery_work.lock);
+	INIT_DELAYED_WORK(&mana_dev_recovery_work.work, mana_recovery_delayed_func);
+
 	mana_debugfs_root = debugfs_create_dir("mana", NULL);
 
 	err = pci_register_driver(&mana_driver);
@@ -2083,6 +2207,21 @@ static int __init mana_driver_init(void)
 
 static void __exit mana_driver_exit(void)
 {
+	struct mana_dev_recovery *dev;
+	unsigned long flags;
+
+	disable_delayed_work_sync(&mana_dev_recovery_work.work);
+
+	spin_lock_irqsave(&mana_dev_recovery_work.lock, flags);
+	while (!list_empty(&mana_dev_recovery_work.dev_list)) {
+		dev = list_first_entry(&mana_dev_recovery_work.dev_list,
+				       struct mana_dev_recovery, list);
+		list_del(&dev->list);
+		pci_dev_put(dev->pdev);
+		kfree(dev);
+	}
+	spin_unlock_irqrestore(&mana_dev_recovery_work.lock, flags);
+
 	pci_unregister_driver(&mana_driver);
 
 	debugfs_remove(mana_debugfs_root);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index ada6c78a2bef..aa4e2731e2ba 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -118,6 +118,7 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 	struct gdma_dev *gd = hwc->gdma_dev;
 	union hwc_init_type_data type_data;
 	union hwc_init_eq_id_db eq_db;
+	struct mana_context *ac;
 	u32 type, val;
 	int ret;
 
@@ -196,6 +197,17 @@ static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 			hwc->hwc_timeout = val;
 			break;
 
+		case HWC_DATA_HW_LINK_CONNECT:
+		case HWC_DATA_HW_LINK_DISCONNECT:
+			ac = gd->gdma_context->mana.driver_data;
+			if (!ac)
+				break;
+
+			WRITE_ONCE(ac->link_event, type);
+			schedule_work(&ac->link_change_work);
+
+			break;
+
 		default:
 			dev_warn(hwc->dev, "Received unknown reconfig type %u\n", type);
 			break;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 0142fd98392c..1ad154f9db1a 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -11,6 +11,7 @@
 #include <linux/mm.h>
 #include <linux/pci.h>
 #include <linux/export.h>
+#include <linux/skbuff.h>
 
 #include <net/checksum.h>
 #include <net/ip6_checksum.h>
@@ -20,6 +21,7 @@
 
 #include <net/mana/mana.h>
 #include <net/mana/mana_auxiliary.h>
+#include <net/mana/hw_channel.h>
 
 static DEFINE_IDA(mana_adev_ida);
 
@@ -84,7 +86,6 @@ static int mana_open(struct net_device *ndev)
 	/* Ensure port state updated before txq state */
 	smp_wmb();
 
-	netif_carrier_on(ndev);
 	netif_tx_wake_all_queues(ndev);
 	netdev_dbg(ndev, "%s successful\n", __func__);
 	return 0;
@@ -100,6 +101,46 @@ static int mana_close(struct net_device *ndev)
 	return mana_detach(ndev, true);
 }
 
+static void mana_link_state_handle(struct work_struct *w)
+{
+	struct mana_context *ac;
+	struct net_device *ndev;
+	u32 link_event;
+	bool link_up;
+	int i;
+
+	ac = container_of(w, struct mana_context, link_change_work);
+
+	rtnl_lock();
+
+	link_event = READ_ONCE(ac->link_event);
+
+	if (link_event == HWC_DATA_HW_LINK_CONNECT)
+		link_up = true;
+	else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
+		link_up = false;
+	else
+		goto out;
+
+	/* Process all ports */
+	for (i = 0; i < ac->num_ports; i++) {
+		ndev = ac->ports[i];
+		if (!ndev)
+			continue;
+
+		if (link_up) {
+			netif_carrier_on(ndev);
+
+			__netdev_notify_peers(ndev);
+		} else {
+			netif_carrier_off(ndev);
+		}
+	}
+
+out:
+	rtnl_unlock();
+}
+
 static bool mana_can_tx(struct gdma_queue *wq)
 {
 	return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
@@ -289,6 +330,21 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	cq = &apc->tx_qp[txq_idx].tx_cq;
 	tx_stats = &txq->stats;
 
+	BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
+	if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
+	    skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* GSO skb with Hardware SGE limit exceeded is not expected here
+		 * as they are handled in mana_features_check() callback
+		 */
+		if (skb_linearize(skb)) {
+			netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
+					 skb_shinfo(skb)->nr_frags,
+					 skb_is_gso(skb));
+			goto tx_drop_count;
+		}
+		apc->eth_stats.tx_linear_pkt_cnt++;
+	}
+
 	pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
 	pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
 
@@ -402,8 +458,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 	}
 
-	WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
 	if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
 		pkg.wqe_req.sgl = pkg.sgl_array;
 	} else {
@@ -438,9 +492,9 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
 	if (err) {
 		(void)skb_dequeue_tail(&txq->pending_skbs);
+		mana_unmap_skb(skb, apc);
 		netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
-		err = NETDEV_TX_BUSY;
-		goto tx_busy;
+		goto free_sgl_ptr;
 	}
 
 	err = NETDEV_TX_OK;
@@ -460,7 +514,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
 	u64_stats_update_end(&tx_stats->syncp);
 
-tx_busy:
 	if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
 		netif_tx_wake_queue(net_txq);
 		apc->eth_stats.wake_queue++;
@@ -478,6 +531,25 @@ tx_drop:
 	return NETDEV_TX_OK;
 }
 
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+static netdev_features_t mana_features_check(struct sk_buff *skb,
+					     struct net_device *ndev,
+					     netdev_features_t features)
+{
+	if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
+		/* Exceeds HW SGE limit.
+		 * GSO case:
+		 * Disable GSO so the stack will software-segment the skb
+		 * into smaller skbs that fit the SGE budget.
+		 * Non-GSO case:
+		 * The xmit path will attempt skb_linearize() as a fallback.
+		 */
+		features &= ~NETIF_F_GSO_MASK;
+	}
+	return features;
+}
+#endif
+
 static void mana_get_stats64(struct net_device *ndev,
 			     struct rtnl_link_stats64 *st)
 {
@@ -494,6 +566,11 @@ static void mana_get_stats64(struct net_device *ndev,
 
 	netdev_stats_to_stats64(st, &ndev->stats);
 
+	if (apc->ac->hwc_timeout_occurred)
+		netdev_warn_once(ndev, "HWC timeout occurred\n");
+
+	st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
+
 	for (q = 0; q < num_queues; q++) {
 		rx_stats = &apc->rxqs[q]->stats;
 
@@ -814,7 +891,7 @@ static int mana_shaper_del(struct net_shaper_binding *binding,
 		/* Reset mana port context parameters */
 		apc->handle.id = 0;
 		apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
-		apc->speed = 0;
+		apc->speed = apc->max_speed;
 	}
 
 	return err;
@@ -838,6 +915,9 @@ static const struct net_device_ops mana_devops = {
 	.ndo_open		= mana_open,
 	.ndo_stop		= mana_close,
 	.ndo_select_queue	= mana_select_queue,
+#if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
+	.ndo_features_check	= mana_features_check,
+#endif
 	.ndo_start_xmit		= mana_start_xmit,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_get_stats64	= mana_get_stats64,
@@ -1606,7 +1686,7 @@ static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
 	return 0;
 }
 
-static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
+void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
 {
 	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 	struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
@@ -2769,11 +2849,12 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
 	return 0;
 }
 
-void mana_query_gf_stats(struct mana_port_context *apc)
+int mana_query_gf_stats(struct mana_context *ac)
 {
+	struct gdma_context *gc = ac->gdma_dev->gdma_context;
 	struct mana_query_gf_stat_resp resp = {};
 	struct mana_query_gf_stat_req req = {};
-	struct net_device *ndev = apc->ndev;
+	struct device *dev = gc->dev;
 	int err;
 
 	mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
@@ -2807,52 +2888,54 @@ void mana_query_gf_stats(struct mana_port_context *apc)
 		      STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
 		      STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
 
-	err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
+	err = mana_send_request(ac, &req, sizeof(req), &resp,
 				sizeof(resp));
 	if (err) {
-		netdev_err(ndev, "Failed to query GF stats: %d\n", err);
-		return;
+		dev_err(dev, "Failed to query GF stats: %d\n", err);
+		return err;
 	}
 	err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
 				   sizeof(resp));
 	if (err || resp.hdr.status) {
-		netdev_err(ndev, "Failed to query GF stats: %d, 0x%x\n", err,
-			   resp.hdr.status);
-		return;
+		dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
+			resp.hdr.status);
+		return err;
 	}
 
-	apc->eth_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
-	apc->eth_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
-	apc->eth_stats.hc_rx_bytes = resp.hc_rx_bytes;
-	apc->eth_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
-	apc->eth_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
-	apc->eth_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
-	apc->eth_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
-	apc->eth_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
-	apc->eth_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
-	apc->eth_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
-	apc->eth_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
-	apc->eth_stats.hc_tx_err_inval_vportoffset_pkt =
+	ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
+	ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
+	ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
+	ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
+	ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
+	ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
+	ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
+	ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
+	ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
+	ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
+	ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
+	ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
 		resp.tx_err_inval_vport_offset_pkt;
-	apc->eth_stats.hc_tx_err_vlan_enforcement =
+	ac->hc_stats.hc_tx_err_vlan_enforcement =
 		resp.tx_err_vlan_enforcement;
-	apc->eth_stats.hc_tx_err_eth_type_enforcement =
+	ac->hc_stats.hc_tx_err_eth_type_enforcement =
 		resp.tx_err_ethtype_enforcement;
-	apc->eth_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
-	apc->eth_stats.hc_tx_err_sqpdid_enforcement =
+	ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
+	ac->hc_stats.hc_tx_err_sqpdid_enforcement =
 		resp.tx_err_SQPDID_enforcement;
-	apc->eth_stats.hc_tx_err_cqpdid_enforcement =
+	ac->hc_stats.hc_tx_err_cqpdid_enforcement =
 		resp.tx_err_CQPDID_enforcement;
-	apc->eth_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
-	apc->eth_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
-	apc->eth_stats.hc_tx_bytes = resp.hc_tx_bytes;
-	apc->eth_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
-	apc->eth_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
-	apc->eth_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
-	apc->eth_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
-	apc->eth_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
-	apc->eth_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
-	apc->eth_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+	ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
+	ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
+	ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
+	ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
+	ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
+	ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
+	ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
+	ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
+	ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
+	ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
+
+	return 0;
 }
 
 void mana_query_phy_stats(struct mana_port_context *apc)
@@ -3059,9 +3142,6 @@ int mana_attach(struct net_device *ndev)
 	/* Ensure port state updated before txq state */
 	smp_wmb();
 
-	if (apc->port_is_up)
-		netif_carrier_on(ndev);
-
 	netif_device_attach(ndev);
 
 	return 0;
@@ -3154,7 +3234,6 @@ int mana_detach(struct net_device *ndev, bool from_close)
 	smp_wmb();
 
 	netif_tx_disable(ndev);
-	netif_carrier_off(ndev);
 
 	if (apc->port_st_save) {
 		err = mana_dealloc_queues(ndev);
@@ -3243,6 +3322,8 @@ static int mana_probe_port(struct mana_context *ac, int port_idx,
 		goto free_indir;
 	}
 
+	netif_carrier_on(ndev);
+
 	debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
 
 	return 0;
@@ -3389,6 +3470,24 @@ int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event
 	return 0;
 }
 
+#define MANA_GF_STATS_PERIOD (2 * HZ)
+
+static void mana_gf_stats_work_handler(struct work_struct *work)
+{
+	struct mana_context *ac =
+		container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
+	int err;
+
+	err = mana_query_gf_stats(ac);
+	if (err == -ETIMEDOUT) {
+		/* HWC timeout detected - reset stats and stop rescheduling */
+		ac->hwc_timeout_occurred = true;
+		memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
+		return;
+	}
+	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+}
+
 int mana_probe(struct gdma_dev *gd, bool resuming)
 {
 	struct gdma_context *gc = gd->gdma_context;
@@ -3431,6 +3530,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 
 	if (!resuming) {
 		ac->num_ports = num_ports;
+
+		INIT_WORK(&ac->link_change_work, mana_link_state_handle);
 	} else {
 		if (ac->num_ports != num_ports) {
 			dev_err(dev, "The number of vPorts changed: %d->%d\n",
@@ -3438,6 +3539,8 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 			err = -EPROTO;
 			goto out;
 		}
+
+		enable_work(&ac->link_change_work);
 	}
 
 	if (ac->num_ports == 0)
@@ -3477,6 +3580,10 @@ int mana_probe(struct gdma_dev *gd, bool resuming)
 	}
 
 	err = add_adev(gd, "eth");
+
+	INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
+	schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
+
out:
 	if (err) {
 		mana_remove(gd, false);
@@ -3500,6 +3607,9 @@ void mana_remove(struct gdma_dev *gd, bool suspending)
 	int err;
 	int i;
 
+	disable_work_sync(&ac->link_change_work);
+	cancel_delayed_work_sync(&ac->gf_stats_work);
+
 	/* adev currently doesn't support suspending, always remove it */
 	if (gd->adev)
 		remove_adev(gd);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index a1afa75a9463..0e2f4343ac67 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -15,66 +15,71 @@ struct mana_stats_desc {
 static const struct mana_stats_desc mana_eth_stats[] = {
 	{"stop_queue", offsetof(struct mana_ethtool_stats, stop_queue)},
 	{"wake_queue", offsetof(struct mana_ethtool_stats, wake_queue)},
-	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats,
+	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
+	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+				tx_cqe_unknown_type)},
+	{"tx_linear_pkt_cnt", offsetof(struct mana_ethtool_stats,
+			      tx_linear_pkt_cnt)},
+	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
+			     rx_coalesced_err)},
+	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
+				rx_cqe_unknown_type)},
+};
+
+static const struct mana_stats_desc mana_hc_stats[] = {
+	{"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_hc_stats,
 				  hc_rx_discards_no_wqe)},
-	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
 				     hc_rx_err_vport_disabled)},
-	{"hc_rx_bytes", offsetof(struct mana_ethtool_stats, hc_rx_bytes)},
-	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_rx_bytes)},
+	{"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
 			     hc_rx_ucast_pkts)},
-	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_rx_ucast_bytes)},
-	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
 			     hc_rx_bcast_pkts)},
-	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_rx_bcast_bytes)},
-	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats,
-			     hc_rx_mcast_pkts)},
-	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
+			     hc_rx_mcast_pkts)},
+	{"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_rx_mcast_bytes)},
-	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_hc_stats,
 				  hc_tx_err_gf_disabled)},
-	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_hc_stats,
 				     hc_tx_err_vport_disabled)},
 	{"hc_tx_err_inval_vportoffset_pkt",
-	 offsetof(struct mana_ethtool_stats,
+	 offsetof(struct mana_ethtool_hc_stats,
 		  hc_tx_err_inval_vportoffset_pkt)},
-	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_hc_stats,
 				       hc_tx_err_vlan_enforcement)},
 	{"hc_tx_err_eth_type_enforcement",
-	 offsetof(struct mana_ethtool_stats, hc_tx_err_eth_type_enforcement)},
-	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats,
+	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_eth_type_enforcement)},
+	{"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_hc_stats,
 				     hc_tx_err_sa_enforcement)},
 	{"hc_tx_err_sqpdid_enforcement",
-	 offsetof(struct mana_ethtool_stats, hc_tx_err_sqpdid_enforcement)},
+	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_sqpdid_enforcement)},
 	{"hc_tx_err_cqpdid_enforcement",
-	 offsetof(struct mana_ethtool_stats, hc_tx_err_cqpdid_enforcement)},
-	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats,
+	 offsetof(struct mana_ethtool_hc_stats, hc_tx_err_cqpdid_enforcement)},
+	{"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_hc_stats,
 				    hc_tx_err_mtu_violation)},
-	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_hc_stats,
 				hc_tx_err_inval_oob)},
-	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_err_gdma", offsetof(struct mana_ethtool_hc_stats,
 			   hc_tx_err_gdma)},
-	{"hc_tx_bytes", offsetof(struct mana_ethtool_stats, hc_tx_bytes)},
-	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_bytes", offsetof(struct mana_ethtool_hc_stats, hc_tx_bytes)},
+	{"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_hc_stats,
 			     hc_tx_ucast_pkts)},
-	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_tx_ucast_bytes)},
-	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_hc_stats,
 			     hc_tx_bcast_pkts)},
-	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_tx_bcast_bytes)},
-	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_hc_stats,
 			     hc_tx_mcast_pkts)},
-	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats,
+	{"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_hc_stats,
 			      hc_tx_mcast_bytes)},
-	{"tx_cq_err", offsetof(struct mana_ethtool_stats, tx_cqe_err)},
-	{"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
-				tx_cqe_unknown_type)},
-	{"rx_coalesced_err", offsetof(struct mana_ethtool_stats,
-			     rx_coalesced_err)},
-	{"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats,
-				rx_cqe_unknown_type)},
 };
 
 static const struct mana_stats_desc mana_phy_stats[] = {
@@ -138,7 +143,7 @@ static int mana_get_sset_count(struct net_device *ndev, int stringset)
 	if (stringset != ETH_SS_STATS)
 		return -EINVAL;
 
-	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) +
+	return ARRAY_SIZE(mana_eth_stats) + ARRAY_SIZE(mana_phy_stats) + ARRAY_SIZE(mana_hc_stats) +
 	       num_queues * (MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
 }
 
@@ -150,10 +155,12 @@ static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 	if (stringset != ETH_SS_STATS)
 		return;
 
-
 	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
 		ethtool_puts(&data, mana_eth_stats[i].name);
 
+	for (i = 0; i < ARRAY_SIZE(mana_hc_stats); i++)
+		ethtool_puts(&data, mana_hc_stats[i].name);
+
 	for (i = 0; i < ARRAY_SIZE(mana_phy_stats); i++)
 		ethtool_puts(&data, mana_phy_stats[i].name);
 
@@ -186,6 +193,7 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	struct mana_port_context *apc = netdev_priv(ndev);
 	unsigned int num_queues = apc->num_queues;
 	void *eth_stats = &apc->eth_stats;
+	void *hc_stats = &apc->ac->hc_stats;
 	void *phy_stats = &apc->phy_stats;
 	struct mana_stats_rx *rx_stats;
 	struct mana_stats_tx *tx_stats;
@@ -207,8 +215,6 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 
 	if (!apc->port_is_up)
 		return;
-	/* we call mana function to update stats from GDMA */
-	mana_query_gf_stats(apc);
 
 	/* We call this mana function to get the phy stats from GDMA and includes
 	 * aggregate tx/rx drop counters, Per-TC(Traffic Channel) tx/rx and pause
@@ -219,6 +225,9 @@ static void mana_get_ethtool_stats(struct net_device *ndev,
 	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
 		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);
 
+	for (q = 0; q < ARRAY_SIZE(mana_hc_stats); q++)
+		data[i++] = *(u64 *)(hc_stats + mana_hc_stats[q].offset);
+
 	for (q = 0; q < ARRAY_SIZE(mana_phy_stats); q++)
 		data[i++] = *(u64 *)(phy_stats + mana_phy_stats[q].offset);
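
Editor's note: the gdma_main.c hunks above defer recovery of a failed probe to a module-global work item: failures are queued on a spinlock-protected list and drained by one delayed work, with leftovers freed at module exit. The following standalone module sketch illustrates only that shape (it is not part of the patch, and all demo_* names are hypothetical):

/* Minimal sketch of the deferred-recovery pattern used by the patch:
 * a global list + spinlock feeding a single delayed work item.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_item {
	struct list_head list;
	int payload;
};

static struct {
	struct list_head items;
	struct delayed_work work;
	spinlock_t lock;	/* protects items */
} demo_queue;

static void demo_work_fn(struct work_struct *w)
{
	struct demo_item *it;
	unsigned long flags;

	spin_lock_irqsave(&demo_queue.lock, flags);
	while (!list_empty(&demo_queue.items)) {
		it = list_first_entry(&demo_queue.items, struct demo_item, list);
		list_del(&it->list);
		/* Drop the lock while servicing, as the patch does around
		 * mana_do_service(): the service step may sleep (msleep,
		 * PCI rescan) and must not run under a spinlock.
		 */
		spin_unlock_irqrestore(&demo_queue.lock, flags);

		pr_info("demo: servicing item %d\n", it->payload);
		kfree(it);

		spin_lock_irqsave(&demo_queue.lock, flags);
	}
	spin_unlock_irqrestore(&demo_queue.lock, flags);
}

/* Called from a failure path; defers the heavy work by 10 seconds. */
static void demo_defer(int payload)
{
	struct demo_item *it = kzalloc(sizeof(*it), GFP_KERNEL);
	unsigned long flags;

	if (!it)
		return;
	it->payload = payload;

	spin_lock_irqsave(&demo_queue.lock, flags);
	list_add_tail(&it->list, &demo_queue.items);
	spin_unlock_irqrestore(&demo_queue.lock, flags);

	schedule_delayed_work(&demo_queue.work, secs_to_jiffies(10));
}

static int __init demo_init(void)
{
	INIT_LIST_HEAD(&demo_queue.items);
	spin_lock_init(&demo_queue.lock);
	INIT_DELAYED_WORK(&demo_queue.work, demo_work_fn);
	demo_defer(42);
	return 0;
}

static void __exit demo_exit(void)
{
	struct demo_item *it;
	unsigned long flags;

	/* Mirror the patch's exit path: stop the work, then free leftovers. */
	disable_delayed_work_sync(&demo_queue.work);

	spin_lock_irqsave(&demo_queue.lock, flags);
	while (!list_empty(&demo_queue.items)) {
		it = list_first_entry(&demo_queue.items, struct demo_item, list);
		list_del(&it->list);
		kfree(it);
	}
	spin_unlock_irqrestore(&demo_queue.lock, flags);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");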

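Editor's note: the mana_features_check()/skb_linearize() hunks in mana_en.c hinge on one budget check: an skb may need up to nr_frags + 2 hardware SGEs, and that must fit within MANA_MAX_TX_WQE_SGL_ENTRIES. A small userspace sketch of that arithmetic follows; the limit value 30 is an assumption for illustration, not taken from the patch:

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for MANA_MAX_TX_WQE_SGL_ENTRIES. */
#define DEMO_MAX_SGL_ENTRIES 30

/* Worst case per the patch's check: the page frags plus up to two more
 * SGEs for the linear/header portion of the skb.
 */
static bool demo_needs_fallback(unsigned int nr_frags)
{
	return nr_frags + 2 > DEMO_MAX_SGL_ENTRIES;
}

int main(void)
{
	for (unsigned int nr_frags = 27; nr_frags <= 30; nr_frags++)
		printf("nr_frags=%2u -> %s\n", nr_frags,
		       demo_needs_fallback(nr_frags) ?
		       "clear GSO / linearize" : "post as-is");
	return 0;
}

For GSO skbs the fallback is to clear NETIF_F_GSO_MASK in ndo_features_check() so the stack segments the packet in software; for non-GSO skbs the xmit path linearizes instead, counted by tx_linear_pkt_cnt.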