Diffstat (limited to 'drivers/net/ethernet/google/gve')
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h           | 24
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c    |  4
-rw-r--r--  drivers/net/ethernet/google/gve/gve_desc_dqo.h  |  3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h       |  1
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c   | 97
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c      | 97
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ptp.c       | 27
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c    | 69
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c        |  2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c    |  6
10 files changed, 234 insertions, 96 deletions
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index bceaf9b05cb4..970d5ca8cdde 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -59,8 +59,6 @@
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
-#define GVE_MAX_RX_BUFFER_SIZE 4096
-
#define GVE_XDP_RX_BUFFER_SIZE_DQO 4096
#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
@@ -100,6 +98,8 @@
*/
#define GVE_DQO_QPL_ONDEMAND_ALLOC_THRESHOLD 96
+#define GVE_DQO_RX_HWTSTAMP_VALID 0x1
+
/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
struct gve_rx_desc *desc_ring; /* the descriptor ring */
@@ -205,6 +205,13 @@ struct gve_rx_buf_state_dqo {
s16 next;
};
+/* Wrapper for XDP Rx metadata */
+struct gve_xdp_buff {
+ struct xdp_buff xdp;
+ struct gve_priv *gve;
+ const struct gve_rx_compl_desc_dqo *compl_desc;
+};
+
/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
s16 head;
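
Because struct xdp_buff is the first member, a pointer to the embedded buffer is also a pointer to the wrapper, which is how the Rx metadata callback added later in this series recovers the driver context with a plain cast. A minimal sketch of that relationship (the helper name is illustrative):

/* Hypothetical helper: valid only because .xdp is the first member,
 * so the xdp_buff and gve_xdp_buff addresses coincide.
 * container_of(xdp, struct gve_xdp_buff, xdp) would express the same
 * thing without relying on member order.
 */
static inline struct gve_xdp_buff *gve_xdp_buff_of(struct xdp_buff *xdp)
{
	return (struct gve_xdp_buff *)xdp;
}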
@@ -1167,6 +1174,12 @@ static inline bool gve_is_gqi(struct gve_priv *priv)
priv->queue_format == GVE_GQI_QPL_FORMAT;
}
+static inline bool gve_is_dqo(struct gve_priv *priv)
+{
+ return priv->queue_format == GVE_DQO_RDA_FORMAT ||
+ priv->queue_format == GVE_DQO_QPL_FORMAT;
+}
+
static inline u32 gve_num_tx_queues(struct gve_priv *priv)
{
return priv->tx_cfg.num_queues + priv->tx_cfg.num_xdp_queues;
@@ -1247,9 +1260,12 @@ void gve_rx_free_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx);
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx);
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hplit);
bool gve_header_split_supported(const struct gve_priv *priv);
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
/* rx buffer handling */
int gve_buf_ref_cnt(struct gve_rx_buf_state_dqo *bs);
void gve_free_page_dqo(struct gve_priv *priv, struct gve_rx_buf_state_dqo *bs,
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 4f33d094a2ef..b72cc0fa2ba2 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -987,6 +987,10 @@ static void gve_enable_supported_features(struct gve_priv *priv,
dev_info(&priv->pdev->dev,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
+ if (gve_is_dqo(priv) &&
+ priv->max_rx_buffer_size > GVE_DEFAULT_RX_BUFFER_SIZE)
+ priv->rx_cfg.packet_buffer_size =
+ priv->max_rx_buffer_size;
}
/* Read and store ring size ranges given by device */
diff --git a/drivers/net/ethernet/google/gve/gve_desc_dqo.h b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
index d17da841b5a0..f7786b03c744 100644
--- a/drivers/net/ethernet/google/gve/gve_desc_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_desc_dqo.h
@@ -236,7 +236,8 @@ struct gve_rx_compl_desc_dqo {
u8 status_error1;
- __le16 reserved5;
+ u8 reserved5;
+ u8 ts_sub_nsecs_low;
__le16 buf_id; /* Buffer ID which was sent on the buffer queue. */
union {
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 6eb442096e02..5871f773f0c7 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -36,6 +36,7 @@ netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
bool gve_xdp_poll_dqo(struct gve_notify_block *block);
bool gve_xsk_tx_poll_dqo(struct gve_notify_block *block, int budget);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index d0a223250845..52500ae8348e 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -529,6 +529,8 @@ static void gve_get_ringparam(struct net_device *netdev,
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
+ kernel_cmd->rx_buf_len = priv->rx_cfg.packet_buffer_size;
+
if (!gve_header_split_supported(priv))
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_UNKNOWN;
else if (priv->header_split_enabled)
@@ -537,34 +539,6 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
-static int gve_adjust_ring_sizes(struct gve_priv *priv,
- u16 new_tx_desc_cnt,
- u16 new_rx_desc_cnt)
-{
- struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
- struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- int err;
-
- /* get current queue configuration */
- gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* copy over the new ring_size from ethtool */
- tx_alloc_cfg.ring_size = new_tx_desc_cnt;
- rx_alloc_cfg.ring_size = new_rx_desc_cnt;
-
- if (netif_running(priv->dev)) {
- err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- if (err)
- return err;
- }
-
- /* Set new ring_size for the next up */
- priv->tx_desc_cnt = new_tx_desc_cnt;
- priv->rx_desc_cnt = new_rx_desc_cnt;
-
- return 0;
-}
-
static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
u16 new_rx_desc_cnt)
{
@@ -584,34 +558,68 @@ static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt
return 0;
}
+static int gve_set_ring_sizes_config(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ if (new_tx_desc_cnt == priv->tx_desc_cnt &&
+ new_rx_desc_cnt == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (gve_validate_req_ring_size(priv, new_tx_desc_cnt, new_rx_desc_cnt))
+ return -EINVAL;
+
+ tx_alloc_cfg->ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg->ring_size = new_rx_desc_cnt;
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- u16 new_tx_cnt, new_rx_cnt;
int err;
- err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ err = gve_set_rx_buf_len_config(priv, kernel_cmd->rx_buf_len, extack,
+ &rx_alloc_cfg);
if (err)
return err;
- if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
- return 0;
-
- if (!priv->modify_ring_size_enabled) {
- dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
- return -EOPNOTSUPP;
- }
-
- new_tx_cnt = cmd->tx_pending;
- new_rx_cnt = cmd->rx_pending;
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split,
+ &rx_alloc_cfg);
+ if (err)
+ return err;
- if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
- return -EINVAL;
+ err = gve_set_ring_sizes_config(priv, cmd->tx_pending, cmd->rx_pending,
+ &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
- return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ } else {
+ /* Set ring params for the next up */
+ priv->rx_cfg.packet_buffer_size =
+ rx_alloc_cfg.packet_buffer_size;
+ priv->header_split_enabled = rx_alloc_cfg.enable_header_split;
+ priv->tx_desc_cnt = tx_alloc_cfg.ring_size;
+ priv->rx_desc_cnt = rx_alloc_cfg.ring_size;
+ }
+ return 0;
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -946,7 +954,8 @@ static int gve_get_ts_info(struct net_device *netdev,
const struct ethtool_ops gve_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
- .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT,
+ .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT |
+ ETHTOOL_RING_USE_RX_BUF_LEN,
.get_drvinfo = gve_get_drvinfo,
.get_strings = gve_get_strings,
.get_sset_count = gve_get_sset_count,
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 1be1b1ef31ee..a5a2b18d309b 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1707,18 +1707,28 @@ static int gve_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
return 0;
}
-static int verify_xdp_configuration(struct net_device *dev)
+static int gve_verify_xdp_configuration(struct net_device *dev,
+ struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(dev);
u16 max_xdp_mtu;
if (dev->features & NETIF_F_LRO) {
- netdev_warn(dev, "XDP is not supported when LRO is on.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when LRO is on.");
return -EOPNOTSUPP;
}
if (priv->header_split_enabled) {
- netdev_warn(dev, "XDP is not supported when header-data split is enabled.\n");
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP is not supported when header-data split is enabled.");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->rx_cfg.packet_buffer_size != SZ_2K) {
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for Rx buf len %d, only %d supported.",
+ priv->rx_cfg.packet_buffer_size, SZ_2K);
return -EOPNOTSUPP;
}
@@ -1727,17 +1737,20 @@ static int verify_xdp_configuration(struct net_device *dev)
max_xdp_mtu -= GVE_RX_PAD;
if (dev->mtu > max_xdp_mtu) {
- netdev_warn(dev, "XDP is not supported for mtu %d.\n",
- dev->mtu);
+ NL_SET_ERR_MSG_FMT_MOD(extack,
+ "XDP is not supported for mtu %d.",
+ dev->mtu);
return -EOPNOTSUPP;
}
if (priv->rx_cfg.num_queues != priv->tx_cfg.num_queues ||
(2 * priv->tx_cfg.num_queues > priv->tx_cfg.max_queues)) {
- netdev_warn(dev, "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d",
- priv->rx_cfg.num_queues,
- priv->tx_cfg.num_queues,
+ netdev_warn(dev,
+ "XDP load failed: The number of configured RX queues %d should be equal to the number of configured TX queues %d and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues %d.",
+ priv->rx_cfg.num_queues, priv->tx_cfg.num_queues,
priv->tx_cfg.max_queues);
+ NL_SET_ERR_MSG_MOD(extack,
+ "XDP load failed: The number of configured RX queues should be equal to the number of configured TX queues and the number of configured RX/TX queues should be less than or equal to half the maximum number of RX/TX queues");
return -EINVAL;
}
return 0;
@@ -1748,7 +1761,7 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
struct gve_priv *priv = netdev_priv(dev);
int err;
- err = verify_xdp_configuration(dev);
+ err = gve_verify_xdp_configuration(dev, xdp->extack);
if (err)
return err;
switch (xdp->command) {
@@ -2041,14 +2054,6 @@ static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
priv->tx_timeo_cnt++;
}
-u16 gve_get_pkt_buf_size(const struct gve_priv *priv, bool enable_hsplit)
-{
- if (enable_hsplit && priv->max_rx_buffer_size >= GVE_MAX_RX_BUFFER_SIZE)
- return GVE_MAX_RX_BUFFER_SIZE;
- else
- return GVE_DEFAULT_RX_BUFFER_SIZE;
-}
-
/* Header split is only supported on DQ RDA queue format. If XDP is enabled,
* header split is not allowed.
*/
@@ -2058,12 +2063,42 @@ bool gve_header_split_supported(const struct gve_priv *priv)
priv->queue_format == GVE_DQO_RDA_FORMAT && !priv->xdp_prog;
}
-int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
+int gve_set_rx_buf_len_config(struct gve_priv *priv, u32 rx_buf_len,
+ struct netlink_ext_ack *extack,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+{
+ u32 old_rx_buf_len = rx_alloc_cfg->packet_buffer_size;
+
+ if (rx_buf_len == old_rx_buf_len)
+ return 0;
+
+ /* device options may not always contain support for 4K buffers */
+ if (!gve_is_dqo(priv) || priv->max_rx_buffer_size < SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Modifying Rx buf len is not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->xdp_prog && rx_buf_len != SZ_2K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 when XDP is on");
+ return -EINVAL;
+ }
+
+ if (rx_buf_len != SZ_2K && rx_buf_len != SZ_4K) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rx buf len can only be 2048 or 4096");
+ return -EINVAL;
+ }
+ rx_alloc_cfg->packet_buffer_size = rx_buf_len;
+
+ return 0;
+}
+
+int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
- struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
bool enable_hdr_split;
- int err = 0;
if (tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_UNKNOWN)
return 0;
@@ -2081,14 +2116,9 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
-
- rx_alloc_cfg.enable_header_split = enable_hdr_split;
- rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
+ rx_alloc_cfg->enable_header_split = enable_hdr_split;
- if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- return err;
+ return 0;
}
static int gve_set_features(struct net_device *netdev,
@@ -2158,10 +2188,6 @@ static int gve_set_ts_config(struct net_device *dev,
}
kernel_config->rx_filter = HWTSTAMP_FILTER_ALL;
- gve_clock_nic_ts_read(priv);
- ptp_schedule_worker(priv->ptp->clock, 0);
- } else {
- ptp_cancel_worker_sync(priv->ptp->clock);
}
priv->ts_config.rx_filter = kernel_config->rx_filter;
@@ -2322,6 +2348,10 @@ static void gve_set_netdev_xdp_features(struct gve_priv *priv)
xdp_set_features_flag_locked(priv->dev, xdp_features);
}
+static const struct xdp_metadata_ops gve_xdp_metadata_ops = {
+ .xmo_rx_timestamp = gve_xdp_rx_timestamp,
+};
+
static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
{
int num_ntfy;
@@ -2417,6 +2447,9 @@ setup_device:
}
gve_set_netdev_xdp_features(priv);
+ if (!gve_is_gqi(priv))
+ priv->dev->xdp_metadata_ops = &gve_xdp_metadata_ops;
+
err = gve_setup_device_resources(priv);
if (err)
goto err_free_xsk_bitmap;
diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c
index e96247c9d68d..073677d82ee8 100644
--- a/drivers/net/ethernet/google/gve/gve_ptp.c
+++ b/drivers/net/ethernet/google/gve/gve_ptp.c
@@ -26,6 +26,19 @@ int gve_clock_nic_ts_read(struct gve_priv *priv)
return 0;
}
+static int gve_ptp_gettimex64(struct ptp_clock_info *info,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ return -EOPNOTSUPP;
+}
+
+static int gve_ptp_settime64(struct ptp_clock_info *info,
+ const struct timespec64 *ts)
+{
+ return -EOPNOTSUPP;
+}
+
static long gve_ptp_do_aux_work(struct ptp_clock_info *info)
{
const struct gve_ptp *ptp = container_of(info, struct gve_ptp, info);
@@ -47,6 +60,8 @@ out:
static const struct ptp_clock_info gve_ptp_caps = {
.owner = THIS_MODULE,
.name = "gve clock",
+ .gettimex64 = gve_ptp_gettimex64,
+ .settime64 = gve_ptp_settime64,
.do_aux_work = gve_ptp_do_aux_work,
};
@@ -118,9 +133,21 @@ int gve_init_clock(struct gve_priv *priv)
err = -ENOMEM;
goto release_ptp;
}
+ err = gve_clock_nic_ts_read(priv);
+ if (err) {
+ dev_err(&priv->pdev->dev, "failed to read NIC clock %d\n", err);
+ goto release_nic_ts_report;
+ }
+ ptp_schedule_worker(priv->ptp->clock,
+ msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS));
return 0;
+release_nic_ts_report:
+ dma_free_coherent(&priv->pdev->dev,
+ sizeof(struct gve_nic_ts_report),
+ priv->nic_ts_report, priv->nic_ts_report_bus);
+ priv->nic_ts_report = NULL;
release_ptp:
gve_ptp_release(priv);
return err;
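
Moving the initial clock read and worker scheduling into gve_init_clock() keeps last_sync_nic_counter continuously fresh, rather than only while Rx timestamping is enabled as the hunk removed from gve_set_ts_config() did. The periodic sync then rides on the PTP aux-work contract: the value returned by .do_aux_work is the delay in jiffies until the next invocation. A sketch of that contract, reusing the interval from above:

static long gve_ptp_do_aux_work_sketch(struct ptp_clock_info *info)
{
	/* ... sync the NIC counter, e.g. via gve_clock_nic_ts_read() ... */

	/* A positive return re-arms the PTP kthread worker after that
	 * many jiffies; a negative return stops rescheduling.
	 */
	return msecs_to_jiffies(GVE_NIC_TS_SYNC_INTERVAL_MS);
}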
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 55393b784317..f1bd8f5d5732 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -240,6 +240,11 @@ int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
rx->rx_headroom = 0;
}
+ /* struct gve_xdp_buff is overlaid on struct xdp_buff_xsk and utilizes
+ * the 24 byte field cb to store gve specific data.
+ */
+ XSK_CHECK_PRIV_TYPE(struct gve_xdp_buff);
+
rx->dqo.num_buf_states = cfg->raw_addressing ? buffer_queue_slots :
gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc_node(rx->dqo.num_buf_states,
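
For zero-copy AF_XDP the buffers are pool-owned struct xdp_buff_xsk objects, so the driver's extra fields must fit in that structure's cb[] scratch area rather than growing the buffer. XSK_CHECK_PRIV_TYPE() turns an overrun into a build failure; its effect is roughly the compile-time assertion below (the exact macro lives in include/net/xdp_sock_drv.h):

BUILD_BUG_ON(sizeof(struct gve_xdp_buff) >
	     offsetofend(struct xdp_buff_xsk, cb));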
@@ -456,14 +461,38 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
* Note that this means if the time delta between packet reception and the last
* clock read is greater than ~2 seconds, this will provide invalid results.
*/
-static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx, u32 hwts)
+static ktime_t gve_rx_get_hwtstamp(struct gve_priv *gve, u32 hwts)
{
- u64 last_read = READ_ONCE(rx->gve->last_sync_nic_counter);
- struct sk_buff *skb = rx->ctx.skb_head;
+ u64 last_read = READ_ONCE(gve->last_sync_nic_counter);
u32 low = (u32)last_read;
s32 diff = hwts - low;
- skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(last_read + diff);
+ return ns_to_ktime(last_read + diff);
+}
+
+static void gve_rx_skb_hwtstamp(struct gve_rx_ring *rx,
+ const struct gve_rx_compl_desc_dqo *desc)
+{
+ struct sk_buff *skb = rx->ctx.skb_head;
+
+ if (desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)
+ skb_hwtstamps(skb)->hwtstamp =
+ gve_rx_get_hwtstamp(rx->gve, le32_to_cpu(desc->ts));
+}
+
+int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
+{
+ const struct gve_xdp_buff *ctx = (void *)_ctx;
+
+ if (!ctx->gve->nic_ts_report)
+ return -ENODATA;
+
+ if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+ return -ENODATA;
+
+ *timestamp = gve_rx_get_hwtstamp(ctx->gve,
+ le32_to_cpu(ctx->compl_desc->ts));
+ return 0;
}
static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
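
The extension trick in gve_rx_get_hwtstamp() deserves a worked example: the NIC supplies only the low 32 bits of its nanosecond counter, and the signed 32-bit difference against the low half of the last full 64-bit reading bridges wraparound as long as packet and reference are within 2^31 ns (about 2.1 s, matching the comment above). A standalone demo with hypothetical values:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as gve_rx_get_hwtstamp(): extend a 32-bit hardware
 * timestamp using a recent full 64-bit reference reading.
 */
static uint64_t extend_hwts(uint64_t last_read, uint32_t hwts)
{
	uint32_t low = (uint32_t)last_read;
	int32_t diff = hwts - low;	/* signed: handles wraparound */

	return last_read + diff;
}

int main(void)
{
	/* Reference just below a 32-bit boundary, packet just past it:
	 * prints 200000005, not a value ~4.3 s in the past.
	 */
	printf("%llx\n",
	       (unsigned long long)extend_hwts(0x1fffffff0ULL, 0x5));
	return 0;
}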
@@ -677,16 +706,23 @@ err:
}
static int gve_rx_xsk_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
- struct gve_rx_buf_state_dqo *buf_state, int buf_len,
+ const struct gve_rx_compl_desc_dqo *compl_desc,
+ struct gve_rx_buf_state_dqo *buf_state,
struct bpf_prog *xprog)
{
struct xdp_buff *xdp = buf_state->xsk_buff;
+ int buf_len = compl_desc->packet_len;
struct gve_priv *priv = rx->gve;
+ struct gve_xdp_buff *gve_xdp;
int xdp_act;
xdp->data_end = xdp->data + buf_len;
xsk_buff_dma_sync_for_cpu(xdp);
+ gve_xdp = (void *)xdp;
+ gve_xdp->gve = priv;
+ gve_xdp->compl_desc = compl_desc;
+
if (xprog) {
xdp_act = bpf_prog_run_xdp(xprog, xdp);
buf_len = xdp->data_end - xdp->data;
@@ -776,7 +812,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
xprog = READ_ONCE(priv->xdp_prog);
if (buf_state->xsk_buff)
- return gve_rx_xsk_dqo(napi, rx, buf_state, buf_len, xprog);
+ return gve_rx_xsk_dqo(napi, rx, compl_desc, buf_state, xprog);
/* Page might have not been used for awhile and was likely last written
* by a different thread.
@@ -834,23 +870,26 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (xprog) {
- struct xdp_buff xdp;
+ struct gve_xdp_buff gve_xdp;
void *old_data;
int xdp_act;
- xdp_init_buff(&xdp, buf_state->page_info.buf_size,
+ xdp_init_buff(&gve_xdp.xdp, buf_state->page_info.buf_size,
&rx->xdp_rxq);
- xdp_prepare_buff(&xdp,
+ xdp_prepare_buff(&gve_xdp.xdp,
buf_state->page_info.page_address +
buf_state->page_info.page_offset,
buf_state->page_info.pad,
buf_len, false);
- old_data = xdp.data;
- xdp_act = bpf_prog_run_xdp(xprog, &xdp);
- buf_state->page_info.pad += xdp.data - old_data;
- buf_len = xdp.data_end - xdp.data;
+ gve_xdp.gve = priv;
+ gve_xdp.compl_desc = compl_desc;
+
+ old_data = gve_xdp.xdp.data;
+ xdp_act = bpf_prog_run_xdp(xprog, &gve_xdp.xdp);
+ buf_state->page_info.pad += gve_xdp.xdp.data - old_data;
+ buf_len = gve_xdp.xdp.data_end - gve_xdp.xdp.data;
if (xdp_act != XDP_PASS) {
- gve_xdp_done_dqo(priv, rx, &xdp, xprog, xdp_act,
+ gve_xdp_done_dqo(priv, rx, &gve_xdp.xdp, xprog, xdp_act,
buf_state);
return 0;
}
@@ -944,7 +983,7 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
if (rx->gve->ts_config.rx_filter == HWTSTAMP_FILTER_ALL)
- gve_rx_skb_hwtstamp(rx, le32_to_cpu(desc->ts));
+ gve_rx_skb_hwtstamp(rx, desc);
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index c6ff0968929d..97efc8d27e6f 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -730,7 +730,9 @@ unmap_drop:
gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
}
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
return 0;
}
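
Wrapping the counter bump in u64_stats_update_begin()/end() makes the 64-bit increment tear-free for readers on 32-bit kernels, where the pair expands to a seqcount (on 64-bit builds it compiles away). A minimal sketch of the matching reader pattern, assuming the counters are sampled as in the driver's stats paths:

unsigned int start;
u64 dropped;

do {
	start = u64_stats_fetch_begin(&tx->statss);
	dropped = tx->dropped_pkt;
} while (u64_stats_fetch_retry(&tx->statss, start));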
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 6f1d515673d2..40b89b3e5a31 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -1002,7 +1002,9 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
drop:
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
dev_kfree_skb_any(skb);
return 0;
}
@@ -1324,7 +1326,11 @@ static void remove_miss_completions(struct gve_priv *priv,
/* This indicates the packet was dropped. */
dev_kfree_skb_any(pending_packet->skb);
pending_packet->skb = NULL;
+
+ u64_stats_update_begin(&tx->statss);
tx->dropped_pkt++;
+ u64_stats_update_end(&tx->statss);
+
net_err_ratelimited("%s: No reinjection completion was received for: %d.\n",
priv->dev->name,
(int)(pending_packet - tx->dqo.pending_packets));