diff --git a/Documentation/networking/statistics.rst b/Documentation/networking/statistics.rst
index c9aeb70dafa29e..ec5d14f279e1af 100644
--- a/Documentation/networking/statistics.rst
+++ b/Documentation/networking/statistics.rst
@@ -41,6 +41,29 @@ If `-s` is specified once the detailed errors won't be shown.
 
 `ip` supports JSON formatting via the `-j` option.
 
+For some interfaces, standard XDP statistics are available.
+They can be accessed the same way, e.g. via `ip`::
+
+  $ ip link xdpstats dev enp178s0
+  16: enp178s0:
+      xdp-channel0-rx_xdp_packets: 0
+      xdp-channel0-rx_xdp_bytes: 1
+      xdp-channel0-rx_xdp_errors: 2
+      xdp-channel0-rx_xdp_aborted: 3
+      xdp-channel0-rx_xdp_drop: 4
+      xdp-channel0-rx_xdp_invalid: 5
+      xdp-channel0-rx_xdp_pass: 6
+      xdp-channel0-rx_xdp_redirect: 7
+      xdp-channel0-rx_xdp_redirect_errors: 8
+      xdp-channel0-rx_xdp_tx: 9
+      xdp-channel0-rx_xdp_tx_errors: 10
+      xdp-channel0-tx_xdp_xmit_packets: 11
+      xdp-channel0-tx_xdp_xmit_bytes: 12
+      xdp-channel0-tx_xdp_xmit_errors: 13
+      xdp-channel0-tx_xdp_xmit_full: 14
+
+These are usually per-channel. JSON is also supported via the `-j` option.
+
 Protocol-specific statistics
 ----------------------------
 
@@ -147,6 +170,8 @@ Statistics are reported both in the responses to link information
 requests (`RTM_GETLINK`) and statistic requests (`RTM_GETSTATS`, when
 `IFLA_STATS_LINK_64` bit is set in the `.filter_mask` of the request).
 
+The `IFLA_STATS_LINK_XDP_XSTATS` bit is used to retrieve standard XDP statistics.
+
 ethtool
 -------
 
@@ -206,6 +231,14 @@ Retrieving ethtool statistics is a multi-syscall process, drivers are advised
 to keep the number of statistics constant to avoid race conditions with
 user space trying to read them.
 
+Whether to implement XDP statistics is up to the driver developers, given the
+possible performance impact. If implemented, it is encouraged to export them
+via the generic XDP statistics infrastructure rather than driver-defined
+ethtool stats. This can be achieved by implementing `.ndo_get_xdp_stats` and,
+optionally but preferably, `.ndo_get_xdp_stats_nch`. There are several common
+helper structures and functions in `include/net/xdp.h` to make this simpler
+and keep the code compact.
+
 Statistics must persist across routine operations like bringing the interface
 down and up.
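As a rough sketch of the callback contract described above (assuming the `struct ifla_xdp_stats` layout and `IFLA_XDP_XSTATS_TYPE_XDP` attribute ID this series adds, plus hypothetical `foo_*` driver types), a minimal per-channel implementation could look as follows, mirroring the ena and dpaa2 conversions further down:

/* Minimal sketch: one set of XDP counters per Rx channel, guarded by
 * u64_stats_sync. All foo_* names are hypothetical placeholders; the
 * uAPI pieces (struct ifla_xdp_stats, IFLA_XDP_XSTATS_TYPE_XDP) are the
 * ones introduced by this series.
 */
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct foo_channel {
	struct u64_stats_sync syncp;
	u64 xdp_drop;
	u64 xdp_pass;
	u64 xdp_tx;
	u64 xdp_redirect;
};

struct foo_priv {
	u32 num_channels;
	struct foo_channel **channels;
};

/* Report how many per-channel stats blocks the dump will contain */
static int foo_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id)
{
	const struct foo_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_XDP_XSTATS_TYPE_XDP:
		return priv->num_channels;
	default:
		return -EOPNOTSUPP;
	}
}

/* Fill one struct ifla_xdp_stats per channel, snapshotting each channel's
 * counters consistently via its u64_stats_sync
 */
static int foo_get_xdp_stats(const struct net_device *dev, u32 attr_id,
			     void *attr_data)
{
	const struct foo_priv *priv = netdev_priv(dev);
	struct ifla_xdp_stats *xdp_stats = attr_data;
	u32 i;

	if (attr_id != IFLA_XDP_XSTATS_TYPE_XDP)
		return -EOPNOTSUPP;

	for (i = 0; i < priv->num_channels; i++, xdp_stats++) {
		const struct foo_channel *ch = priv->channels[i];
		u32 start;

		do {
			start = u64_stats_fetch_begin_irq(&ch->syncp);

			xdp_stats->drop = ch->xdp_drop;
			xdp_stats->pass = ch->xdp_pass;
			xdp_stats->tx = ch->xdp_tx;
			xdp_stats->redirect = ch->xdp_redirect;
		} while (u64_stats_fetch_retry_irq(&ch->syncp, start));
	}

	return 0;
}

Both callbacks are then wired into the driver's `struct net_device_ops`, exactly as each conversion below does.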
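Alternatively, drivers adopting the common counter layout can delegate the reporting entirely, as the i40e/ice/igc/ixgbe conversions below do. A sketch under the same hypothetical `foo_*` assumptions, using the `struct xdp_drv_stats` array, `xdp_init_drv_stats()`, `netdev->xstats` and `xdp_get_drv_stats_generic()` helpers this series adds to `include/net/xdp.h`:

/* Sketch only: generic-infrastructure variant. foo_priv is assumed to
 * carry a struct xdp_drv_stats *xdp_stats member here; everything else
 * comes from this series' include/net/xdp.h additions.
 */
#include <linux/slab.h>
#include <net/xdp.h>

static int foo_alloc_xdp_stats(struct foo_priv *priv, struct net_device *dev)
{
	u32 i;

	priv->xdp_stats = kcalloc(priv->num_channels,
				  sizeof(*priv->xdp_stats), GFP_KERNEL);
	if (!priv->xdp_stats)
		return -ENOMEM;

	for (i = 0; i < priv->num_channels; i++)
		xdp_init_drv_stats(priv->xdp_stats + i);

	/* xdp_get_drv_stats_generic() reads the counters from dev->xstats */
	dev->xstats = priv->xdp_stats;

	return 0;
}

/*
 * static const struct net_device_ops foo_netdev_ops = {
 *	...
 *	.ndo_get_xdp_stats_nch	= foo_get_xdp_stats_nch,
 *	.ndo_get_xdp_stats	= xdp_get_drv_stats_generic,
 * };
 */

On the hot path, these drivers accumulate per-frame results into an on-stack `struct xdp_rx_drv_stats_local` and flush it once per NAPI poll via `xdp_update_rx_drv_stats()`, keeping the per-packet cost down to plain local increments.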
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 7d5d885d85d5ed..83e9b85cc9981f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -3313,12 +3313,65 @@ static void ena_get_stats64(struct net_device *netdev, stats->tx_errors = 0; } +static int ena_get_xdp_stats_nch(const struct net_device *netdev, u32 attr_id) +{ + const struct ena_adapter *adapter = netdev_priv(netdev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return adapter->num_io_queues; + default: + return -EOPNOTSUPP; + } +} + +static int ena_get_xdp_stats(const struct net_device *netdev, u32 attr_id, + void *attr_data) +{ + const struct ena_adapter *adapter = netdev_priv(netdev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < adapter->num_io_queues; i++) { + const struct u64_stats_sync *syncp; + const struct ena_stats_rx *stats; + u32 start; + + stats = &adapter->rx_ring[i].rx_stats; + syncp = &adapter->rx_ring[i].syncp; + + do { + start = u64_stats_fetch_begin_irq(syncp); + + xdp_stats->drop = stats->xdp_drop; + xdp_stats->pass = stats->xdp_pass; + xdp_stats->tx = stats->xdp_tx; + xdp_stats->redirect = stats->xdp_redirect; + xdp_stats->aborted = stats->xdp_aborted; + xdp_stats->invalid = stats->xdp_invalid; + } while (u64_stats_fetch_retry_irq(syncp, start)); + + xdp_stats++; + } + + return 0; +} + static const struct net_device_ops ena_netdev_ops = { .ndo_open = ena_open, .ndo_stop = ena_close, .ndo_start_xmit = ena_start_xmit, .ndo_select_queue = ena_select_queue, .ndo_get_stats64 = ena_get_stats64, + .ndo_get_xdp_stats_nch = ena_get_xdp_stats_nch, + .ndo_get_xdp_stats = ena_get_xdp_stats, .ndo_tx_timeout = ena_tx_timeout, .ndo_change_mtu = ena_change_mtu, .ndo_set_mac_address = NULL, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 6451c8383639fc..7715aecedacc2f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -1973,6 +1973,49 @@ static void dpaa2_eth_get_stats(struct net_device *net_dev, } } +static int dpaa2_eth_get_xdp_stats_nch(const struct net_device *net_dev, + u32 attr_id) +{ + const struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return priv->num_channels; + default: + return -EOPNOTSUPP; + } +} + +static int dpaa2_eth_get_xdp_stats(const struct net_device *net_dev, + u32 attr_id, void *attr_data) +{ + const struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < priv->num_channels; i++) { + const struct dpaa2_eth_ch_stats *ch_stats; + + ch_stats = &priv->channel[i]->stats; + + xdp_stats->drop = ch_stats->xdp_drop; + xdp_stats->redirect = ch_stats->xdp_redirect; + xdp_stats->tx = ch_stats->xdp_tx; + xdp_stats->tx_errors = ch_stats->xdp_tx_err; + + xdp_stats++; + } + + return 0; +} + /* Copy mac unicast addresses from @net_dev to @priv. * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
*/ @@ -2601,6 +2644,8 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_stop = dpaa2_eth_stop, .ndo_set_mac_address = dpaa2_eth_set_addr, .ndo_get_stats64 = dpaa2_eth_get_stats, + .ndo_get_xdp_stats_nch = dpaa2_eth_get_xdp_stats_nch, + .ndo_get_xdp_stats = dpaa2_eth_get_xdp_stats, .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, .ndo_set_features = dpaa2_eth_set_features, .ndo_eth_ioctl = dpaa2_eth_ioctl, diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 504e12554079e3..ec62765377a764 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2575,6 +2575,54 @@ struct net_device_stats *enetc_get_stats(struct net_device *ndev) return stats; } +int enetc_get_xdp_stats_nch(const struct net_device *ndev, u32 attr_id) +{ + const struct enetc_ndev_priv *priv = netdev_priv(ndev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return max(priv->num_rx_rings, priv->num_tx_rings); + default: + return -EOPNOTSUPP; + } +} + +int enetc_get_xdp_stats(const struct net_device *ndev, u32 attr_id, + void *attr_data) +{ + struct ifla_xdp_stats *xdp_iter, *xdp_stats = attr_data; + const struct enetc_ndev_priv *priv = netdev_priv(ndev); + const struct enetc_ring_stats *stats; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < priv->num_tx_rings; i++) { + stats = &priv->tx_ring[i]->stats; + xdp_iter = xdp_stats + i; + + xdp_iter->tx = stats->xdp_tx; + xdp_iter->tx_errors = stats->xdp_tx_drops; + } + + for (i = 0; i < priv->num_rx_rings; i++) { + stats = &priv->rx_ring[i]->stats; + xdp_iter = xdp_stats + i; + + xdp_iter->drop = stats->xdp_drops; + xdp_iter->redirect = stats->xdp_redirect; + xdp_iter->redirect_errors = stats->xdp_redirect_failures; + xdp_iter->redirect_errors += stats->xdp_redirect_sg; + } + + return 0; +} + static int enetc_set_rss(struct net_device *ndev, int en) { struct enetc_ndev_priv *priv = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h index fb39e406b7fc00..8f175f0194e332 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.h +++ b/drivers/net/ethernet/freescale/enetc/enetc.h @@ -389,6 +389,9 @@ void enetc_start(struct net_device *ndev); void enetc_stop(struct net_device *ndev); netdev_tx_t enetc_xmit(struct sk_buff *skb, struct net_device *ndev); struct net_device_stats *enetc_get_stats(struct net_device *ndev); +int enetc_get_xdp_stats_nch(const struct net_device *ndev, u32 attr_id); +int enetc_get_xdp_stats(const struct net_device *ndev, u32 attr_id, + void *attr_data); int enetc_set_features(struct net_device *ndev, netdev_features_t features); int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index fe6a544f37f054..c7776b842a915f 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -729,6 +729,8 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_stop = enetc_close, .ndo_start_xmit = enetc_xmit, .ndo_get_stats = enetc_get_stats, + .ndo_get_xdp_stats_nch = enetc_get_xdp_stats_nch, + .ndo_get_xdp_stats = enetc_get_xdp_stats, .ndo_set_mac_address = enetc_pf_set_mac_addr, .ndo_set_rx_mode = enetc_pf_set_rx_mode, .ndo_vlan_rx_add_vid = enetc_vlan_rx_add_vid, diff --git 
a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 4d939af0a626c7..2e2a3936332f45 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -942,6 +942,7 @@ struct i40e_vsi { irqreturn_t (*irq_handler)(int irq, void *data); unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ + struct xdp_drv_stats *xdp_stats; /* XDP/XSK stats array */ } ____cacheline_internodealigned_in_smp; struct i40e_netdev_priv { diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index e118cf9265c79e..e3619fc13630b8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -11087,7 +11087,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi) static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) { struct i40e_ring **next_rings; - int size; + int size, i; int ret = 0; /* allocate memory for both Tx, XDP Tx and Rx ring pointers */ @@ -11103,6 +11103,15 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) } vsi->rx_rings = next_rings; + vsi->xdp_stats = kcalloc(vsi->alloc_queue_pairs, + sizeof(*vsi->xdp_stats), + GFP_KERNEL); + if (!vsi->xdp_stats) + goto err_xdp_stats; + + for (i = 0; i < vsi->alloc_queue_pairs; i++) + xdp_init_drv_stats(vsi->xdp_stats + i); + if (alloc_qvectors) { /* allocate memory for q_vector pointers */ size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors; @@ -11115,6 +11124,10 @@ static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors) return ret; err_vectors: + kfree(vsi->xdp_stats); + vsi->xdp_stats = NULL; + +err_xdp_stats: kfree(vsi->tx_rings); return ret; } @@ -11225,6 +11238,10 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors) kfree(vsi->q_vectors); vsi->q_vectors = NULL; } + + kfree(vsi->xdp_stats); + vsi->xdp_stats = NULL; + kfree(vsi->tx_rings); vsi->tx_rings = NULL; vsi->rx_rings = NULL; @@ -11347,6 +11364,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; ring->itr_setting = pf->tx_itr_default; + ring->xdp_stats = vsi->xdp_stats + i; WRITE_ONCE(vsi->tx_rings[i], ring++); if (!i40e_enabled_xdp_vsi(vsi)) @@ -11365,6 +11383,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->flags = I40E_TXR_FLAGS_WB_ON_ITR; set_ring_xdp(ring); ring->itr_setting = pf->tx_itr_default; + ring->xdp_stats = vsi->xdp_stats + i; WRITE_ONCE(vsi->xdp_rings[i], ring++); setup_rx: @@ -11378,6 +11397,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi) ring->size = 0; ring->dcb_tc = 0; ring->itr_setting = pf->rx_itr_default; + ring->xdp_stats = vsi->xdp_stats + i; WRITE_ONCE(vsi->rx_rings[i], ring); } @@ -13308,6 +13328,19 @@ static int i40e_xdp(struct net_device *dev, } } +static int i40e_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + const struct i40e_netdev_priv *np = netdev_priv(dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + return np->vsi->alloc_queue_pairs; + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops i40e_netdev_ops = { .ndo_open = i40e_open, .ndo_stop = i40e_close, @@ -13343,6 +13376,8 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_bpf = i40e_xdp, .ndo_xdp_xmit = i40e_xdp_xmit, .ndo_xsk_wakeup = i40e_xsk_wakeup, + .ndo_get_xdp_stats_nch = i40e_get_xdp_stats_nch, + .ndo_get_xdp_stats = 
xdp_get_drv_stats_generic, .ndo_dfwd_add_station = i40e_fwd_add, .ndo_dfwd_del_station = i40e_fwd_del, }; @@ -13487,6 +13522,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) netdev->netdev_ops = &i40e_netdev_ops; netdev->watchdog_timeo = 5 * HZ; i40e_set_ethtool_ops(netdev); + netdev->xstats = vsi->xdp_stats; /* MTU range: 68 - 9706 */ netdev->min_mtu = ETH_MIN_MTU; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 10a83e5385c703..8854004fbec317 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1027,8 +1027,11 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi, i40e_update_tx_stats(tx_ring, total_packets, total_bytes); i40e_arm_wb(tx_ring, vsi, budget); - if (ring_is_xdp(tx_ring)) + if (ring_is_xdp(tx_ring)) { + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xdp_tx, + total_packets, total_bytes); return !!budget; + } /* notify netdev of completed buffers */ netdev_tx_completed_queue(txring_txq(tx_ring), @@ -2290,8 +2293,10 @@ int i40e_xmit_xdp_tx_ring(struct xdp_buff *xdp, struct i40e_ring *xdp_ring) * i40e_run_xdp - run an XDP program * @rx_ring: Rx ring being processed * @xdp: XDP buffer containing the frame + * @lrstats: onstack Rx XDP stats **/ -static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) +static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; @@ -2303,33 +2308,48 @@ static int i40e_run_xdp(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (!xdp_prog) goto xdp_out; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: + lrstats->pass++; break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); - if (result == I40E_XDP_CONSUMED) + if (result == I40E_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } result = I40E_XDP_REDIR; + lrstats->redirect++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ + /* handle aborts by dropping packet */ + result = I40E_XDP_CONSUMED; + break; case XDP_DROP: result = I40E_XDP_CONSUMED; + lrstats->drop++; break; } xdp_out: @@ -2441,6 +2461,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0, frame_sz = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; unsigned int offset = rx_ring->rx_offset; struct sk_buff *skb = rx_ring->skb; unsigned int xdp_xmit = 0; @@ -2512,7 +2533,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = i40e_rx_frame_truesize(rx_ring, size); #endif - xdp_res = i40e_run_xdp(rx_ring, &xdp); + xdp_res = i40e_run_xdp(rx_ring, &xdp, &lrstats); } if (xdp_res) { @@ -2569,6 +2590,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) 
rx_ring->skb = skb; i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xdp_rx, &lrstats); /* guarantee a trip back through this routine if there was a failure */ return failure ? budget : (int)total_rx_packets; @@ -3696,6 +3718,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, dma_addr_t dma; if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) { + xdp_update_tx_drv_full(&xdp_ring->xdp_stats->xdp_tx); xdp_ring->tx_stats.tx_busy++; return I40E_XDP_CONSUMED; } @@ -3923,5 +3946,8 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (unlikely(flags & XDP_XMIT_FLUSH)) i40e_xdp_ring_update_tail(xdp_ring); + if (unlikely(nxmit < n)) + xdp_update_tx_drv_err(&xdp_ring->xdp_stats->xdp_tx, n - nxmit); + return nxmit; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index bfc2845c99d1cd..dcfcf20e2ea9a6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -368,6 +368,7 @@ struct i40e_ring { struct i40e_tx_queue_stats tx_stats; struct i40e_rx_queue_stats rx_stats; }; + struct xdp_drv_stats *xdp_stats; unsigned int size; /* length of descriptor ring in bytes */ dma_addr_t dma; /* physical address of ring */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index ea06e957393e62..54c5b8abbb53d6 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -143,16 +143,21 @@ int i40e_xsk_pool_setup(struct i40e_vsi *vsi, struct xsk_buff_pool *pool, * i40e_run_xdp_zc - Executes an XDP program on an xdp_buff * @rx_ring: Rx ring * @xdp: xdp_buff used as input to the XDP program + * @lrstats: onstack Rx XDP stats structure * * Returns any of I40E_XDP_{PASS, CONSUMED, TX, REDIR} **/ -static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) +static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = I40E_XDP_PASS; struct i40e_ring *xdp_ring; struct bpf_prog *xdp_prog; u32 act; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + /* NB! xdp_prog will always be !NULL, due to the fact that * this path is enabled by setting an XDP program. 
*/ @@ -161,29 +166,41 @@ static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } + lrstats->redirect++; return I40E_XDP_REDIR; } switch (act) { case XDP_PASS: + lrstats->pass++; break; case XDP_TX: xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; result = i40e_xmit_xdp_tx_ring(xdp, xdp_ring); - if (result == I40E_XDP_CONSUMED) + if (result == I40E_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ + /* handle aborts by dropping packet */ + result = I40E_XDP_CONSUMED; + break; case XDP_DROP: result = I40E_XDP_CONSUMED; + lrstats->drop++; break; } return result; @@ -325,6 +342,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = I40E_DESC_UNUSED(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; u16 next_to_clean = rx_ring->next_to_clean; u16 count_mask = rx_ring->count - 1; unsigned int xdp_res, xdp_xmit = 0; @@ -366,7 +384,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) xsk_buff_set_size(bi, size); xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); - xdp_res = i40e_run_xdp_zc(rx_ring, bi); + xdp_res = i40e_run_xdp_zc(rx_ring, bi, &lrstats); i40e_handle_xdp_result_zc(rx_ring, bi, rx_desc, &rx_packets, &rx_bytes, size, xdp_res); total_rx_packets += rx_packets; @@ -383,6 +401,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) i40e_finalize_xdp_rx(rx_ring, xdp_xmit); i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xsk_rx, &lrstats); if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { if (failure || next_to_clean == rx_ring->next_to_use) @@ -489,6 +508,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget) i40e_xdp_ring_update_tail(xdp_ring); i40e_update_tx_stats(xdp_ring, nb_pkts, total_bytes); + xdp_update_tx_drv_stats(&xdp_ring->xdp_stats->xsk_tx, nb_pkts, + total_bytes); return nb_pkts < budget; } diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b67ad51cbcc9a3..6cef8b4e887ff7 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -387,8 +387,10 @@ struct ice_vsi { struct ice_tc_cfg tc_cfg; struct bpf_prog *xdp_prog; struct ice_tx_ring **xdp_rings; /* XDP ring array */ + struct xdp_drv_stats *xdp_stats; /* XDP stats array */ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */ u16 num_xdp_txq; /* Used XDP queues */ + u16 alloc_xdp_stats; /* Length of xdp_stats array */ u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */ struct net_device **target_netdevs; diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 40562600a8cf2b..934152216df5e5 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -73,6 +73,7 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct device *dev; + u32 i; dev = ice_pf_to_dev(pf); if (vsi->type == ICE_VSI_CHNL) @@ -115,8 +116,23 @@ 
static int ice_vsi_alloc_arrays(struct ice_vsi *vsi) if (!vsi->af_xdp_zc_qps) goto err_zc_qps; + vsi->alloc_xdp_stats = max_t(u16, vsi->alloc_rxq, num_possible_cpus()); + + vsi->xdp_stats = kcalloc(vsi->alloc_xdp_stats, sizeof(*vsi->xdp_stats), + GFP_KERNEL); + if (!vsi->xdp_stats) + goto err_xdp_stats; + + for (i = 0; i < vsi->alloc_xdp_stats; i++) + xdp_init_drv_stats(vsi->xdp_stats + i); + return 0; +err_xdp_stats: + vsi->alloc_xdp_stats = 0; + + bitmap_free(vsi->af_xdp_zc_qps); + vsi->af_xdp_zc_qps = NULL; err_zc_qps: devm_kfree(dev, vsi->q_vectors); err_vectors: @@ -317,6 +333,10 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi) dev = ice_pf_to_dev(pf); + kfree(vsi->xdp_stats); + vsi->xdp_stats = NULL; + vsi->alloc_xdp_stats = 0; + if (vsi->af_xdp_zc_qps) { bitmap_free(vsi->af_xdp_zc_qps); vsi->af_xdp_zc_qps = NULL; @@ -1422,6 +1442,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) ring->netdev = vsi->netdev; ring->dev = dev; ring->count = vsi->num_rx_desc; + ring->xdp_stats = vsi->xdp_stats + i; WRITE_ONCE(vsi->rx_rings[i], ring); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index f2a5f2f965d12f..94d0bf440a491e 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2481,6 +2481,7 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi) xdp_ring->next_rs = ICE_TX_THRESH - 1; xdp_ring->dev = dev; xdp_ring->count = vsi->num_tx_desc; + xdp_ring->xdp_stats = vsi->xdp_stats + i; WRITE_ONCE(vsi->xdp_rings[i], xdp_ring); if (ice_setup_tx_ring(xdp_ring)) goto free_xdp_rings; @@ -2837,6 +2838,19 @@ static int ice_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static int ice_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + const struct ice_netdev_priv *np = netdev_priv(dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + return np->vsi->alloc_xdp_stats; + default: + return -EOPNOTSUPP; + } +} + /** * ice_ena_misc_vector - enable the non-queue interrupts * @pf: board private structure @@ -3280,6 +3294,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi) ice_set_netdev_features(netdev); ice_set_ops(netdev); + netdev->xstats = vsi->xdp_stats; if (vsi->type == ICE_VSI_PF) { SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); @@ -8608,4 +8623,6 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_bpf = ice_xdp, .ndo_xdp_xmit = ice_xdp_xmit, .ndo_xsk_wakeup = ice_xsk_wakeup, + .ndo_get_xdp_stats_nch = ice_get_xdp_stats_nch, + .ndo_get_xdp_stats = xdp_get_drv_stats_generic, }; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index bc3ba19dc88f8a..d32d6f2975b5d3 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -532,19 +532,25 @@ ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused s * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action + * @lrstats: onstack Rx XDP stats * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static int ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, + struct xdp_rx_drv_stats_local *lrstats) { int err; u32 act; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { 
case XDP_PASS: + lrstats->pass++; return ICE_XDP_PASS; case XDP_TX: if (static_branch_unlikely(&ice_xdp_locking_key)) @@ -552,22 +558,31 @@ ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring); if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); - if (err == ICE_XDP_CONSUMED) + if (err == ICE_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; return err; case XDP_REDIRECT: err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } + lrstats->redirect++; return ICE_XDP_REDIR; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; + return ICE_XDP_CONSUMED; case XDP_DROP: + lrstats->drop++; return ICE_XDP_CONSUMED; } } @@ -627,6 +642,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, if (static_branch_unlikely(&ice_xdp_locking_key)) spin_unlock(&xdp_ring->tx_lock); + if (unlikely(nxmit < n)) + xdp_update_tx_drv_err(&xdp_ring->xdp_stats->xdp_tx, n - nxmit); + return nxmit; } @@ -1089,6 +1107,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; unsigned int offset = rx_ring->rx_offset; struct ice_tx_ring *xdp_ring = NULL; unsigned int xdp_res, xdp_xmit = 0; @@ -1173,7 +1192,8 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) if (!xdp_prog) goto construct_skb; - xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring); + xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring, + &lrstats); if (!xdp_res) goto construct_skb; if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) { @@ -1254,6 +1274,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget) rx_ring->skb = skb; ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xdp_rx, &lrstats); /* guarantee a trip back through this routine if there was a failure */ return failure ? 
budget : (int)total_rx_pkts; diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h index c56dd174990316..c54be60c34794f 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.h +++ b/drivers/net/ethernet/intel/ice/ice_txrx.h @@ -284,9 +284,9 @@ struct ice_rx_ring { struct ice_rxq_stats rx_stats; struct ice_q_stats stats; struct u64_stats_sync syncp; + struct xdp_drv_stats *xdp_stats; - struct rcu_head rcu; /* to avoid race on free */ - /* CL4 - 3rd cacheline starts here */ + /* CL4 - 4th cacheline starts here */ struct ice_channel *ch; struct bpf_prog *xdp_prog; struct ice_tx_ring *xdp_ring; @@ -298,6 +298,9 @@ struct ice_rx_ring { u8 dcb_tc; /* Traffic class of ring */ u8 ptp_rx; u8 flags; + + /* CL5 - 5th cacheline starts here */ + struct rcu_head rcu; /* to avoid race on free */ } ____cacheline_internodealigned_in_smp; struct ice_tx_ring { @@ -324,13 +327,16 @@ struct ice_tx_ring { /* stats structs */ struct ice_q_stats stats; struct u64_stats_sync syncp; - struct ice_txq_stats tx_stats; + struct xdp_drv_stats *xdp_stats; /* CL3 - 3rd cacheline starts here */ + struct ice_txq_stats tx_stats; struct rcu_head rcu; /* to avoid race on free */ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */ struct ice_channel *ch; struct ice_ptp_tx *tx_tstamps; + + /* CL4 - 4th cacheline starts here */ spinlock_t tx_lock; u32 txq_teid; /* Added Tx queue TEID */ #define ICE_TX_FLAGS_RING_XDP BIT(0) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c index 1dd7e84f41f877..7dc287bc3a1a27 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c @@ -258,6 +258,8 @@ static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring) xdp_ring->next_dd = ICE_TX_THRESH - 1; xdp_ring->next_to_clean = ntc; ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes); + xdp_update_tx_drv_stats(&xdp_ring->xdp_stats->xdp_tx, total_pkts, + total_bytes); } /** @@ -277,6 +279,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring) ice_clean_xdp_irq(xdp_ring); if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) { + xdp_update_tx_drv_full(&xdp_ring->xdp_stats->xdp_tx); xdp_ring->tx_stats.tx_busy++; return ICE_XDP_CONSUMED; } diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index ff55cb415b110f..62ef47a38d9394 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -454,42 +454,58 @@ ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr) * @xdp: xdp_buff used as input to the XDP program * @xdp_prog: XDP program to run * @xdp_ring: ring to be used for XDP_TX action + * @lrstats: onstack Rx XDP stats * * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR} */ static int ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp, - struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring) + struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = ICE_XDP_PASS; u32 act; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + act = bpf_prog_run_xdp(xdp_prog, xdp); if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } + lrstats->redirect++; return ICE_XDP_REDIR; } switch (act) { case XDP_PASS: + lrstats->pass++; break; case XDP_TX: result = 
ice_xmit_xdp_buff(xdp, xdp_ring); - if (result == ICE_XDP_CONSUMED) + if (result == ICE_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; + result = ICE_XDP_CONSUMED; + break; case XDP_DROP: result = ICE_XDP_CONSUMED; + lrstats->drop++; break; } @@ -507,6 +523,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) { unsigned int total_rx_bytes = 0, total_rx_packets = 0; u16 cleaned_count = ICE_DESC_UNUSED(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; struct ice_tx_ring *xdp_ring; unsigned int xdp_xmit = 0; struct bpf_prog *xdp_prog; @@ -548,7 +565,8 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) xsk_buff_set_size(*xdp, size); xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool); - xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring); + xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring, + &lrstats); if (xdp_res) { if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) xdp_xmit |= xdp_res; @@ -598,6 +616,7 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget) ice_finalize_xdp_rx(xdp_ring, xdp_xmit); ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xsk_rx, &lrstats); if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) @@ -629,6 +648,7 @@ static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget) struct ice_tx_buf *tx_buf; if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) { + xdp_update_tx_drv_full(&xdp_ring->xdp_stats->xsk_tx); xdp_ring->tx_stats.tx_busy++; work_done = false; break; @@ -686,11 +706,11 @@ ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf) */ bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget) { - int total_packets = 0, total_bytes = 0; s16 ntc = xdp_ring->next_to_clean; + u32 xdp_frames = 0, xdp_bytes = 0; + u32 xsk_frames = 0, xsk_bytes = 0; struct ice_tx_desc *tx_desc; struct ice_tx_buf *tx_buf; - u32 xsk_frames = 0; bool xmit_done; tx_desc = ICE_TX_DESC(xdp_ring, ntc); @@ -702,13 +722,14 @@ bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget) cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE))) break; - total_bytes += tx_buf->bytecount; - total_packets++; - if (tx_buf->raw_buf) { ice_clean_xdp_tx_buf(xdp_ring, tx_buf); tx_buf->raw_buf = NULL; + + xdp_bytes += tx_buf->bytecount; + xdp_frames++; } else { + xsk_bytes += tx_buf->bytecount; xsk_frames++; } @@ -736,7 +757,13 @@ bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget) if (xsk_uses_need_wakeup(xdp_ring->xsk_pool)) xsk_set_tx_need_wakeup(xdp_ring->xsk_pool); - ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes); + ice_update_tx_ring_stats(xdp_ring, xdp_frames + xsk_frames, + xdp_bytes + xsk_bytes); + xdp_update_tx_drv_stats(&xdp_ring->xdp_stats->xdp_tx, xdp_frames, + xdp_bytes); + xdp_update_tx_drv_stats(&xdp_ring->xdp_stats->xsk_tx, xsk_frames, + xsk_bytes); + xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK); return budget > 0 && xmit_done; diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index 2d3daf022651ce..a6c5355b82fcfd 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -303,6 +303,11 @@ struct igb_rx_queue_stats { u64 
alloc_failed; }; +struct igb_xdp_stats { + struct xdp_rx_drv_stats rx; + struct xdp_tx_drv_stats tx; +} ____cacheline_aligned; + struct igb_ring_container { struct igb_ring *ring; /* pointer to linked list of rings */ unsigned int total_bytes; /* total bytes processed this int */ @@ -356,6 +361,7 @@ struct igb_ring { struct u64_stats_sync rx_syncp; }; }; + struct igb_xdp_stats *xdp_stats; struct xdp_rxq_info xdp_rxq; } ____cacheline_internodealigned_in_smp; @@ -531,6 +537,8 @@ struct igb_mac_addr { #define IGB_MAC_STATE_SRC_ADDR 0x4 #define IGB_MAC_STATE_QUEUE_STEERING 0x8 +#define IGB_MAX_ALLOC_QUEUES 16 + /* board specific private data structure */ struct igb_adapter { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -554,11 +562,11 @@ struct igb_adapter { u16 tx_work_limit; u32 tx_timeout_count; int num_tx_queues; - struct igb_ring *tx_ring[16]; + struct igb_ring *tx_ring[IGB_MAX_ALLOC_QUEUES]; /* RX */ int num_rx_queues; - struct igb_ring *rx_ring[16]; + struct igb_ring *rx_ring[IGB_MAX_ALLOC_QUEUES]; u32 max_frame_size; u32 min_frame_size; @@ -664,6 +672,8 @@ struct igb_adapter { struct igb_mac_addr *mac_table; struct vf_mac_filter vf_macs; struct vf_mac_filter *vf_mac_list; + + struct igb_xdp_stats *xdp_stats; }; /* flags controlling PTP/1588 function */ diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 18a019a4718221..c4e1ea9bc4a82a 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1266,6 +1266,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, u64_stats_init(&ring->tx_syncp); u64_stats_init(&ring->tx_syncp2); + ring->xdp_stats = adapter->xdp_stats + txr_idx; /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -1300,6 +1301,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter, ring->queue_index = rxr_idx; u64_stats_init(&ring->rx_syncp); + ring->xdp_stats = adapter->xdp_stats + rxr_idx; /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; @@ -2973,6 +2975,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n, nxmit++; } + if (unlikely(nxmit < n)) + xdp_update_tx_drv_err(&tx_ring->xdp_stats->tx, n - nxmit); + __netif_tx_unlock(nq); if (unlikely(flags & XDP_XMIT_FLUSH)) @@ -2981,6 +2986,42 @@ static int igb_xdp_xmit(struct net_device *dev, int n, return nxmit; } +static int igb_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return IGB_MAX_ALLOC_QUEUES; + default: + return -EOPNOTSUPP; + } +} + +static int igb_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct igb_adapter *adapter = netdev_priv(dev); + const struct igb_xdp_stats *drv_iter = adapter->xdp_stats; + struct ifla_xdp_stats *iter = attr_data; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < IGB_MAX_ALLOC_QUEUES; i++) { + xdp_fetch_rx_drv_stats(iter, &drv_iter->rx); + xdp_fetch_tx_drv_stats(iter, &drv_iter->tx); + + drv_iter++; + iter++; + } + + return 0; +} + static const struct net_device_ops igb_netdev_ops = { .ndo_open = igb_open, .ndo_stop = igb_close, @@ -3007,6 +3048,8 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_setup_tc = igb_setup_tc, .ndo_bpf = igb_xdp, .ndo_xdp_xmit = igb_xdp_xmit, + .ndo_get_xdp_stats_nch = igb_get_xdp_stats_nch, + .ndo_get_xdp_stats = igb_get_xdp_stats, }; /** @@ -3620,6 +3663,7 @@ static int igb_probe(struct 
pci_dev *pdev, const struct pci_device_id *ent) if (hw->flash_address) iounmap(hw->flash_address); err_sw_init: + kfree(adapter->xdp_stats); kfree(adapter->mac_table); kfree(adapter->shadow_vfta); igb_clear_interrupt_scheme(adapter); @@ -3833,6 +3877,7 @@ static void igb_remove(struct pci_dev *pdev) iounmap(hw->flash_address); pci_release_mem_regions(pdev); + kfree(adapter->xdp_stats); kfree(adapter->mac_table); kfree(adapter->shadow_vfta); free_netdev(netdev); @@ -3962,6 +4007,7 @@ static int igb_sw_init(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; + u32 i; pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); @@ -4019,6 +4065,19 @@ static int igb_sw_init(struct igb_adapter *adapter) if (!adapter->shadow_vfta) return -ENOMEM; + adapter->xdp_stats = kcalloc(IGB_MAX_ALLOC_QUEUES, + sizeof(*adapter->xdp_stats), + GFP_KERNEL); + if (!adapter->xdp_stats) + return -ENOMEM; + + for (i = 0; i < IGB_MAX_ALLOC_QUEUES; i++) { + struct igb_xdp_stats *xdp_stats = adapter->xdp_stats + i; + + xdp_init_rx_drv_stats(&xdp_stats->rx); + xdp_init_tx_drv_stats(&xdp_stats->tx); + } + /* This call may decrease the number of queues */ if (igb_init_interrupt_scheme(adapter, true)) { dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); @@ -6264,8 +6323,10 @@ int igb_xmit_xdp_ring(struct igb_adapter *adapter, len = xdpf->len; - if (unlikely(!igb_desc_unused(tx_ring))) + if (unlikely(!igb_desc_unused(tx_ring))) { + xdp_update_tx_drv_full(&tx_ring->xdp_stats->tx); return IGB_XDP_CONSUMED; + } dma = dma_map_single(tx_ring->dev, xdpf->data, len, DMA_TO_DEVICE); if (dma_mapping_error(tx_ring->dev, dma)) @@ -8045,6 +8106,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) unsigned int total_bytes = 0, total_packets = 0; unsigned int budget = q_vector->tx.work_limit; unsigned int i = tx_ring->next_to_clean; + u32 xdp_packets = 0, xdp_bytes = 0; if (test_bit(__IGB_DOWN, &adapter->state)) return true; @@ -8075,10 +8137,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) total_packets += tx_buffer->gso_segs; /* free the skb */ - if (tx_buffer->type == IGB_TYPE_SKB) + if (tx_buffer->type == IGB_TYPE_SKB) { napi_consume_skb(tx_buffer->skb, napi_budget); - else + } else { xdp_return_frame(tx_buffer->xdpf); + xdp_bytes += tx_buffer->bytecount; + xdp_packets++; + } /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -8135,6 +8200,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget) tx_ring->tx_stats.bytes += total_bytes; tx_ring->tx_stats.packets += total_packets; u64_stats_update_end(&tx_ring->tx_syncp); + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->tx, xdp_packets, + xdp_bytes); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; @@ -8393,7 +8460,8 @@ static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring, static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, struct igb_ring *rx_ring, - struct xdp_buff *xdp) + struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = IGB_XDP_PASS; struct bpf_prog *xdp_prog; @@ -8404,32 +8472,46 @@ static struct sk_buff *igb_run_xdp(struct igb_adapter *adapter, if (!xdp_prog) goto xdp_out; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: + lrstats->pass++; 
break; case XDP_TX: result = igb_xdp_xmit_back(adapter, xdp); - if (result == IGB_XDP_CONSUMED) + if (result == IGB_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } result = IGB_XDP_REDIR; + lrstats->redirect++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; + result = IGB_XDP_CONSUMED; + break; case XDP_DROP: result = IGB_XDP_CONSUMED; + lrstats->drop++; break; } xdp_out: @@ -8677,6 +8759,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) { struct igb_adapter *adapter = q_vector->adapter; struct igb_ring *rx_ring = q_vector->rx.ring; + struct xdp_rx_drv_stats_local lrstats = { }; struct sk_buff *skb = rx_ring->skb; unsigned int total_bytes = 0, total_packets = 0; u16 cleaned_count = igb_desc_unused(rx_ring); @@ -8740,7 +8823,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = igb_rx_frame_truesize(rx_ring, size); #endif - skb = igb_run_xdp(adapter, rx_ring, &xdp); + skb = igb_run_xdp(adapter, rx_ring, &xdp, &lrstats); } if (IS_ERR(skb)) { @@ -8814,6 +8897,7 @@ static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget) rx_ring->rx_stats.packets += total_packets; rx_ring->rx_stats.bytes += total_bytes; u64_stats_update_end(&rx_ring->rx_syncp); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->rx, &lrstats); q_vector->rx.total_packets += total_packets; q_vector->rx.total_bytes += total_bytes; diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 3e386c38d016c4..ec46134227ee50 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -21,6 +21,8 @@ void igc_ethtool_set_ops(struct net_device *); /* Transmit and receive queues */ #define IGC_MAX_RX_QUEUES 4 #define IGC_MAX_TX_QUEUES 4 +#define IGC_MAX_QUEUES max(IGC_MAX_RX_QUEUES, \ + IGC_MAX_TX_QUEUES) #define MAX_Q_VECTORS 8 #define MAX_STD_JUMBO_FRAME_SIZE 9216 @@ -125,6 +127,7 @@ struct igc_ring { struct sk_buff *skb; }; }; + struct xdp_drv_stats *xdp_stats; struct xdp_rxq_info xdp_rxq; struct xsk_buff_pool *xsk_pool; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 8e448288ee2657..2ffe4b2bfde7a1 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -2148,8 +2148,10 @@ static int igc_xdp_init_tx_descriptor(struct igc_ring *ring, u32 cmd_type, olinfo_status; int err; - if (!igc_desc_unused(ring)) + if (!igc_desc_unused(ring)) { + xdp_update_tx_drv_full(&ring->xdp_stats->xdp_tx); return -EBUSY; + } buffer = &ring->tx_buffer_info[ring->next_to_use]; err = igc_xdp_init_tx_buffer(buffer, xdpf, ring); @@ -2214,36 +2216,51 @@ static int igc_xdp_xmit_back(struct igc_adapter *adapter, struct xdp_buff *xdp) /* This function assumes rcu_read_lock() is held by the caller. 
*/ static int __igc_xdp_run_prog(struct igc_adapter *adapter, struct bpf_prog *prog, - struct xdp_buff *xdp) + struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { - u32 act = bpf_prog_run_xdp(prog, xdp); + u32 act; + + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: + lrstats->pass++; return IGC_XDP_PASS; case XDP_TX: - if (igc_xdp_xmit_back(adapter, xdp) < 0) + if (igc_xdp_xmit_back(adapter, xdp) < 0) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; return IGC_XDP_TX; case XDP_REDIRECT: - if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) + if (xdp_do_redirect(adapter->netdev, xdp, prog) < 0) { + lrstats->redirect_errors++; goto out_failure; + } + lrstats->redirect++; return IGC_XDP_REDIRECT; - break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(adapter->netdev, prog, act); - fallthrough; + return IGC_XDP_CONSUMED; case XDP_DROP: + lrstats->drop++; return IGC_XDP_CONSUMED; } } static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, - struct xdp_buff *xdp) + struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { struct bpf_prog *prog; int res; @@ -2254,7 +2271,7 @@ static struct sk_buff *igc_xdp_run_prog(struct igc_adapter *adapter, goto out; } - res = __igc_xdp_run_prog(adapter, prog, xdp); + res = __igc_xdp_run_prog(adapter, prog, xdp, lrstats); out: return ERR_PTR(-res); @@ -2309,6 +2326,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) unsigned int total_bytes = 0, total_packets = 0; struct igc_adapter *adapter = q_vector->adapter; struct igc_ring *rx_ring = q_vector->rx.ring; + struct xdp_rx_drv_stats_local lrstats = { }; struct sk_buff *skb = rx_ring->skb; u16 cleaned_count = igc_desc_unused(rx_ring); int xdp_status = 0, rx_buffer_pgcnt; @@ -2356,7 +2374,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring), igc_rx_offset(rx_ring) + pkt_offset, size, false); - skb = igc_xdp_run_prog(adapter, &xdp); + skb = igc_xdp_run_prog(adapter, &xdp, &lrstats); } if (IS_ERR(skb)) { @@ -2425,6 +2443,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget) rx_ring->skb = skb; igc_update_rx_stats(q_vector, total_packets, total_bytes); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xdp_rx, &lrstats); if (cleaned_count) igc_alloc_rx_buffers(rx_ring, cleaned_count); @@ -2481,6 +2500,7 @@ static void igc_dispatch_skb_zc(struct igc_q_vector *q_vector, static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) { struct igc_adapter *adapter = q_vector->adapter; + struct xdp_rx_drv_stats_local lrstats = { }; struct igc_ring *ring = q_vector->rx.ring; u16 cleaned_count = igc_desc_unused(ring); int total_bytes = 0, total_packets = 0; @@ -2529,7 +2549,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) bi->xdp->data_end = bi->xdp->data + size; xsk_buff_dma_sync_for_cpu(bi->xdp, ring->xsk_pool); - res = __igc_xdp_run_prog(adapter, prog, bi->xdp); + res = __igc_xdp_run_prog(adapter, prog, bi->xdp, &lrstats); switch (res) { case IGC_XDP_PASS: igc_dispatch_skb_zc(q_vector, desc, bi->xdp, timestamp); @@ -2562,6 +2582,7 @@ static int igc_clean_rx_irq_zc(struct igc_q_vector *q_vector, const int budget) igc_finalize_xdp(adapter, xdp_status); 
igc_update_rx_stats(q_vector, total_packets, total_bytes); + xdp_update_rx_drv_stats(&ring->xdp_stats->xsk_rx, &lrstats); if (xsk_uses_need_wakeup(ring->xsk_pool)) { if (failure || ring->next_to_clean == ring->next_to_use) @@ -2604,6 +2625,10 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) __netif_tx_lock(nq, cpu); budget = igc_desc_unused(ring); + if (unlikely(!budget)) { + xdp_update_tx_drv_full(&ring->xdp_stats->xsk_tx); + goto out_unlock; + } while (xsk_tx_peek_desc(pool, &xdp_desc) && budget--) { u32 cmd_type, olinfo_status; @@ -2644,6 +2669,7 @@ static void igc_xdp_xmit_zc(struct igc_ring *ring) xsk_tx_release(pool); } +out_unlock: __netif_tx_unlock(nq); } @@ -2661,9 +2687,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) unsigned int budget = q_vector->tx.work_limit; struct igc_ring *tx_ring = q_vector->tx.ring; unsigned int i = tx_ring->next_to_clean; + u32 xdp_frames = 0, xdp_bytes = 0; + u32 xsk_frames = 0, xsk_bytes = 0; struct igc_tx_buffer *tx_buffer; union igc_adv_tx_desc *tx_desc; - u32 xsk_frames = 0; if (test_bit(__IGC_DOWN, &adapter->state)) return true; @@ -2695,11 +2722,14 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) switch (tx_buffer->type) { case IGC_TX_BUFFER_TYPE_XSK: + xsk_bytes += tx_buffer->bytecount; xsk_frames++; break; case IGC_TX_BUFFER_TYPE_XDP: xdp_return_frame(tx_buffer->xdpf); igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); + xdp_bytes += tx_buffer->bytecount; + xdp_frames++; break; case IGC_TX_BUFFER_TYPE_SKB: napi_consume_skb(tx_buffer->skb, napi_budget); @@ -2750,6 +2780,10 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) tx_ring->next_to_clean = i; igc_update_tx_stats(q_vector, total_packets, total_bytes); + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xdp_tx, xdp_frames, + xdp_bytes); + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xsk_tx, xsk_frames, + xsk_bytes); if (tx_ring->xsk_pool) { if (xsk_frames) @@ -4382,6 +4416,8 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, ring->count = adapter->tx_ring_count; ring->queue_index = txr_idx; + ring->xdp_stats = adapter->netdev->xstats + txr_idx; + /* assign ring to adapter */ adapter->tx_ring[txr_idx] = ring; @@ -4404,6 +4440,8 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter, ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; + ring->xdp_stats = adapter->netdev->xstats + rxr_idx; + /* assign ring to adapter */ adapter->rx_ring[rxr_idx] = ring; } @@ -4512,6 +4550,7 @@ static int igc_sw_init(struct igc_adapter *adapter) struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct igc_hw *hw = &adapter->hw; + u32 i; pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); @@ -4541,6 +4580,14 @@ static int igc_sw_init(struct igc_adapter *adapter) igc_init_queue_configuration(adapter); + netdev->xstats = kcalloc(IGC_MAX_QUEUES, sizeof(*netdev->xstats), + GFP_KERNEL); + if (!netdev->xstats) + return -ENOMEM; + + for (i = 0; i < IGC_MAX_QUEUES; i++) + xdp_init_drv_stats(netdev->xstats + i); + /* This call may decrease the number of queues */ if (igc_init_interrupt_scheme(adapter, true)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); @@ -6043,11 +6090,25 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames, if (flags & XDP_XMIT_FLUSH) igc_flush_tx_descriptors(ring); + if (unlikely(drops)) + xdp_update_tx_drv_err(&ring->xdp_stats->xdp_tx, drops); + __netif_tx_unlock(nq); return num_frames - drops; } +static 
int igc_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + return IGC_MAX_QUEUES; + default: + return -EOPNOTSUPP; + } +} + static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter, struct igc_q_vector *q_vector) { @@ -6093,6 +6154,8 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats64 = igc_get_stats64, + .ndo_get_xdp_stats_nch = igc_get_xdp_stats_nch, + .ndo_get_xdp_stats = xdp_get_drv_stats_generic, .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 4a69823e6abd6b..d607946369257d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -349,6 +349,7 @@ struct ixgbe_ring { struct ixgbe_tx_queue_stats tx_stats; struct ixgbe_rx_queue_stats rx_stats; }; + struct xdp_drv_stats *xdp_stats; u16 rx_offset; struct xdp_rxq_info xdp_rxq; spinlock_t tx_lock; /* used in XDP mode */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c index 86b11164655e21..c146963adbd5d1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c @@ -951,6 +951,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, ring->queue_index = xdp_idx; set_ring_xdp(ring); spin_lock_init(&ring->tx_lock); + ring->xdp_stats = adapter->netdev->xstats + xdp_idx; /* assign ring to adapter */ WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring); @@ -994,6 +995,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, /* apply Rx specific ring traits */ ring->count = adapter->rx_ring_count; ring->queue_index = rxr_idx; + ring->xdp_stats = adapter->netdev->xstats + rxr_idx; /* assign ring to adapter */ WRITE_ONCE(adapter->rx_ring[rxr_idx], ring); @@ -1303,4 +1305,3 @@ void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens, context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd); context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); } - diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 0f9f022260d70f..d1cfd7d6a72bb9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1246,8 +1246,11 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, return true; } - if (ring_is_xdp(tx_ring)) + if (ring_is_xdp(tx_ring)) { + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xdp_tx, + total_packets, total_bytes); return !!budget; + } netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes); @@ -2196,7 +2199,8 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, - struct xdp_buff *xdp) + struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; @@ -2209,40 +2213,57 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter, if (!xdp_prog) goto xdp_out; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + prefetchw(xdp->data_hard_start); /* xdp_frame write */ act = bpf_prog_run_xdp(xdp_prog, xdp); switch (act) { case XDP_PASS: + lrstats->pass++; break; 
case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) + if (unlikely(!xdpf)) { + lrstats->tx_errors++; goto out_failure; + } ring = ixgbe_determine_xdp_ring(adapter); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_lock(&ring->tx_lock); result = ixgbe_xmit_xdp_ring(ring, xdpf); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); - if (result == IXGBE_XDP_CONSUMED) + if (result == IXGBE_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; case XDP_REDIRECT: err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } result = IXGBE_XDP_REDIR; + lrstats->redirect++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ + /* handle aborts by dropping packet */ + result = IXGBE_XDP_CONSUMED; + break; case XDP_DROP: result = IXGBE_XDP_CONSUMED; + lrstats->drop++; break; } xdp_out: @@ -2301,6 +2322,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, unsigned int mss = 0; #endif /* IXGBE_FCOE */ u16 cleaned_count = ixgbe_desc_unused(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; unsigned int offset = rx_ring->rx_offset; unsigned int xdp_xmit = 0; struct xdp_buff xdp; @@ -2348,7 +2370,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, /* At larger PAGE_SIZE, frame_sz depend on len size */ xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); #endif - skb = ixgbe_run_xdp(adapter, rx_ring, &xdp); + skb = ixgbe_run_xdp(adapter, rx_ring, &xdp, &lrstats); } if (IS_ERR(skb)) { @@ -2440,6 +2462,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); + xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xdp_rx, &lrstats); q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; @@ -8552,8 +8575,10 @@ int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, len = xdpf->len; - if (unlikely(!ixgbe_desc_unused(ring))) + if (unlikely(!ixgbe_desc_unused(ring))) { + xdp_update_tx_drv_full(&ring->xdp_stats->xdp_tx); return IXGBE_XDP_CONSUMED; + } dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); if (dma_mapping_error(ring->dev, dma)) @@ -10257,12 +10282,26 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n, if (unlikely(flags & XDP_XMIT_FLUSH)) ixgbe_xdp_ring_update_tail(ring); + if (unlikely(nxmit < n)) + xdp_update_tx_drv_err(&ring->xdp_stats->xdp_tx, n - nxmit); + if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); return nxmit; } +static int ixgbe_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + return IXGBE_MAX_XDP_QS; + default: + return -EOPNOTSUPP; + } +} + static const struct net_device_ops ixgbe_netdev_ops = { .ndo_open = ixgbe_open, .ndo_stop = ixgbe_close, @@ -10306,6 +10345,8 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_bpf = ixgbe_xdp, .ndo_xdp_xmit = ixgbe_xdp_xmit, .ndo_xsk_wakeup = ixgbe_xsk_wakeup, + .ndo_get_xdp_stats_nch = ixgbe_get_xdp_stats_nch, + .ndo_get_xdp_stats = xdp_get_drv_stats_generic, }; static void ixgbe_disable_txr_hw(struct ixgbe_adapter 
*adapter, @@ -10712,6 +10753,16 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->watchdog_timeo = 5 * HZ; strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name)); + netdev->xstats = devm_kcalloc(&pdev->dev, IXGBE_MAX_XDP_QS, + sizeof(*netdev->xstats), GFP_KERNEL); + if (!netdev->xstats) { + err = -ENOMEM; + goto err_ioremap; + } + + for (i = 0; i < IXGBE_MAX_XDP_QS; i++) + xdp_init_drv_stats(netdev->xstats + i); + /* Setup hw api */ hw->mac.ops = *ii->mac_ops; hw->mac.type = ii->mac; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c index db2bc58dfcfd0f..47c4b4621ab191 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c @@ -96,7 +96,8 @@ int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter, static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct ixgbe_ring *rx_ring, - struct xdp_buff *xdp) + struct xdp_buff *xdp, + struct xdp_rx_drv_stats_local *lrstats) { int err, result = IXGBE_XDP_PASS; struct bpf_prog *xdp_prog; @@ -104,41 +105,58 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter, struct xdp_frame *xdpf; u32 act; + lrstats->bytes += xdp->data_end - xdp->data; + lrstats->packets++; + xdp_prog = READ_ONCE(rx_ring->xdp_prog); act = bpf_prog_run_xdp(xdp_prog, xdp); if (likely(act == XDP_REDIRECT)) { err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); - if (err) + if (err) { + lrstats->redirect_errors++; goto out_failure; + } + lrstats->redirect++; return IXGBE_XDP_REDIR; } switch (act) { case XDP_PASS: + lrstats->pass++; break; case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) + if (unlikely(!xdpf)) { + lrstats->tx_errors++; goto out_failure; + } ring = ixgbe_determine_xdp_ring(adapter); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_lock(&ring->tx_lock); result = ixgbe_xmit_xdp_ring(ring, xdpf); if (static_branch_unlikely(&ixgbe_xdp_locking_key)) spin_unlock(&ring->tx_lock); - if (result == IXGBE_XDP_CONSUMED) + if (result == IXGBE_XDP_CONSUMED) { + lrstats->tx_errors++; goto out_failure; + } + lrstats->tx++; break; default: bpf_warn_invalid_xdp_action(act); - fallthrough; + lrstats->invalid++; + goto out_failure; case XDP_ABORTED: + lrstats->aborted++; out_failure: trace_xdp_exception(rx_ring->netdev, xdp_prog, act); - fallthrough; /* handle aborts by dropping packet */ + /* handle aborts by dropping packet */ + result = IXGBE_XDP_CONSUMED; + break; case XDP_DROP: result = IXGBE_XDP_CONSUMED; + lrstats->drop++; break; } return result; @@ -246,6 +264,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, unsigned int total_rx_bytes = 0, total_rx_packets = 0; struct ixgbe_adapter *adapter = q_vector->adapter; u16 cleaned_count = ixgbe_desc_unused(rx_ring); + struct xdp_rx_drv_stats_local lrstats = { }; unsigned int xdp_res, xdp_xmit = 0; bool failure = false; struct sk_buff *skb; @@ -299,7 +318,8 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, bi->xdp->data_end = bi->xdp->data + size; xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool); - xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); + xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp, + &lrstats); if (xdp_res) { if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) @@ -349,6 +369,7 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector, rx_ring->stats.packets += total_rx_packets; rx_ring->stats.bytes += total_rx_bytes; u64_stats_update_end(&rx_ring->syncp); + 
xdp_update_rx_drv_stats(&rx_ring->xdp_stats->xsk_rx, &lrstats); q_vector->rx.total_packets += total_rx_packets; q_vector->rx.total_bytes += total_rx_bytes; @@ -392,6 +413,7 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget) while (budget-- > 0) { if (unlikely(!ixgbe_desc_unused(xdp_ring)) || !netif_carrier_ok(xdp_ring->netdev)) { + xdp_update_tx_drv_full(&xdp_ring->xdp_stats->xsk_tx); work_done = false; break; } @@ -448,9 +470,10 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; unsigned int total_packets = 0, total_bytes = 0; struct xsk_buff_pool *pool = tx_ring->xsk_pool; + u32 xdp_frames = 0, xdp_bytes = 0; + u32 xsk_frames = 0, xsk_bytes = 0; union ixgbe_adv_tx_desc *tx_desc; struct ixgbe_tx_buffer *tx_bi; - u32 xsk_frames = 0; tx_bi = &tx_ring->tx_buffer_info[ntc]; tx_desc = IXGBE_TX_DESC(tx_ring, ntc); @@ -459,13 +482,14 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) break; - total_bytes += tx_bi->bytecount; - total_packets += tx_bi->gso_segs; - - if (tx_bi->xdpf) + if (tx_bi->xdpf) { ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi); - else + xdp_bytes += tx_bi->bytecount; + xdp_frames++; + } else { + xsk_bytes += tx_bi->bytecount; xsk_frames++; + } tx_bi->xdpf = NULL; @@ -483,11 +507,17 @@ bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, } tx_ring->next_to_clean = ntc; + total_bytes = xdp_bytes + xsk_bytes; + total_packets = xdp_frames + xsk_frames; u64_stats_update_begin(&tx_ring->syncp); tx_ring->stats.bytes += total_bytes; tx_ring->stats.packets += total_packets; u64_stats_update_end(&tx_ring->syncp); + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xdp_tx, xdp_frames, + xdp_bytes); + xdp_update_tx_drv_stats(&tx_ring->xdp_stats->xsk_tx, xsk_frames, + xsk_bytes); q_vector->tx.total_bytes += total_bytes; q_vector->tx.total_packets += total_packets; diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 80e4b500695e6f..5bb0bbfa1ee6c0 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -802,6 +802,59 @@ mvneta_get_stats64(struct net_device *dev, stats->tx_dropped = dev->stats.tx_dropped; } +static int mvneta_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct mvneta_port *pp = netdev_priv(dev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 cpu; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for_each_possible_cpu(cpu) { + const struct mvneta_pcpu_stats *stats; + const struct mvneta_stats *ps; + u64 xdp_xmit_err; + u64 xdp_redirect; + u64 xdp_tx_err; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_tx; + u32 start; + + stats = per_cpu_ptr(pp->stats, cpu); + ps = &stats->es.ps; + + do { + start = u64_stats_fetch_begin_irq(&stats->syncp); + + xdp_drop = ps->xdp_drop; + xdp_pass = ps->xdp_pass; + xdp_redirect = ps->xdp_redirect; + xdp_tx = ps->xdp_tx; + xdp_tx_err = ps->xdp_tx_err; + xdp_xmit = ps->xdp_xmit; + xdp_xmit_err = ps->xdp_xmit_err; + } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + + xdp_stats->drop += xdp_drop; + xdp_stats->pass += xdp_pass; + xdp_stats->redirect += xdp_redirect; + xdp_stats->tx += xdp_tx; + xdp_stats->tx_errors += xdp_tx_err; + xdp_stats->xmit_packets += xdp_xmit; + xdp_stats->xmit_errors += xdp_xmit_err; + } + + return 0; +} + /* Rx descriptors helper methods */ 
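/* A minimal sketch for contrast, with hypothetical "mydrv" names: mvneta
 * above implements only .ndo_get_xdp_stats and sums per-CPU counters into
 * a single struct ifla_xdp_stats, so the rtnetlink core reports it with
 * IFLA_XDP_XSTATS_SCOPE_SHARED (one netdev-wide entry). A driver that
 * wants per-channel IFLA_XDP_XSTATS_SCOPE_CHANNEL entries would pair its
 * collector with a channel-count callback along these lines:
 */
static int mydrv_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id)
{
	/* Hypothetical private struct with a num_channels field */
	const struct mydrv_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_XDP_XSTATS_TYPE_XDP:
		/* One struct ifla_xdp_stats will be filled per channel */
		return priv->num_channels;
	default:
		return -EOPNOTSUPP;
	}
}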
/* Checks whether the RX descriptor having this status is both the first @@ -4949,18 +5002,19 @@ static int mvneta_setup_tc(struct net_device *dev, enum tc_setup_type type, } static const struct net_device_ops mvneta_netdev_ops = { - .ndo_open = mvneta_open, - .ndo_stop = mvneta_stop, - .ndo_start_xmit = mvneta_tx, - .ndo_set_rx_mode = mvneta_set_rx_mode, - .ndo_set_mac_address = mvneta_set_mac_addr, - .ndo_change_mtu = mvneta_change_mtu, - .ndo_fix_features = mvneta_fix_features, - .ndo_get_stats64 = mvneta_get_stats64, - .ndo_eth_ioctl = mvneta_ioctl, - .ndo_bpf = mvneta_xdp, - .ndo_xdp_xmit = mvneta_xdp_xmit, - .ndo_setup_tc = mvneta_setup_tc, + .ndo_open = mvneta_open, + .ndo_stop = mvneta_stop, + .ndo_start_xmit = mvneta_tx, + .ndo_set_rx_mode = mvneta_set_rx_mode, + .ndo_set_mac_address = mvneta_set_mac_addr, + .ndo_change_mtu = mvneta_change_mtu, + .ndo_fix_features = mvneta_fix_features, + .ndo_get_stats64 = mvneta_get_stats64, + .ndo_get_xdp_stats = mvneta_get_xdp_stats, + .ndo_eth_ioctl = mvneta_ioctl, + .ndo_bpf = mvneta_xdp, + .ndo_xdp_xmit = mvneta_xdp_xmit, + .ndo_setup_tc = mvneta_setup_tc, }; static const struct ethtool_ops mvneta_eth_tool_ops = { diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 97bd2ee8a010a7..58203cde3b60b2 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5131,6 +5131,56 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped = dev->stats.tx_dropped; } +static int mvpp2_get_xdp_stats_ndo(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct mvpp2_port *port = netdev_priv(dev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 cpu, start; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for_each_possible_cpu(cpu) { + const struct mvpp2_pcpu_stats *ps; + u64 xdp_xmit_err; + u64 xdp_redirect; + u64 xdp_tx_err; + u64 xdp_pass; + u64 xdp_drop; + u64 xdp_xmit; + u64 xdp_tx; + + ps = per_cpu_ptr(port->stats, cpu); + + do { + start = u64_stats_fetch_begin_irq(&ps->syncp); + + xdp_redirect = ps->xdp_redirect; + xdp_pass = ps->xdp_pass; + xdp_drop = ps->xdp_drop; + xdp_xmit = ps->xdp_xmit; + xdp_xmit_err = ps->xdp_xmit_err; + xdp_tx = ps->xdp_tx; + xdp_tx_err = ps->xdp_tx_err; + } while (u64_stats_fetch_retry_irq(&ps->syncp, start)); + + xdp_stats->redirect += xdp_redirect; + xdp_stats->pass += xdp_pass; + xdp_stats->drop += xdp_drop; + xdp_stats->xmit_packets += xdp_xmit; + xdp_stats->xmit_errors += xdp_xmit_err; + xdp_stats->tx += xdp_tx; + xdp_stats->tx_errors += xdp_tx_err; + } + + return 0; +} + static int mvpp2_set_ts_config(struct mvpp2_port *port, struct ifreq *ifr) { struct hwtstamp_config config; @@ -5719,6 +5769,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_mac_address = mvpp2_set_mac_address, .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, + .ndo_get_xdp_stats = mvpp2_get_xdp_stats_ndo, .ndo_eth_ioctl = mvpp2_ioctl, .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 48b12ee44b8d08..cc8cf3ff7d4902 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -1212,4 +1212,9 @@ int mlx5e_set_vf_rate(struct net_device *dev, int vf, int 
min_tx_rate, int max_t int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); #endif + +int mlx5e_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id); +int mlx5e_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data); + #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 2f0df5cc1a2d97..a9a8535c828bc5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -156,7 +156,8 @@ bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, case XDP_ABORTED: xdp_abort: trace_xdp_exception(rq->netdev, prog, act); - fallthrough; + rq->stats->xdp_errors++; + return true; case XDP_DROP: rq->stats->xdp_drop++; return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 65571593ec5c19..d5b3abf09c8243 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4532,6 +4532,8 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_setup_tc = mlx5e_setup_tc, .ndo_select_queue = mlx5e_select_queue, .ndo_get_stats64 = mlx5e_get_stats, + .ndo_get_xdp_stats_nch = mlx5e_get_xdp_stats_nch, + .ndo_get_xdp_stats = mlx5e_get_xdp_stats, .ndo_set_rx_mode = mlx5e_set_rx_mode, .ndo_set_mac_address = mlx5e_set_mac, .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 3c91a11e27ad46..834457e3f19a1e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -141,6 +141,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_errors) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, @@ -208,6 +209,7 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_csum_none) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_removed_vlan_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_errors) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_drop) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_xdp_redirect) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xsk_wqe_err) }, @@ -298,6 +300,7 @@ static void mlx5e_stats_grp_sw_update_stats_xskrq(struct mlx5e_sw_stats *s, s->rx_xsk_csum_none += xskrq_stats->csum_none; s->rx_xsk_ecn_mark += xskrq_stats->ecn_mark; s->rx_xsk_removed_vlan_packets += xskrq_stats->removed_vlan_packets; + s->rx_xsk_xdp_errors += xskrq_stats->xdp_errors; s->rx_xsk_xdp_drop += xskrq_stats->xdp_drop; s->rx_xsk_xdp_redirect += xskrq_stats->xdp_redirect; s->rx_xsk_wqe_err += xskrq_stats->wqe_err; @@ -331,6 +334,7 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_csum_complete_tail_slow += 
rq_stats->csum_complete_tail_slow; s->rx_csum_unnecessary += rq_stats->csum_unnecessary; s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; + s->rx_xdp_errors += rq_stats->xdp_errors; s->rx_xdp_drop += rq_stats->xdp_drop; s->rx_xdp_redirect += rq_stats->xdp_redirect; s->rx_wqe_err += rq_stats->wqe_err; @@ -1766,6 +1770,7 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_errors) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, @@ -1869,6 +1874,7 @@ static const struct counter_desc xskrq_stats_desc[] = { { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, csum_none) }, { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, + { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_errors) }, { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_XSKRQ_STAT(struct mlx5e_rq_stats, wqe_err) }, @@ -1940,6 +1946,7 @@ static const struct counter_desc ptp_rq_stats_desc[] = { { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary) }, { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, csum_none) }, + { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_errors) }, { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_drop) }, { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, xdp_redirect) }, { MLX5E_DECLARE_PTP_RQ_STAT(struct mlx5e_rq_stats, lro_packets) }, @@ -2285,3 +2292,72 @@ unsigned int mlx5e_nic_stats_grps_num(struct mlx5e_priv *priv) { return ARRAY_SIZE(mlx5e_nic_stats_grps); } + +int mlx5e_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + const struct mlx5e_priv *priv = netdev_priv(dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return priv->max_nch; + case IFLA_XDP_XSTATS_TYPE_XSK: + return priv->xsk.ever_used ? 
priv->max_nch : -ENODATA; + default: + return -EOPNOTSUPP; + } +} + +int mlx5e_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct mlx5e_priv *priv = netdev_priv(dev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + case IFLA_XDP_XSTATS_TYPE_XSK: + if (!priv->xsk.ever_used) + return -ENODATA; + + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < priv->max_nch; i++) { + const struct mlx5e_channel_stats *cs = priv->channel_stats + i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + /* mlx5e_rq_stats rq */ + xdp_stats->errors = cs->rq.xdp_errors; + xdp_stats->drop = cs->rq.xdp_drop; + xdp_stats->redirect = cs->rq.xdp_redirect; + /* mlx5e_xdpsq_stats rq_xdpsq */ + xdp_stats->tx = cs->rq_xdpsq.xmit; + xdp_stats->tx_errors = cs->rq_xdpsq.err + + cs->rq_xdpsq.full; + /* mlx5e_xdpsq_stats xdpsq */ + xdp_stats->xmit_packets = cs->xdpsq.xmit; + xdp_stats->xmit_errors = cs->xdpsq.err; + xdp_stats->xmit_full = cs->xdpsq.full; + break; + case IFLA_XDP_XSTATS_TYPE_XSK: + /* mlx5e_rq_stats xskrq */ + xdp_stats->errors = cs->xskrq.xdp_errors; + xdp_stats->drop = cs->xskrq.xdp_drop; + xdp_stats->redirect = cs->xskrq.xdp_redirect; + /* mlx5e_xdpsq_stats xsksq */ + xdp_stats->xmit_packets = cs->xsksq.xmit; + xdp_stats->xmit_errors = cs->xsksq.err; + xdp_stats->xmit_full = cs->xsksq.full; + break; + } + + xdp_stats++; + } + + return 0; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 2c1ed5b81be676..dd33465af0ff8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -158,6 +158,7 @@ struct mlx5e_sw_stats { u64 rx_csum_complete_tail; u64 rx_csum_complete_tail_slow; u64 rx_csum_unnecessary_inner; + u64 rx_xdp_errors; u64 rx_xdp_drop; u64 rx_xdp_redirect; u64 rx_xdp_tx_xmit; @@ -237,6 +238,7 @@ struct mlx5e_sw_stats { u64 rx_xsk_csum_none; u64 rx_xsk_ecn_mark; u64 rx_xsk_removed_vlan_packets; + u64 rx_xsk_xdp_errors; u64 rx_xsk_xdp_drop; u64 rx_xsk_xdp_redirect; u64 rx_xsk_wqe_err; @@ -335,6 +337,7 @@ struct mlx5e_rq_stats { u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; + u64 xdp_errors; u64 xdp_drop; u64 xdp_redirect; u64 wqe_err; diff --git a/drivers/net/ethernet/sfc/ef100_netdev.c b/drivers/net/ethernet/sfc/ef100_netdev.c index 67fe44db6b6124..0367f7e043d861 100644 --- a/drivers/net/ethernet/sfc/ef100_netdev.c +++ b/drivers/net/ethernet/sfc/ef100_netdev.c @@ -219,6 +219,8 @@ static const struct net_device_ops ef100_netdev_ops = { .ndo_start_xmit = ef100_hard_start_xmit, .ndo_tx_timeout = efx_watchdog, .ndo_get_stats64 = efx_net_stats, + .ndo_get_xdp_stats_nch = efx_get_xdp_stats_nch, + .ndo_get_xdp_stats = efx_get_xdp_stats, .ndo_change_mtu = efx_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = efx_set_mac_address, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index a8c252e2b25219..a6a015c4d3b4e1 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -588,6 +588,8 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_open = efx_net_open, .ndo_stop = efx_net_stop, .ndo_get_stats64 = efx_net_stats, + .ndo_get_xdp_stats_nch = efx_get_xdp_stats_nch, + .ndo_get_xdp_stats = efx_get_xdp_stats, .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, diff --git 
a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index f187631b2c5cac..c2bf79fd66b4ee 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -606,6 +606,48 @@ void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats) spin_unlock_bh(&efx->stats_lock); } +int efx_get_xdp_stats_nch(const struct net_device *net_dev, u32 attr_id) +{ + const struct efx_nic *efx = netdev_priv(net_dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return efx->n_channels; + default: + return -EOPNOTSUPP; + } +} + +int efx_get_xdp_stats(const struct net_device *net_dev, u32 attr_id, + void *attr_data) +{ + struct ifla_xdp_stats *xdp_stats = attr_data; + struct efx_nic *efx = netdev_priv(net_dev); + const struct efx_channel *channel; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + spin_lock_bh(&efx->stats_lock); + + efx_for_each_channel(channel, efx) { + xdp_stats->drop = channel->n_rx_xdp_drops; + xdp_stats->errors = channel->n_rx_xdp_bad_drops; + xdp_stats->redirect = channel->n_rx_xdp_redirect; + xdp_stats->tx = channel->n_rx_xdp_tx; + + xdp_stats++; + } + + spin_unlock_bh(&efx->stats_lock); + + return 0; +} + /* Push loopback/power/transmit disable settings to the PHY, and reconfigure * the MAC appropriately. All other PHY configuration changes are pushed * through phy_op->set_settings(), and pushed asynchronously to the MAC diff --git a/drivers/net/ethernet/sfc/efx_common.h b/drivers/net/ethernet/sfc/efx_common.h index 65513fd0cf6c46..987d7c6608a255 100644 --- a/drivers/net/ethernet/sfc/efx_common.h +++ b/drivers/net/ethernet/sfc/efx_common.h @@ -32,6 +32,9 @@ void efx_start_all(struct efx_nic *efx); void efx_stop_all(struct efx_nic *efx); void efx_net_stats(struct net_device *net_dev, struct rtnl_link_stats64 *stats); +int efx_get_xdp_stats_nch(const struct net_device *net_dev, u32 attr_id); +int efx_get_xdp_stats(const struct net_device *net_dev, u32 attr_id, + void *attr_data); int efx_create_reset_workqueue(void); void efx_queue_reset_work(struct efx_nic *efx); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 5ca0a899101d40..c12209fbd1bd5a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -38,11 +38,12 @@ #define VETH_XDP_BATCH 16 struct veth_stats { + u64 packets; + u64 bytes; u64 rx_drops; /* xdp */ - u64 xdp_packets; - u64 xdp_bytes; u64 xdp_redirect; + u64 xdp_errors; u64 xdp_drops; u64 xdp_tx; u64 xdp_tx_err; @@ -92,10 +93,11 @@ struct veth_q_stat_desc { #define VETH_RQ_STAT(m) offsetof(struct veth_stats, m) static const struct veth_q_stat_desc veth_rq_stats_desc[] = { - { "xdp_packets", VETH_RQ_STAT(xdp_packets) }, - { "xdp_bytes", VETH_RQ_STAT(xdp_bytes) }, + { "packets", VETH_RQ_STAT(packets) }, + { "bytes", VETH_RQ_STAT(bytes) }, { "drops", VETH_RQ_STAT(rx_drops) }, { "xdp_redirect", VETH_RQ_STAT(xdp_redirect) }, + { "xdp_errors", VETH_RQ_STAT(xdp_errors) }, { "xdp_drops", VETH_RQ_STAT(xdp_drops) }, { "xdp_tx", VETH_RQ_STAT(xdp_tx) }, { "xdp_tx_errors", VETH_RQ_STAT(xdp_tx_err) }, @@ -376,9 +378,9 @@ static void veth_stats_rx(struct veth_stats *result, struct net_device *dev) int i; result->peer_tq_xdp_xmit_err = 0; - result->xdp_packets = 0; + result->packets = 0; result->xdp_tx_err = 0; - result->xdp_bytes = 0; + result->bytes = 0; result->rx_drops = 0; for (i = 0; i < dev->num_rx_queues; i++) { u64 packets, bytes, drops, xdp_tx_err, peer_tq_xdp_xmit_err; @@ -389,14 +391,14 @@ static void veth_stats_rx(struct 
veth_stats *result, struct net_device *dev) start = u64_stats_fetch_begin_irq(&stats->syncp); peer_tq_xdp_xmit_err = stats->vs.peer_tq_xdp_xmit_err; xdp_tx_err = stats->vs.xdp_tx_err; - packets = stats->vs.xdp_packets; - bytes = stats->vs.xdp_bytes; + packets = stats->vs.packets; + bytes = stats->vs.bytes; drops = stats->vs.rx_drops; } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); result->peer_tq_xdp_xmit_err += peer_tq_xdp_xmit_err; result->xdp_tx_err += xdp_tx_err; - result->xdp_packets += packets; - result->xdp_bytes += bytes; + result->packets += packets; + result->bytes += bytes; result->rx_drops += drops; } } @@ -416,8 +418,8 @@ static void veth_get_stats64(struct net_device *dev, veth_stats_rx(&rx, dev); tot->tx_dropped += rx.xdp_tx_err; tot->rx_dropped = rx.rx_drops + rx.peer_tq_xdp_xmit_err; - tot->rx_bytes = rx.xdp_bytes; - tot->rx_packets = rx.xdp_packets; + tot->rx_bytes = rx.bytes; + tot->rx_packets = rx.packets; rcu_read_lock(); peer = rcu_dereference(priv->peer); @@ -429,12 +431,77 @@ static void veth_get_stats64(struct net_device *dev, veth_stats_rx(&rx, peer); tot->tx_dropped += rx.peer_tq_xdp_xmit_err; tot->rx_dropped += rx.xdp_tx_err; - tot->tx_bytes += rx.xdp_bytes; - tot->tx_packets += rx.xdp_packets; + tot->tx_bytes += rx.bytes; + tot->tx_packets += rx.packets; } rcu_read_unlock(); } +static int veth_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return max(dev->real_num_rx_queues, dev->real_num_tx_queues); + default: + return -EOPNOTSUPP; + } +} + +static int veth_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct veth_priv *priv = netdev_priv(dev); + const struct net_device *peer = rtnl_dereference(priv->peer); + struct ifla_xdp_stats *xdp_iter, *xdp_stats = attr_data; + const struct veth_rq_stats *rq_stats; + u64 xmit_packets, xmit_errors; + u32 i, start; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < dev->real_num_rx_queues; i++) { + rq_stats = &priv->rq[i].stats; + xdp_iter = xdp_stats + i; + + do { + start = u64_stats_fetch_begin_irq(&rq_stats->syncp); + + xdp_iter->errors = rq_stats->vs.xdp_errors; + xdp_iter->redirect = rq_stats->vs.xdp_redirect; + xdp_iter->drop = rq_stats->vs.xdp_drops; + xdp_iter->tx = rq_stats->vs.xdp_tx; + xdp_iter->tx_errors = rq_stats->vs.xdp_tx_err; + } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); + } + + if (!peer) + return 0; + + priv = netdev_priv(peer); + + for (i = 0; i < peer->real_num_rx_queues; i++) { + rq_stats = &priv->rq[i].stats; + xdp_iter = xdp_stats + (i % dev->real_num_tx_queues); + + do { + start = u64_stats_fetch_begin_irq(&rq_stats->syncp); + + xmit_packets = rq_stats->vs.peer_tq_xdp_xmit; + xmit_errors = rq_stats->vs.peer_tq_xdp_xmit_err; + } while (u64_stats_fetch_retry_irq(&rq_stats->syncp, start)); + + xdp_iter->xmit_packets += xmit_packets; + xdp_iter->xmit_errors += xmit_errors; + } + + return 0; +} + /* fake multicast ability */ static void veth_set_multicast_list(struct net_device *dev) { @@ -655,16 +722,18 @@ static struct xdp_frame *veth_xdp_rcv_one(struct veth_rq *rq, fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - fallthrough; + goto err_xdp; case XDP_DROP: stats->xdp_drops++; - goto err_xdp; + goto xdp_drop; } } rcu_read_unlock(); return frame; err_xdp: + stats->xdp_errors++; +xdp_drop: rcu_read_unlock(); xdp_return_frame(frame); xdp_xmit: @@ -805,7 +874,8 @@ 
static struct sk_buff *veth_xdp_rcv_skb(struct veth_rq *rq, fallthrough; case XDP_ABORTED: trace_xdp_exception(rq->dev, xdp_prog, act); - fallthrough; + stats->xdp_errors++; + goto xdp_drop; case XDP_DROP: stats->xdp_drops++; goto xdp_drop; @@ -862,7 +932,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, /* ndo_xdp_xmit */ struct xdp_frame *frame = veth_ptr_to_xdp(ptr); - stats->xdp_bytes += frame->len; + stats->bytes += frame->len; frame = veth_xdp_rcv_one(rq, frame, bq, stats); if (frame) { /* XDP_PASS */ @@ -877,7 +947,7 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, /* ndo_start_xmit */ struct sk_buff *skb = ptr; - stats->xdp_bytes += skb->len; + stats->bytes += skb->len; skb = veth_xdp_rcv_skb(rq, skb, bq, stats); if (skb) napi_gro_receive(&rq->xdp_napi, skb); @@ -890,10 +960,10 @@ static int veth_xdp_rcv(struct veth_rq *rq, int budget, u64_stats_update_begin(&rq->stats.syncp); rq->stats.vs.xdp_redirect += stats->xdp_redirect; - rq->stats.vs.xdp_bytes += stats->xdp_bytes; + rq->stats.vs.bytes += stats->bytes; rq->stats.vs.xdp_drops += stats->xdp_drops; rq->stats.vs.rx_drops += stats->rx_drops; - rq->stats.vs.xdp_packets += done; + rq->stats.vs.packets += done; u64_stats_update_end(&rq->stats.syncp); return done; @@ -1527,13 +1597,15 @@ static int veth_xdp(struct net_device *dev, struct netdev_bpf *xdp) } static const struct net_device_ops veth_netdev_ops = { - .ndo_init = veth_dev_init, - .ndo_open = veth_open, - .ndo_stop = veth_close, - .ndo_start_xmit = veth_xmit, - .ndo_get_stats64 = veth_get_stats64, - .ndo_set_rx_mode = veth_set_multicast_list, - .ndo_set_mac_address = eth_mac_addr, + .ndo_init = veth_dev_init, + .ndo_open = veth_open, + .ndo_stop = veth_close, + .ndo_start_xmit = veth_xmit, + .ndo_get_stats64 = veth_get_stats64, + .ndo_get_xdp_stats_nch = veth_get_xdp_stats_nch, + .ndo_get_xdp_stats = veth_get_xdp_stats, + .ndo_set_rx_mode = veth_set_multicast_list, + .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = veth_poll_controller, #endif diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index c74af526d79b81..0b4cc9662d9118 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -77,8 +77,8 @@ struct virtnet_sq_stats { struct u64_stats_sync syncp; u64 packets; u64 bytes; - u64 xdp_tx; - u64 xdp_tx_drops; + u64 xdp_xmit; + u64 xdp_xmit_errors; u64 kicks; u64 tx_timeouts; }; @@ -92,6 +92,7 @@ struct virtnet_rq_stats { u64 xdp_tx; u64 xdp_redirects; u64 xdp_drops; + u64 xdp_errors; u64 kicks; }; @@ -101,8 +102,8 @@ struct virtnet_rq_stats { static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = { { "packets", VIRTNET_SQ_STAT(packets) }, { "bytes", VIRTNET_SQ_STAT(bytes) }, - { "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) }, - { "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) }, + { "xdp_xmit", VIRTNET_SQ_STAT(xdp_xmit) }, + { "xdp_xmit_errors", VIRTNET_SQ_STAT(xdp_xmit_errors) }, { "kicks", VIRTNET_SQ_STAT(kicks) }, { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) }, }; @@ -115,6 +116,7 @@ static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = { { "xdp_tx", VIRTNET_RQ_STAT(xdp_tx) }, { "xdp_redirects", VIRTNET_RQ_STAT(xdp_redirects) }, { "xdp_drops", VIRTNET_RQ_STAT(xdp_drops) }, + { "xdp_errors", VIRTNET_RQ_STAT(xdp_errors) }, { "kicks", VIRTNET_RQ_STAT(kicks) }, }; @@ -627,8 +629,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, u64_stats_update_begin(&sq->stats.syncp); sq->stats.bytes += bytes; sq->stats.packets += packets; - sq->stats.xdp_tx += n; - 
sq->stats.xdp_tx_drops += n - nxmit; + sq->stats.xdp_xmit += n; + sq->stats.xdp_xmit_errors += n - nxmit; sq->stats.kicks += kicks; u64_stats_update_end(&sq->stats.syncp); @@ -818,7 +820,8 @@ static struct sk_buff *receive_small(struct net_device *dev, trace_xdp_exception(vi->dev, xdp_prog, act); goto err_xdp; case XDP_DROP: - goto err_xdp; + stats->xdp_drops++; + goto xdp_drop; } } rcu_read_unlock(); @@ -843,8 +846,9 @@ static struct sk_buff *receive_small(struct net_device *dev, return skb; err_xdp: + stats->xdp_errors++; +xdp_drop: rcu_read_unlock(); - stats->xdp_drops++; err_len: stats->drops++; put_page(page); @@ -1033,7 +1037,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, case XDP_DROP: if (unlikely(xdp_page != page)) __free_pages(xdp_page, 0); - goto err_xdp; + + if (unlikely(act != XDP_DROP)) + goto err_xdp; + + stats->xdp_drops++; + goto xdp_drop; } } rcu_read_unlock(); @@ -1103,8 +1112,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, return head_skb; err_xdp: + stats->xdp_errors++; +xdp_drop: rcu_read_unlock(); - stats->xdp_drops++; err_skb: put_page(page); while (num_buf-- > 1) { @@ -1909,6 +1919,60 @@ static void virtnet_stats(struct net_device *dev, tot->rx_frame_errors = dev->stats.rx_frame_errors; } +static int virtnet_get_xdp_stats_nch(const struct net_device *dev, u32 attr_id) +{ + const struct virtnet_info *vi = netdev_priv(dev); + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + return vi->curr_queue_pairs; + default: + return -EOPNOTSUPP; + } +} + +static int virtnet_get_xdp_stats(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const struct virtnet_info *vi = netdev_priv(dev); + struct ifla_xdp_stats *xdp_stats = attr_data; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + break; + default: + return -EOPNOTSUPP; + } + + for (i = 0; i < vi->curr_queue_pairs; i++) { + const struct virtnet_rq_stats *rqs = &vi->rq[i].stats; + const struct virtnet_sq_stats *sqs = &vi->sq[i].stats; + u32 start; + + do { + start = u64_stats_fetch_begin_irq(&rqs->syncp); + + xdp_stats->packets = rqs->xdp_packets; + xdp_stats->tx = rqs->xdp_tx; + xdp_stats->redirect = rqs->xdp_redirects; + xdp_stats->drop = rqs->xdp_drops; + xdp_stats->errors = rqs->xdp_errors; + } while (u64_stats_fetch_retry_irq(&rqs->syncp, start)); + + do { + start = u64_stats_fetch_begin_irq(&sqs->syncp); + + xdp_stats->xmit_packets = sqs->xdp_xmit; + xdp_stats->xmit_errors = sqs->xdp_xmit_errors; + } while (u64_stats_fetch_retry_irq(&sqs->syncp, start)); + + xdp_stats++; + } + + return 0; +} + static void virtnet_ack_link_announce(struct virtnet_info *vi) { rtnl_lock(); @@ -2700,15 +2764,17 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) } static const struct net_device_ops virtnet_netdev = { - .ndo_open = virtnet_open, - .ndo_stop = virtnet_close, - .ndo_start_xmit = start_xmit, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = virtnet_set_mac_address, - .ndo_set_rx_mode = virtnet_set_rx_mode, - .ndo_get_stats64 = virtnet_stats, - .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, - .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, + .ndo_open = virtnet_open, + .ndo_stop = virtnet_close, + .ndo_start_xmit = start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = virtnet_set_mac_address, + .ndo_set_rx_mode = virtnet_set_rx_mode, + .ndo_get_stats64 = virtnet_stats, + .ndo_get_xdp_stats_nch = virtnet_get_xdp_stats_nch, + .ndo_get_xdp_stats = virtnet_get_xdp_stats, + 
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_features_check = passthru_features_check, diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 622658dfbf0a98..a0dac6cb3e6a96 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h @@ -4,8 +4,8 @@ #include <uapi/linux/if_link.h> -/* We don't want this structure exposed to user space */ +/* We don't want these structures exposed to user space */ struct ifla_vf_stats { __u64 rx_packets; __u64 tx_packets; @@ -30,4 +30,41 @@ struct ifla_vf_info { __u32 trusted; __be16 vlan_proto; }; + +/** + * struct ifla_xdp_stats - driver-side XDP statistics + * @packets: number of frames passed to bpf_prog_run_xdp(). + * @bytes: number of bytes that went through bpf_prog_run_xdp(). + * @errors: number of general XDP errors, if driver has one unified counter. + * @aborted: number of %XDP_ABORTED returns. + * @drop: number of %XDP_DROP returns. + * @invalid: number of returns of unallowed values (i.e. not XDP_*). + * @pass: number of %XDP_PASS returns. + * @redirect: number of successfully performed %XDP_REDIRECT requests. + * @redirect_errors: number of failed %XDP_REDIRECT requests. + * @tx: number of successfully performed %XDP_TX requests. + * @tx_errors: number of failed %XDP_TX requests. + * @xmit_packets: number of successfully transmitted XDP/XSK frames. + * @xmit_bytes: number of successfully transmitted XDP/XSK bytes. + * @xmit_errors: number of XDP/XSK frames that failed to transmit. + * @xmit_full: number of times the XDP/XSK queue was full at the moment of transmission. + */ +struct ifla_xdp_stats { + __u64 packets; + __u64 bytes; + __u64 errors; + __u64 aborted; + __u64 drop; + __u64 invalid; + __u64 pass; + __u64 redirect; + __u64 redirect_errors; + __u64 tx; + __u64 tx_errors; + __u64 xmit_packets; + __u64 xmit_bytes; + __u64 xmit_errors; + __u64 xmit_full; +}; + #endif /* _LINUX_IF_LINK_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index db3bff1ae7fdf5..728c650d290e73 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1327,6 +1327,13 @@ struct netdev_net_notifier { * queue id bound to an AF_XDP socket. The flags field specifies if * only RX, only Tx, or both should be woken up using the flags * XDP_WAKEUP_RX and XDP_WAKEUP_TX. + * int (*ndo_get_xdp_stats_nch)(const struct net_device *dev, u32 attr_id); + * Get the number of channels for which ndo_get_xdp_stats will return + * statistics. + * + * int (*ndo_get_xdp_stats)(const struct net_device *dev, u32 attr_id, + * void *attr_data); + * Get attr_id XDP statistics into the attr_data pointer. * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); * Get devlink port instance associated with a given netdev.
* Called with a reference on the netdevice and devlink locks only, @@ -1550,6 +1557,11 @@ struct net_device_ops { struct xdp_buff *xdp); int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); + int (*ndo_get_xdp_stats_nch)(const struct net_device *dev, + u32 attr_id); + int (*ndo_get_xdp_stats)(const struct net_device *dev, + u32 attr_id, + void *attr_data); struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); int (*ndo_tunnel_ctl)(struct net_device *dev, struct ip_tunnel_parm *p, int cmd); @@ -2213,6 +2225,7 @@ struct net_device { struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; struct pcpu_dstats __percpu *dstats; + struct xdp_drv_stats /* per-channel */ *xstats; }; #if IS_ENABLED(CONFIG_GARP) diff --git a/include/net/xdp.h b/include/net/xdp.h index 447f9b1578f387..e4f06a34d46297 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -7,6 +7,7 @@ #define __LINUX_NET_XDP_H__ #include <linux/skbuff.h> /* skb_shared_info */ +#include <linux/u64_stats_sync.h> /* u64_stats_* */ /** * DOC: XDP RX-queue information @@ -292,4 +293,165 @@ void xdp_attachment_setup(struct xdp_attachment_info *info, #define DEV_MAP_BULK_SIZE XDP_BULK_QUEUE_SIZE +/* Suggested XDP/XSK driver stats, mirroring &ifla_xdp_stats except + * for generic errors; refer to its documentation for the details. + * The intended usage is to either have them as a standalone array + * of xdp_drv_stats, or embed &xdp_{rx,tx}_drv_stats into a ring + * structure. Having separate XDP and XSK counters is recommended. + */ + +struct ifla_xdp_stats; + +struct xdp_rx_drv_stats { + struct u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t pass; + u64_stats_t drop; + u64_stats_t redirect; + u64_stats_t tx; + u64_stats_t redirect_errors; + u64_stats_t tx_errors; + u64_stats_t aborted; + u64_stats_t invalid; +}; + +struct xdp_tx_drv_stats { + struct u64_stats_sync syncp; + u64_stats_t packets; + u64_stats_t bytes; + u64_stats_t errors; + u64_stats_t full; +}; + +struct xdp_drv_stats { + struct xdp_rx_drv_stats xdp_rx; + struct xdp_tx_drv_stats xdp_tx; + struct xdp_rx_drv_stats xsk_rx ____cacheline_aligned; + struct xdp_tx_drv_stats xsk_tx; +} ____cacheline_aligned; + +/* Shortened copy of Rx stats to put on stack */ +struct xdp_rx_drv_stats_local { + u32 bytes; + u32 packets; + u32 pass; + u32 drop; + u32 tx; + u32 tx_errors; + u32 redirect; + u32 redirect_errors; + u32 aborted; + u32 invalid; +}; + +#define xdp_init_rx_drv_stats(rstats) u64_stats_init(&(rstats)->syncp) +#define xdp_init_tx_drv_stats(tstats) u64_stats_init(&(tstats)->syncp) + +/** + * xdp_init_drv_stats - initialize driver XDP stats + * @xdp_stats: driver container if it uses generic xdp_drv_stats + * + * Initializes atomic/seqcount sync points inside the containers. + */ +static inline void xdp_init_drv_stats(struct xdp_drv_stats *xdp_stats) +{ + xdp_init_rx_drv_stats(&xdp_stats->xdp_rx); + xdp_init_tx_drv_stats(&xdp_stats->xdp_tx); + xdp_init_rx_drv_stats(&xdp_stats->xsk_rx); + xdp_init_tx_drv_stats(&xdp_stats->xsk_tx); +} + +/** + * xdp_update_rx_drv_stats - update driver XDP stats + * @rstats: target driver container + * @lrstats: filled onstack structure + * + * Adds Rx path XDP statistics from the onstack structure to the + * driver container, respecting atomic/seqcount synchronization. + * Typical usage is to call it at the end of Rx NAPI polling.
+ */ +static inline void +xdp_update_rx_drv_stats(struct xdp_rx_drv_stats *rstats, + const struct xdp_rx_drv_stats_local *lrstats) +{ + if (!lrstats->packets) + return; + + u64_stats_update_begin(&rstats->syncp); + u64_stats_add(&rstats->packets, lrstats->packets); + u64_stats_add(&rstats->bytes, lrstats->bytes); + u64_stats_add(&rstats->pass, lrstats->pass); + u64_stats_add(&rstats->drop, lrstats->drop); + u64_stats_add(&rstats->redirect, lrstats->redirect); + u64_stats_add(&rstats->tx, lrstats->tx); + u64_stats_add(&rstats->redirect_errors, lrstats->redirect_errors); + u64_stats_add(&rstats->tx_errors, lrstats->tx_errors); + u64_stats_add(&rstats->aborted, lrstats->aborted); + u64_stats_add(&rstats->invalid, lrstats->invalid); + u64_stats_update_end(&rstats->syncp); +} + +/** + * xdp_update_tx_drv_stats - update driver XDP stats + * @tstats: target driver container + * @packets: onstack packet counter + * @bytes: onstack octet counter + * + * Adds onstack packet/byte Tx XDP counter values from the current session + * to the driver container. Typical usage is to call it on the completion + * path / Tx NAPI polling. + */ +static inline void xdp_update_tx_drv_stats(struct xdp_tx_drv_stats *tstats, + u32 packets, u32 bytes) +{ + if (!packets) + return; + + u64_stats_update_begin(&tstats->syncp); + u64_stats_add(&tstats->packets, packets); + u64_stats_add(&tstats->bytes, bytes); + u64_stats_update_end(&tstats->syncp); +} + +/** + * xdp_update_tx_drv_err - update driver Tx XDP errors counter + * @tstats: target driver container + * @num: onstack error counter / number of non-xmitted frames + * + * Adds onstack error Tx XDP counter value from the current session + * to the driver container. Typical usage is to call it on the error + * path of .ndo_xdp_xmit() / XSK zerocopy xmit. + */ +static inline void xdp_update_tx_drv_err(struct xdp_tx_drv_stats *tstats, + u32 num) +{ + u64_stats_update_begin(&tstats->syncp); + u64_stats_add(&tstats->errors, num); + u64_stats_update_end(&tstats->syncp); +} + +/** + * xdp_update_tx_drv_full - update driver Tx XDP ring full counter + * @tstats: target driver container + * + * Increments the Tx XDP ring full counter of the driver container. + * Typical usage is to call it in case of no free descs available on + * a ring in .ndo_xdp_xmit() / XSK zerocopy xmit.
+ */ +static inline void xdp_update_tx_drv_full(struct xdp_tx_drv_stats *tstats) +{ + u64_stats_update_begin(&tstats->syncp); + u64_stats_inc(&tstats->full); + u64_stats_update_end(&tstats->syncp); +} + +void xdp_fetch_rx_drv_stats(struct ifla_xdp_stats *if_stats, + const struct xdp_rx_drv_stats *rstats); +void xdp_fetch_tx_drv_stats(struct ifla_xdp_stats *if_stats, + const struct xdp_tx_drv_stats *tstats); +int xdp_get_drv_stats_generic(const struct net_device *dev, u32 attr_id, + void *attr_data); + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h index eebd3894fe89a0..dc1dd31e8274ec 100644 --- a/include/uapi/linux/if_link.h +++ b/include/uapi/linux/if_link.h @@ -1147,6 +1147,7 @@ enum { IFLA_STATS_LINK_XSTATS_SLAVE, IFLA_STATS_LINK_OFFLOAD_XSTATS, IFLA_STATS_AF_SPEC, + IFLA_STATS_LINK_XDP_XSTATS, __IFLA_STATS_MAX, }; @@ -1175,6 +1176,72 @@ enum { }; #define IFLA_OFFLOAD_XSTATS_MAX (__IFLA_OFFLOAD_XSTATS_MAX - 1) +/* These are embedded into IFLA_STATS_LINK_XDP_XSTATS */ +enum { + IFLA_XDP_XSTATS_TYPE_UNSPEC, + /* Stats collected on a "regular" channel(s) */ + IFLA_XDP_XSTATS_TYPE_XDP, + /* Stats collected on an XSK channel(s) */ + IFLA_XDP_XSTATS_TYPE_XSK, + + __IFLA_XDP_XSTATS_TYPE_CNT, +}; + +#define IFLA_XDP_XSTATS_TYPE_START (IFLA_XDP_XSTATS_TYPE_UNSPEC + 1) +#define IFLA_XDP_XSTATS_TYPE_MAX (__IFLA_XDP_XSTATS_TYPE_CNT - 1) + +/* Embedded into IFLA_XDP_XSTATS_TYPE_XDP or IFLA_XDP_XSTATS_TYPE_XSK */ +enum { + IFLA_XDP_XSTATS_SCOPE_UNSPEC, + /* netdev-wide stats */ + IFLA_XDP_XSTATS_SCOPE_SHARED, + /* Per-channel stats */ + IFLA_XDP_XSTATS_SCOPE_CHANNEL, + + __IFLA_XDP_XSTATS_SCOPE_CNT, +}; + +/* Embedded into IFLA_XDP_XSTATS_SCOPE_SHARED/IFLA_XDP_XSTATS_SCOPE_CHANNEL */ +enum { + /* Padding for 64-bit alignment */ + IFLA_XDP_XSTATS_UNSPEC, + /* Number of frames passed to bpf_prog_run_xdp() */ + IFLA_XDP_XSTATS_PACKETS, + /* Number of bytes that went through bpf_prog_run_xdp() */ + IFLA_XDP_XSTATS_BYTES, + /* Number of general XDP errors if driver counts them together */ + IFLA_XDP_XSTATS_ERRORS, + /* Number of %XDP_ABORTED returns */ + IFLA_XDP_XSTATS_ABORTED, + /* Number of %XDP_DROP returns */ + IFLA_XDP_XSTATS_DROP, + /* Number of returns of unallowed values (i.e. 
not XDP_*) */ + IFLA_XDP_XSTATS_INVALID, + /* Number of %XDP_PASS returns */ + IFLA_XDP_XSTATS_PASS, + /* Number of successfully performed %XDP_REDIRECT requests */ + IFLA_XDP_XSTATS_REDIRECT, + /* Number of failed %XDP_REDIRECT requests */ + IFLA_XDP_XSTATS_REDIRECT_ERRORS, + /* Number of successfully performed %XDP_TX requests */ + IFLA_XDP_XSTATS_TX, + /* Number of failed %XDP_TX requests */ + IFLA_XDP_XSTATS_TX_ERRORS, + /* Number of successfully transmitted XDP/XSK frames */ + IFLA_XDP_XSTATS_XMIT_PACKETS, + /* Number of successfully transmitted XDP/XSK bytes */ + IFLA_XDP_XSTATS_XMIT_BYTES, + /* Number of XDP/XSK frames that failed to transmit */ + IFLA_XDP_XSTATS_XMIT_ERRORS, + /* Number of times the XDP/XSK queue was full at the moment of transmission */ + IFLA_XDP_XSTATS_XMIT_FULL, + + __IFLA_XDP_XSTATS_CNT, +}; + +#define IFLA_XDP_XSTATS_START (IFLA_XDP_XSTATS_UNSPEC + 1) +#define IFLA_XDP_XSTATS_MAX (__IFLA_XDP_XSTATS_CNT - 1) + /* XDP section */ #define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 6f25c0a8aebe17..b7db68fb087910 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -5107,6 +5107,262 @@ static int rtnl_get_offload_stats_size(const struct net_device *dev) return nla_size; } +#define IFLA_XDP_XSTATS_NUM (__IFLA_XDP_XSTATS_CNT - \ + IFLA_XDP_XSTATS_START) + +static_assert(sizeof(struct ifla_xdp_stats) / sizeof(__u64) == + IFLA_XDP_XSTATS_NUM); + +static u32 rtnl_get_xdp_stats_num(u32 attr_id) +{ + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + return IFLA_XDP_XSTATS_NUM; + default: + return 0; + } +} + +static bool rtnl_get_xdp_stats_xdpxsk(struct sk_buff *skb, u32 ch, + const void *attr_data) +{ + const struct ifla_xdp_stats *xstats = attr_data; + + xstats += ch; + + if (nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_PACKETS, xstats->packets, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_BYTES, xstats->bytes, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_ERRORS, xstats->errors, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_ABORTED, xstats->aborted, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_DROP, xstats->drop, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_INVALID, xstats->invalid, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_PASS, xstats->pass, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_REDIRECT, xstats->redirect, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_REDIRECT_ERRORS, + xstats->redirect_errors, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_TX, xstats->tx, + IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_TX_ERRORS, + xstats->tx_errors, IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_XMIT_PACKETS, + xstats->xmit_packets, IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_XMIT_BYTES, + xstats->xmit_bytes, IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_XMIT_ERRORS, + xstats->xmit_errors, IFLA_XDP_XSTATS_UNSPEC) || + nla_put_u64_64bit(skb, IFLA_XDP_XSTATS_XMIT_FULL, + xstats->xmit_full, IFLA_XDP_XSTATS_UNSPEC)) + return false; + + return true; +} + +static bool rtnl_get_xdp_stats_one(struct sk_buff *skb, u32 attr_id, + u32 scope_id, u32 ch, const void *attr_data) +{ + struct nlattr *scope; + + scope = nla_nest_start_noflag(skb, scope_id); + if (!scope) + return false; + + switch 
(attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + case IFLA_XDP_XSTATS_TYPE_XSK: + if (!rtnl_get_xdp_stats_xdpxsk(skb, ch, attr_data)) + goto fail; + + break; + default: +fail: + nla_nest_cancel(skb, scope); + + return false; + } + + nla_nest_end(skb, scope); + + return true; +} + +static bool rtnl_get_xdp_stats(struct sk_buff *skb, + const struct net_device *dev, + int *idxattr, int *prividx) +{ + const struct net_device_ops *ops = dev->netdev_ops; + struct nlattr *xstats, *type = NULL; + u32 saved_ch = *prividx & U16_MAX; + u32 saved_attr = *prividx >> 16; + bool nuke_xstats = true; + u32 attr_id, ch = 0; + int ret; + + if (!ops || !ops->ndo_get_xdp_stats) + goto nodata; + + *idxattr = IFLA_STATS_LINK_XDP_XSTATS; + + xstats = nla_nest_start_noflag(skb, IFLA_STATS_LINK_XDP_XSTATS); + if (!xstats) + return false; + + for (attr_id = IFLA_XDP_XSTATS_TYPE_START; + attr_id < __IFLA_XDP_XSTATS_TYPE_CNT; + attr_id++) { + u32 nstat, scope_id, nch; + bool nuke_type = true; + void *attr_data; + size_t size; + + if (attr_id > saved_attr) + saved_ch = 0; + if (attr_id < saved_attr) + continue; + + nstat = rtnl_get_xdp_stats_num(attr_id); + if (!nstat) + continue; + + scope_id = IFLA_XDP_XSTATS_SCOPE_SHARED; + nch = 1; + + if (!ops->ndo_get_xdp_stats_nch) + goto shared; + + ret = ops->ndo_get_xdp_stats_nch(dev, attr_id); + if (ret == -EOPNOTSUPP || ret == -ENODATA) + continue; + if (ret < 0) + goto out; + if (!ret) + goto shared; + + scope_id = IFLA_XDP_XSTATS_SCOPE_CHANNEL; + nch = ret; + +shared: + size = array3_size(nch, nstat, sizeof(__u64)); + if (unlikely(size == SIZE_MAX)) { + ret = -EOVERFLOW; + goto out; + } + + attr_data = kzalloc(size, GFP_KERNEL); + if (!attr_data) { + ret = -ENOMEM; + goto out; + } + + ret = ops->ndo_get_xdp_stats(dev, attr_id, attr_data); + if (ret == -EOPNOTSUPP || ret == -ENODATA) + goto kfree_cont; + if (ret) { +kfree_out: + kfree(attr_data); + goto out; + } + + ret = -EMSGSIZE; + + type = nla_nest_start_noflag(skb, attr_id); + if (!type) + goto kfree_out; + + for (ch = saved_ch; ch < nch; ch++) + if (!rtnl_get_xdp_stats_one(skb, attr_id, scope_id, + ch, attr_data)) { + if (nuke_type) + nla_nest_cancel(skb, type); + else + nla_nest_end(skb, type); + + goto kfree_out; + } else { + nuke_xstats = false; + nuke_type = false; + } + + nla_nest_end(skb, type); +kfree_cont: + kfree(attr_data); + } + + ret = 0; + +out: + if (nuke_xstats) + nla_nest_cancel(skb, xstats); + else + nla_nest_end(skb, xstats); + + if (ret && ret != -EOPNOTSUPP && ret != -ENODATA) { + /* If the driver has 60+ queues, we can run out of skb + * tailroom even when putting stats for one type. Save + * channel number in prividx to resume from it next time + * rather than restarting the whole type and running into + * the same problem again. 
+ */ + *prividx = (attr_id << 16) | ch; + return false; + } + + *prividx = 0; +nodata: + *idxattr = 0; + + return true; +} + +static size_t rtnl_get_xdp_stats_size(const struct net_device *dev) +{ + const struct net_device_ops *ops = dev->netdev_ops; + size_t size = 0; + u32 attr_id; + + if (!ops || !ops->ndo_get_xdp_stats) + return 0; + + for (attr_id = IFLA_XDP_XSTATS_TYPE_START; + attr_id < __IFLA_XDP_XSTATS_TYPE_CNT; + attr_id++) { + u32 nstat = rtnl_get_xdp_stats_num(attr_id); + u32 nch = 1; + int ret; + + if (!nstat) + continue; + + if (!ops->ndo_get_xdp_stats_nch) + goto shared; + + ret = ops->ndo_get_xdp_stats_nch(dev, attr_id); + if (ret < 0) + continue; + if (ret > 0) + nch = ret; + +shared: + size += nla_total_size(0) + /* IFLA_XDP_XSTATS_TYPE_* */ + (nla_total_size(0) + /* IFLA_XDP_XSTATS_SCOPE_* */ + nla_total_size_64bit(sizeof(__u64)) * nstat) * nch; + } + + if (size) + size += nla_total_size(0); /* IFLA_STATS_LINK_XDP_XSTATS */ + + return size; +} + static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, int type, u32 pid, u32 seq, u32 change, unsigned int flags, unsigned int filter_mask, @@ -5243,6 +5499,11 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev, *idxattr = 0; } + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XDP_XSTATS, + *idxattr) && + !rtnl_get_xdp_stats(skb, dev, idxattr, prividx)) + goto nla_put_failure; + nlmsg_end(skb, nlh); return 0; @@ -5318,6 +5579,9 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev, rcu_read_unlock(); } + if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_XDP_XSTATS, 0)) + size += rtnl_get_xdp_stats_size(dev); + return size; } diff --git a/net/core/xdp.c b/net/core/xdp.c index 5ddc29f29bada2..24980207303cda 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -611,3 +611,127 @@ struct xdp_frame *xdpf_clone(struct xdp_frame *xdpf) return nxdpf; } + +/** + * xdp_fetch_rx_drv_stats - helper for implementing .ndo_get_xdp_stats() + * @if_stats: target container passed from rtnetlink core + * @rstats: driver container if it uses generic xdp_rx_drv_stats + * + * Fetches Rx path XDP statistics from a suggested driver structure to + * the one used by rtnetlink, respecting atomic/seqcount synchronization. + */ +void xdp_fetch_rx_drv_stats(struct ifla_xdp_stats *if_stats, + const struct xdp_rx_drv_stats *rstats) +{ + u32 start; + + do { + start = u64_stats_fetch_begin_irq(&rstats->syncp); + + if_stats->packets = u64_stats_read(&rstats->packets); + if_stats->bytes = u64_stats_read(&rstats->bytes); + if_stats->pass = u64_stats_read(&rstats->pass); + if_stats->drop = u64_stats_read(&rstats->drop); + if_stats->tx = u64_stats_read(&rstats->tx); + if_stats->tx_errors = u64_stats_read(&rstats->tx_errors); + if_stats->redirect = u64_stats_read(&rstats->redirect); + if_stats->redirect_errors = + u64_stats_read(&rstats->redirect_errors); + if_stats->aborted = u64_stats_read(&rstats->aborted); + if_stats->invalid = u64_stats_read(&rstats->invalid); + } while (u64_stats_fetch_retry_irq(&rstats->syncp, start)); +} +EXPORT_SYMBOL_GPL(xdp_fetch_rx_drv_stats); + +/** + * xdp_fetch_tx_drv_stats - helper for implementing .ndo_get_xdp_stats() + * @if_stats: target container passed from rtnetlink core + * @tstats: driver container if it uses generic xdp_tx_drv_stats + * + * Fetches Tx path XDP statistics from a suggested driver structure to + * the one used by rtnetlink, respecting atomic/seqcount synchronization. 
+ */ +void xdp_fetch_tx_drv_stats(struct ifla_xdp_stats *if_stats, + const struct xdp_tx_drv_stats *tstats) +{ + u32 start; + + do { + start = u64_stats_fetch_begin_irq(&tstats->syncp); + + if_stats->xmit_packets = u64_stats_read(&tstats->packets); + if_stats->xmit_bytes = u64_stats_read(&tstats->bytes); + if_stats->xmit_errors = u64_stats_read(&tstats->errors); + if_stats->xmit_full = u64_stats_read(&tstats->full); + } while (u64_stats_fetch_retry_irq(&tstats->syncp, start)); +} +EXPORT_SYMBOL_GPL(xdp_fetch_tx_drv_stats); + +/** + * xdp_get_drv_stats_generic - generic implementation of .ndo_get_xdp_stats() + * @dev: network interface device structure + * @attr_id: type of statistics (XDP, XSK, ...) + * @attr_data: target stats container + * + * Returns 0 on success, -%EOPNOTSUPP if either the driver or this function + * doesn't support this attr_id, -%ENODATA if the driver supports attr_id, but + * can't provide anything right now, and -%EINVAL if driver configuration is + * invalid. + */ +int xdp_get_drv_stats_generic(const struct net_device *dev, u32 attr_id, + void *attr_data) +{ + const bool xsk = attr_id == IFLA_XDP_XSTATS_TYPE_XSK; + const struct xdp_drv_stats *drv_iter = dev->xstats; + const struct net_device_ops *ops = dev->netdev_ops; + struct ifla_xdp_stats *iter = attr_data; + int nch; + u32 i; + + switch (attr_id) { + case IFLA_XDP_XSTATS_TYPE_XDP: + if (unlikely(!ops->ndo_bpf)) + return -EINVAL; + + break; + case IFLA_XDP_XSTATS_TYPE_XSK: + if (!ops->ndo_xsk_wakeup) + return -EOPNOTSUPP; + + break; + default: + return -EOPNOTSUPP; + } + + if (unlikely(!drv_iter || !ops->ndo_get_xdp_stats_nch)) + return -EINVAL; + + nch = ops->ndo_get_xdp_stats_nch(dev, attr_id); + switch (nch) { + case 0: + /* Stats are shared across the netdev */ + nch = 1; + break; + case 1 ... INT_MAX: + /* Stats are per-channel */ + break; + default: + return nch; + } + + for (i = 0; i < nch; i++) { + const struct xdp_rx_drv_stats *rstats; + const struct xdp_tx_drv_stats *tstats; + + rstats = xsk ? &drv_iter->xsk_rx : &drv_iter->xdp_rx; + xdp_fetch_rx_drv_stats(iter, rstats); + + tstats = xsk ? &drv_iter->xsk_tx : &drv_iter->xdp_tx; + xdp_fetch_tx_drv_stats(iter, tstats); + + drv_iter++; + iter++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(xdp_get_drv_stats_generic);
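To tie the above together, here is a condensed sketch of how a driver can wire up the generic infrastructure end to end, mirroring what the ixgbe changes in this patch do. All "mydrv" identifiers and MYDRV_MAX_QUEUES are hypothetical; only netdev->xstats, the xdp_*_drv_stats helpers, and xdp_get_drv_stats_generic() come from the patch itself::

	/* Probe: allocate one xdp_drv_stats per channel and initialize
	 * the sync points once; rings keep a pointer into this array.
	 */
	netdev->xstats = devm_kcalloc(&pdev->dev, MYDRV_MAX_QUEUES,
				      sizeof(*netdev->xstats), GFP_KERNEL);
	if (!netdev->xstats)
		return -ENOMEM;

	for (i = 0; i < MYDRV_MAX_QUEUES; i++)
		xdp_init_drv_stats(netdev->xstats + i);

	ring->xdp_stats = netdev->xstats + ring->queue_index;

	/* Rx hot path: accumulate into the onstack structure per frame
	 * (lrstats.packets++, lrstats.pass++, ...) and flush it once at
	 * the end of the NAPI poll.
	 */
	struct xdp_rx_drv_stats_local lrstats = { };

	xdp_update_rx_drv_stats(&ring->xdp_stats->xdp_rx, &lrstats);

	/* Control path: report the per-channel count and reuse the
	 * generic .ndo_get_xdp_stats implementation.
	 */
	static int mydrv_get_xdp_stats_nch(const struct net_device *dev,
					   u32 attr_id)
	{
		switch (attr_id) {
		case IFLA_XDP_XSTATS_TYPE_XDP:
		case IFLA_XDP_XSTATS_TYPE_XSK:
			return MYDRV_MAX_QUEUES;
		default:
			return -EOPNOTSUPP;
		}
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		/* other callbacks omitted in this sketch */
		.ndo_get_xdp_stats_nch	= mydrv_get_xdp_stats_nch,
		.ndo_get_xdp_stats	= xdp_get_drv_stats_generic,
	};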