diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index f05ed84600fda6..f7688ecc3fb13b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,6 +37,7 @@ struct idpf_vport_max_q;
 #define IDPF_MB_MAX_ERR			20
 #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz)	\
 	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO	500
 #define IDPF_WAIT_FOR_EVENT_TIMEO_MIN	2000
 #define IDPF_WAIT_FOR_EVENT_TIMEO	60000
 
@@ -291,13 +292,10 @@ enum idpf_vport_reset_cause {
 /**
  * enum idpf_vport_flags - Vport flags
  * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- *			  processing is done
  * @IDPF_VPORT_FLAGS_NBITS: Must be last
  */
 enum idpf_vport_flags {
 	IDPF_VPORT_DEL_QUEUES,
-	IDPF_VPORT_SW_MARKER,
 	IDPF_VPORT_FLAGS_NBITS,
 };
 
@@ -361,7 +359,6 @@ struct idpf_port_stats {
  * @vc_msg: Virtchnl message buffer
  * @vc_state: Virtchnl message state
  * @vchnl_wq: Wait queue for virtchnl messages
- * @sw_marker_wq: workqueue for marker packets
  * @vc_buf_lock: Lock to protect virtchnl buffer
  */
 struct idpf_vport {
@@ -419,7 +416,6 @@ struct idpf_vport {
 	DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
 
 	wait_queue_head_t vchnl_wq;
-	wait_queue_head_t sw_marker_wq;
 	struct mutex vc_buf_lock;
 };
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index af4f708b82f391..1747ca2aeeb8dd 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1565,7 +1565,6 @@ void idpf_init_task(struct work_struct *work)
 	index = vport->idx;
 	vport_config = adapter->vport_config[index];
 
-	init_waitqueue_head(&vport->sw_marker_wq);
 	init_waitqueue_head(&vport->vchnl_wq);
 
 	mutex_init(&vport->vc_buf_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index c57450fc6d2504..7fc5e5f6d6b267 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -757,8 +757,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
  */
 int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
 {
+	struct device *dev = &rxq->vport->adapter->pdev->dev;
 	enum virtchnl2_queue_type type;
-	struct device *dev = rxq->dev;
 
 	if (bufq)
 		rxq->size = rxq->desc_count *
@@ -1395,6 +1395,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
 			}
 		}
 
+		rx_qgrp->splitq.num_bufq_sets = vport->num_bufqs_per_qgrp;
 		rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
 						    sizeof(struct idpf_bufq_set),
 						    GFP_KERNEL);
@@ -1524,21 +1525,21 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	if (err)
 		goto err_out;
 
-	err = idpf_tx_desc_alloc_all(vport);
+	err = idpf_vport_init_fast_path_txqs(vport);
 	if (err)
 		goto err_out;
 
+	idpf_vport_xdpq_get(vport);
-	err = idpf_rx_desc_alloc_all(vport);
+	err = idpf_tx_desc_alloc_all(vport);
 	if (err)
 		goto err_out;
 
-	err = idpf_vport_init_fast_path_txqs(vport);
+	err = idpf_rx_desc_alloc_all(vport);
 	if (err)
 		goto err_out;
 
 	prog = vport->adapter->vport_config[vport->idx]->user_config.xdp_prog;
 	idpf_copy_xdp_prog_to_qs(vport, prog);
-	idpf_vport_xdpq_get(vport);
 
 	return 0;
 
@@ -1548,31 +1549,6 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	return err;
 }
 
-/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
- */
-void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
-{
-	struct idpf_vport *vport = tx_q->vport;
-	int i;
-
-	clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
-	/* Hardware must write marker packets to all queues associated with
-	 * completion queues. So check if all queues received marker packets
-	 */
-	for (i = 0; i < vport->num_txq; i++)
-		/* If we're still waiting on any other TXQ marker completions,
-		 * just return now since we cannot wake up the marker_wq yet.
-		 */
-		if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
-			return;
-
-	/* Drain complete */
-	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
-	wake_up(&vport->sw_marker_wq);
-}
-
 /**
  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
  * out of order completions
@@ -1861,6 +1837,23 @@ idpf_tx_handle_rs_cmpl_fb(struct idpf_queue *txq,
 	idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
 }
 
+/**
+ * idpf_tx_update_complq_indexes - update completion queue indexes
+ * @complq: completion queue being updated
+ * @ntc: current "next to clean" index value
+ * @gen_flag: current "generation" flag value
+ */
+static void idpf_tx_update_complq_indexes(struct idpf_queue *complq,
+					  int ntc, bool gen_flag)
+{
+	ntc += complq->desc_count;
+	complq->next_to_clean = ntc;
+	if (gen_flag)
+		set_bit(__IDPF_Q_GEN_CHK, complq->flags);
+	else
+		clear_bit(__IDPF_Q_GEN_CHK, complq->flags);
+}
+
 /**
  * idpf_tx_finalize_complq - Finalize completion queue cleaning
  * @complq: completion queue to finalize
@@ -1913,12 +1906,7 @@ static void idpf_tx_finalize_complq(struct idpf_queue *complq, int ntc,
 		tx_q->cleaned_pkts = 0;
 	}
 
-	ntc += complq->desc_count;
-	complq->next_to_clean = ntc;
-	if (gen_flag)
-		set_bit(__IDPF_Q_GEN_CHK, complq->flags);
-	else
-		clear_bit(__IDPF_Q_GEN_CHK, complq->flags);
+	idpf_tx_update_complq_indexes(complq, ntc, gen_flag);
 }
 
 /**
@@ -1954,6 +1942,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 
 		ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q,
 					      gen_flag);
+
 		switch (ctype) {
 		case IDPF_TXD_COMPLT_RE:
 			if (unlikely(!flow))
@@ -1974,9 +1963,6 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 						    &cleaned_stats, budget);
 			break;
-		case IDPF_TXD_COMPLT_SW_MARKER:
-			idpf_tx_handle_sw_marker(tx_q);
-			break;
 		case -ENODATA:
 			goto exit_clean_complq;
 		case -EINVAL:
@@ -2019,6 +2005,60 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 	return !!complq_budget;
 }
 
+/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ */
+void idpf_wait_for_sw_marker_completion(struct idpf_queue *txq)
+{
+	struct idpf_queue *complq = txq->txq_grp->complq;
+	struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+	s16 ntc = complq->next_to_clean;
+	unsigned long timeout;
+	bool flow, gen_flag;
+	u32 pos = ntc;
+
+	if (!test_bit(__IDPF_Q_SW_MARKER, txq->flags))
+		return;
+
+	flow = test_bit(__IDPF_Q_FLOW_SCH_EN, complq->flags);
+	gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags);
+
+	timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+	tx_desc = flow ? &complq->comp[pos].common : &complq->comp_4b[pos];
+	ntc -= complq->desc_count;
+
+	do {
+		struct idpf_queue *tx_q;
+		int ctype;
+
+		ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q,
+					      gen_flag);
+		if (ctype == IDPF_TXD_COMPLT_SW_MARKER) {
+			clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+			if (txq == tx_q)
+				break;
+		} else if (ctype == -ENODATA) {
+			usleep_range(500, 1000);
+			continue;
+		}
+
+		pos++;
+		ntc++;
+		if (unlikely(!ntc)) {
+			ntc -= complq->desc_count;
+			pos = 0;
+			gen_flag = !gen_flag;
+		}
+
+		tx_desc = flow ? &complq->comp[pos].common :
+				 &complq->comp_4b[pos];
+		prefetch(tx_desc);
+	} while (time_before(jiffies, timeout));
+
+	idpf_tx_update_complq_indexes(complq, ntc, gen_flag);
+}
+
 /**
  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
  * based scheduling descriptors
@@ -3922,15 +3962,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
 	else
 		idpf_vport_intr_set_wb_on_itr(q_vector);
 
-	/* Switch to poll mode in the tear-down path after sending disable
-	 * queues virtchnl message, as the interrupts will be disabled after
-	 * that
-	 */
-	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
-						   q_vector->tx[0]->flags)))
-		return budget;
-	else
-		return work_done;
+	return work_done;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index 02248dce54174a..927e507bc38bd4 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -311,7 +311,6 @@ struct idpf_ptype_state {
  *		      and RFLGQ_GEN is the SW bit.
  * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
  * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  * @__IDPF_Q_XSK: Queue used to handle the AF_XDP socket
  */
@@ -320,7 +319,6 @@ enum idpf_queue_flags_t {
 	__IDPF_RFLQ_GEN_CHK,
 	__IDPF_Q_FLOW_SCH_EN,
 	__IDPF_Q_SW_MARKER,
-	__IDPF_Q_POLL_MODE,
 	__IDPF_Q_XDP,
 	__IDPF_Q_XSK,
 
@@ -759,6 +757,7 @@ struct idpf_rxq_group {
 		} singleq;
 		struct {
 			u16 num_rxq_sets;
+			u16 num_bufq_sets;
 			struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
 			struct idpf_bufq_set *bufq_sets;
 		} splitq;
@@ -953,12 +952,12 @@ struct virtchnl2_rx_flex_desc_adv_nic_3;
 int idpf_rx_process_skb_fields(struct idpf_queue *rxq, struct sk_buff *skb,
			       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q);
 int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model);
 void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model);
 int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq);
 void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq);
 int idpf_rx_bufs_init(struct idpf_queue *rxbufq, enum libie_rx_buf_type type);
+void idpf_wait_for_sw_marker_completion(struct idpf_queue *txq);
 
 /**
  * idpf_xdpq_update_tail - Updates the XDP Tx queue tail register
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 24df268ad49eb0..b8b25fa1deb875 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -760,21 +760,19 @@ static int idpf_wait_for_selected_marker_events(struct idpf_vport *vport,
 						struct idpf_queue **qs,
 						int num_qs)
 {
-	int event;
+	bool markers_rcvd = true;
 	int i;
 
-	for (i = 0; i < num_qs; i++)
-		set_bit(__IDPF_Q_SW_MARKER, qs[i]->flags);
+	for (i = 0; i < num_qs; i++) {
+		struct idpf_queue *txq = qs[i];
 
-	event = wait_event_timeout(vport->sw_marker_wq,
-				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
-						      vport->flags),
-				   msecs_to_jiffies(500));
+		set_bit(__IDPF_Q_SW_MARKER, txq->flags);
+		idpf_wait_for_sw_marker_completion(txq);
 
-	for (i = 0; i < num_qs; i++)
-		clear_bit(__IDPF_Q_POLL_MODE, qs[i]->flags);
+		markers_rcvd &= !test_bit(__IDPF_Q_SW_MARKER, txq->flags);
+	}
 
-	if (event)
+	if (markers_rcvd)
 		return 0;
 
 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
@@ -2260,22 +2258,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
  */
 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
 {
-	int err, i;
+	int err;
 
 	err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
 	if (err)
 		return err;
 
-	/* switch to poll mode as interrupts will be disabled after disable
-	 * queues virtchnl message is sent
-	 */
-	for (i = 0; i < vport->num_txq; i++)
-		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
-
-	/* schedule the napi to receive all the marker packets */
-	for (i = 0; i < vport->num_q_vectors; i++)
-		napi_schedule(&vport->q_vectors[i].napi);
-
 	return idpf_wait_for_marker_event(vport);
 }
 
@@ -2323,15 +2311,9 @@ int idpf_send_disable_selected_queues_msg(struct idpf_vport *vport,
 	if (!tx_qs)
 		return -ENOMEM;
 
-	for (i = 0; i < num_q; i++) {
-		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX) {
-			set_bit(__IDPF_Q_POLL_MODE, qs[i]->flags);
+	for (i = 0; i < num_q; i++)
+		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX)
 			tx_qs[tx_idx++] = qs[i];
-		}
-
-		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
-			napi_schedule(&qs[i]->q_vector->napi);
-	}
 
 	err = idpf_wait_for_selected_marker_events(vport, tx_qs, tx_idx);
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_xdp.c b/drivers/net/ethernet/intel/idpf/idpf_xdp.c
index c3629465d0899e..d5ecba34d25e07 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_xdp.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_xdp.c
@@ -39,13 +39,13 @@ static int idpf_rxq_for_each(const struct idpf_vport *vport,
 }
 
 /**
- * idpf_xdp_rxq_info_init - Setup XDP RxQ info for a given Rx queue
+ * __idpf_xdp_rxq_info_init - Setup XDP RxQ info for a given Rx queue
  * @rxq: Rx queue for which the resources are setup
  * @arg: flag indicating if the HW works in split queue mode
 *
  * Return: 0 on success, negative on failure.
  */
-static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq, void *arg)
+static int __idpf_xdp_rxq_info_init(struct idpf_queue *rxq, void *arg)
 {
 	const struct idpf_vport *vport = rxq->vport;
 	int err;
@@ -60,6 +60,7 @@ static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq, void *arg)
 
 		err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
 						 MEM_TYPE_XSK_BUFF_POOL, NULL);
+		xsk_pool_set_rxq_info(rxq->xsk_rx, &rxq->xdp_rxq);
 	} else {
 		const struct page_pool *pp;
 
@@ -80,6 +81,15 @@ static int idpf_xdp_rxq_info_init(struct idpf_queue *rxq, void *arg)
 	return err;
 }
 
+int idpf_xdp_rxq_info_init(struct idpf_queue *rxq)
+{
+	struct idpf_vport *vport = rxq->vport;
+	void *arg;
+
+	arg = (void *)(size_t)idpf_is_queue_model_split(vport->rxq_model);
+	return __idpf_xdp_rxq_info_init(rxq, arg);
+}
+
 /**
  * idpf_xdp_rxq_info_init_all - initialize RxQ info for all Rx queues in vport
  * @vport: vport to setup the info
@@ -92,14 +102,14 @@ int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport)
 
 	arg = (void *)(size_t)idpf_is_queue_model_split(vport->rxq_model);
 
-	return idpf_rxq_for_each(vport, idpf_xdp_rxq_info_init, arg);
+	return idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_init, arg);
 }
 
 /**
- * idpf_xdp_rxq_info_deinit - Deinit XDP RxQ info for a given Rx queue
+ * __idpf_xdp_rxq_info_deinit - Deinit XDP RxQ info for a given Rx queue
  * @rxq: Rx queue for which the resources are destroyed
  */
-static int idpf_xdp_rxq_info_deinit(struct idpf_queue *rxq, void *arg)
+static int __idpf_xdp_rxq_info_deinit(struct idpf_queue *rxq, void *arg)
 {
 	rxq->xdpqs = NULL;
 	rxq->num_xdp_txq = 0;
@@ -112,13 +122,18 @@ static int idpf_xdp_rxq_info_deinit(struct idpf_queue *rxq, void *arg)
 	return 0;
 }
 
+int idpf_xdp_rxq_info_deinit(struct idpf_queue *rxq)
+{
+	return __idpf_xdp_rxq_info_deinit(rxq, NULL);
+}
+
 /**
  * idpf_xdp_rxq_info_deinit_all - deinit RxQ info for all Rx queues in vport
  * @vport: vport to setup the info
  */
 void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport)
 {
-	idpf_rxq_for_each(vport, idpf_xdp_rxq_info_deinit, NULL);
+	idpf_rxq_for_each(vport, __idpf_xdp_rxq_info_deinit, NULL);
 }
 
 static int idpf_xdp_rxq_assign_prog(struct idpf_queue *rxq, void *arg)
@@ -239,9 +254,6 @@ static u32 idpf_clean_xdp_irq(struct idpf_queue *xdpq)
 		}
 
 		switch (ctype) {
-		case IDPF_TXD_COMPLT_SW_MARKER:
-			idpf_tx_handle_sw_marker(xdpq);
-			break;
 		case -ENODATA:
 			goto exit_xdp_irq;
 		case -EINVAL:
diff --git a/drivers/net/ethernet/intel/idpf/idpf_xdp.h b/drivers/net/ethernet/intel/idpf/idpf_xdp.h
index f1444482f69d44..0f2ce053e44040 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_xdp.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_xdp.h
@@ -8,6 +8,8 @@
 
 struct idpf_vport;
 
+int idpf_xdp_rxq_info_init(struct idpf_queue *rxq);
+int idpf_xdp_rxq_info_deinit(struct idpf_queue *rxq);
 int idpf_xdp_rxq_info_init_all(const struct idpf_vport *vport);
 void idpf_xdp_rxq_info_deinit_all(const struct idpf_vport *vport);
 void idpf_copy_xdp_prog_to_qs(const struct idpf_vport *vport,
diff --git a/drivers/net/ethernet/intel/idpf/idpf_xsk.c b/drivers/net/ethernet/intel/idpf/idpf_xsk.c
index b5b7e229f80a50..e2bf368cc64c95 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_xsk.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_xsk.c
@@ -5,6 +5,7 @@
 
 #include "idpf.h"
 #include "idpf_xsk.h"
+#include "idpf_xdp.h"
 
 /**
  * idpf_xsk_setup_queue - set xsk_pool pointer from netdev to the queue structure
@@ -117,6 +118,13 @@ idpf_qp_cfg_qs(struct idpf_vport *vport, struct idpf_queue **qs, int num_qs)
 				netdev_err(vport->netdev, "Could not allocate buffer for RX queue.\n");
 				break;
 			}
+
+			err = idpf_xdp_rxq_info_init(q);
+			if (err) {
+				netdev_err(vport->netdev, "Could not initialize XDP RxQ info for RX queue.\n");
+				break;
+			}
+
 			if (!splitq)
 				err = idpf_rx_bufs_init(q, LIBIE_RX_BUF_MTU);
 			break;
@@ -162,6 +170,7 @@ idpf_qp_clean_qs(struct idpf_vport *vport, struct idpf_queue **qs, int num_qs)
 		switch (q->q_type) {
 		case VIRTCHNL2_QUEUE_TYPE_RX:
 			idpf_rx_desc_rel(q, false, vport->rxq_model);
+			idpf_xdp_rxq_info_deinit(q);
 			break;
 		case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER:
 			idpf_rx_desc_rel(q, true, vport->rxq_model);
@@ -801,9 +810,6 @@ static u32 idpf_clean_xdp_irq_zc(struct idpf_queue *complq)
 		}
 
 		switch (ctype) {
-		case IDPF_TXD_COMPLT_SW_MARKER:
-			idpf_tx_handle_sw_marker(xdpq);
-			break;
 		case -ENODATA:
 			goto clean_xdpq;
 		case -EINVAL:
@@ -933,7 +939,7 @@ static bool idpf_xsk_tx_flush_bulk(struct libie_xdp_tx_bulk *bq)
 static bool idpf_xsk_run_prog(struct xdp_buff *xdp,
			      struct libie_xdp_tx_bulk *bq)
 {
-	return libie_xdp_run_prog(xdp, bq, idpf_xsk_tx_flush_bulk);
+	return libie_xsk_run_prog(xdp, bq, idpf_xsk_tx_flush_bulk);
 }
 
 static void idpf_xsk_finalize_rx(struct libie_xdp_tx_bulk *bq)
@@ -992,19 +998,23 @@ idpf_xsk_rx_skb(struct xdp_buff *xdp,
  */
 int idpf_clean_rx_irq_zc(struct idpf_queue *rxq, int budget)
 {
+	struct {
+		bool valid;
+		u32 buf_id;
+	} bufqs[IDPF_MAX_BUFQS_PER_RXQ_GRP] = { };
 	struct libie_rq_onstack_stats rs = { };
-	struct idpf_queue *rx_bufq = NULL;
 	u32 ntc = rxq->next_to_clean;
 	struct libie_xdp_tx_bulk bq;
-	u32 buf_id, to_refill;
 	bool failure = false;
+	u32 to_refill;
 
 	libie_xsk_tx_init_bulk(&bq, rxq->xdp_prog, rxq->xdp_rxq.dev,
			       rxq->xdpqs, rxq->num_xdp_txq);
 
 	while (likely(rs.packets < budget)) {
 		const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc;
-		u32 field, rxdid, pkt_len, bufq_id, xdp_act;
+		u32 field, rxdid, bufq_id, buf_id, pkt_len, xdp_act;
+		struct idpf_queue *rx_bufq = NULL;
 		struct xdp_buff *xdp;
 
 		rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb;
@@ -1034,7 +1044,10 @@ int idpf_clean_rx_irq_zc(struct idpf_queue *rxq, int budget)
 		pkt_len = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M,
				    field);
 
-		xdp = libie_xsk_process_buff(rxq->xsk, buf_id, pkt_len);
+		bufqs[bufq_id].buf_id = buf_id;
+		bufqs[bufq_id].valid = true;
+
+		xdp = libie_xsk_process_buff(rx_bufq->xsk, buf_id, pkt_len);
 		if (!xdp)
 			goto next;
 
@@ -1048,7 +1061,6 @@ int idpf_clean_rx_irq_zc(struct idpf_queue *rxq, int budget)
 
 		rs.bytes += pkt_len;
 		rs.packets++;
-
 next:
 		IDPF_RX_BUMP_NTC(rxq, ntc);
 	}
@@ -1061,17 +1073,19 @@ int idpf_clean_rx_irq_zc(struct idpf_queue *rxq, int budget)
 	u64_stats_add(&rxq->q_stats.rx.bytes, rs.bytes);
 	u64_stats_update_end(&rxq->stats_sync);
 
-	if (!rx_bufq)
-		goto skip_refill;
+	for (u32 i = 0; i < rxq->rxq_grp->splitq.num_bufq_sets; i++) {
+		struct idpf_queue *q = &rxq->rxq_grp->splitq.bufq_sets[i].bufq;
 
-	IDPF_RX_BUMP_NTC(rx_bufq, buf_id);
-	rx_bufq->next_to_clean = buf_id;
+		if (bufqs[i].valid) {
+			IDPF_RX_BUMP_NTC(q, bufqs[i].buf_id);
+			q->next_to_clean = bufqs[i].buf_id;
+		}
 
-	to_refill = IDPF_DESC_UNUSED(rx_bufq);
-	if (to_refill > IDPF_QUEUE_QUARTER(rx_bufq))
-		failure |= !idpf_alloc_rx_buffers_zc(rx_bufq, to_refill);
+		to_refill = IDPF_DESC_UNUSED(q);
+		if (to_refill > IDPF_QUEUE_QUARTER(q))
+			failure |= !idpf_alloc_rx_buffers_zc(q, to_refill);
+	}
 
-skip_refill:
 	if (xsk_uses_need_wakeup(rxq->xsk_rx)) {
 		if (failure || rxq->next_to_clean == rxq->next_to_use)
 			xsk_set_rx_need_wakeup(rxq->xsk_rx);
@@ -1081,7 +1095,7 @@ int idpf_clean_rx_irq_zc(struct idpf_queue *rxq, int budget)
 		return rs.packets;
 	}
 
-	return unlikely(failure) ? budget : rs.bytes;
+	return unlikely(failure) ? budget : rs.packets;
 }
 
 /**
diff --git a/include/linux/net/intel/libie/xdp.h b/include/linux/net/intel/libie/xdp.h
index b622d06387f11b..e6310440c7b813 100644
--- a/include/linux/net/intel/libie/xdp.h
+++ b/include/linux/net/intel/libie/xdp.h
@@ -549,7 +549,7 @@ __libie_xdp_run_flush(struct xdp_buff *xdp, struct libie_xdp_tx_bulk *bq,
 
 #define libie_xdp_run_prog(xdp, bq, fl)					      \
 	(__libie_xdp_run_flush(xdp, bq, __libie_xdp_run_prog, fl) ==	      \
-	 XDP_PASS)
+	 LIBIE_XDP_PASS)
 
 static __always_inline void
 libie_xdp_finalize_rx(struct libie_xdp_tx_bulk *bq,