From de5e22af42e2fb58532d6fc7f37ee97bd68b76c9 Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Mon, 19 Aug 2024 18:17:54 +0200 Subject: [PATCH 1/5] idpf: fix error handling in soft_reset for XDP The commit 35d653aee88a ("idpf: implement XDP_SETUP_PROG in ndo_bpf for splitq") uses soft_reset to perform a full vport reconfiguration after changes in XDP setup. Unfortunately, the soft_reset may fail after attaching the XDP program to the vport. It can happen when the HW limits resources that can be allocated to fulfill XDP requirements. In such a case, before we return an error from XDP_SETUP_PROG, we have to fully restore the previous vport state, including removing the XDP program. In order to remove the already loaded XDP program in case of reset error, re-implement the error handling path and move some calls to the XDP callback. Fixes: 35d653aee88a ("idpf: implement XDP_SETUP_PROG in ndo_bpf for splitq") Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf.h | 1 + drivers/net/ethernet/intel/idpf/idpf_lib.c | 14 +++---------- drivers/net/ethernet/intel/idpf/xdp.c | 24 +++++++++++++++++++--- 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 90bcae479e0a0e..941253b8c7b30a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -836,6 +836,7 @@ void idpf_vf_dev_ops_init(struct idpf_adapter *adapter); int idpf_intr_req(struct idpf_adapter *adapter); void idpf_intr_rel(struct idpf_adapter *adapter); u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter); +int idpf_vport_open(struct idpf_vport *vport); int idpf_initiate_soft_reset(struct idpf_vport *vport, enum idpf_vport_reset_cause reset_cause); void idpf_deinit_task(struct idpf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index e875a3c24f74ba..9ba72e891b787b 100644 --- 
a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -1314,7 +1314,7 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) * idpf_vport_open - Bring up a vport * @vport: vport to bring up */ -static int idpf_vport_open(struct idpf_vport *vport) +int idpf_vport_open(struct idpf_vport *vport) { struct idpf_netdev_priv *np = netdev_priv(vport->netdev); struct idpf_adapter *adapter = vport->adapter; @@ -1901,7 +1901,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, new_vport->num_rxq, new_vport->num_bufq); if (err) - goto err_reset; + goto free_vport; /* Same comment as above regarding avoiding copying the wait_queues and * mutexes applies here. We do not want to mess with those if possible. @@ -1915,7 +1915,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, vport->num_txq - vport->num_xdp_txq, vport->num_xdp_txq); if (err) - goto err_open; + goto free_vport; if (current_state == __IDPF_VPORT_UP) err = idpf_vport_open(vport); @@ -1924,14 +1924,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, return err; -err_reset: - idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq, - vport->num_rxq, vport->num_bufq); - -err_open: - if (current_state == __IDPF_VPORT_UP) - idpf_vport_open(vport); - free_vport: kfree(new_vport); diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c index 9b0de9a0d2d3f9..fbe112c42c7645 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.c +++ b/drivers/net/ethernet/intel/idpf/xdp.c @@ -467,6 +467,8 @@ void idpf_xdp_set_features(const struct idpf_vport *vport) static int idpf_xdp_setup_prog(struct idpf_vport *vport, struct netdev_bpf *xdp) { + struct idpf_netdev_priv *np = netdev_priv(vport->netdev); + enum idpf_vport_state current_state = np->state; struct bpf_prog *prog = xdp->prog; struct xdp_attachment_info *info; bool reconfig; @@ -482,16 +484,32 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct netdev_bpf *xdp) 
return 0; } - libeth_xdp_set_redirect(vport->netdev, prog); - ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE); if (ret) { NL_SET_ERR_MSG_MOD(xdp->extack, "Could not reopen the vport after XDP setup"); - return ret; + goto err_reset; } + libeth_xdp_set_redirect(vport->netdev, prog); return 0; + +err_reset: + if (info->prog) + bpf_prog_put(info->prog); + info->prog = NULL; + info->flags = 0; + + if (idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE)) { + NL_SET_ERR_MSG_MOD(xdp->extack, + "Could not restore the vport config after failed XDP setup"); + return ret; + } + + if (current_state == __IDPF_VPORT_UP) + idpf_vport_open(vport); + + return ret; } /** From 2ab12250ec535d9802ff66eaa4e4b8392f7cbafa Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Fri, 6 Sep 2024 14:37:00 +0200 Subject: [PATCH 2/5] libeth: fix releasing page_pool resources The commit f00334f288c6 ("libeth: support native XDP and register memory model") removed calling of "page_pool_destroy()" from "libeth_rx_fq_destroy()" and replaced it with the call of "xdp_unreg_page_pool()". There was an assumption that "page_pool_destroy()" will be called during unregistering the memory model. Unfortunately, although "page_pool_destroy()" is called from "xdp_unreg_mem_model()", it does not perform releasing of all resources. All page pool resources can be released completely only if the user reference counter is decremented to zero. In the libeth scenario, the "user_cnt" page_pool reference counter is set to 1 during the page_pool creation. Then, it is again incremented to 2 while the memory model is being registered. Therefore, calling "xdp_unreg_mem_model()" decrements the reference counter only by one and some page_pool resources are never released. The page_pool API should be called in a symmetric way, so: - each explicit call of "page_pool_create()" should be followed by "page_pool_destroy()", - each call of "xdp_reg_page_pool()" should be followed by "xdp_unreg_page_pool()". 
Fix the issue by restoring the call of "page_pool_destroy()" to let the page_pool decrement its reference counter back to zero. Fixes: f00334f288c6 ("libeth: support native XDP and register memory model") Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/libeth/rx.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c index 63ffa48e2f989f..4615cd8eccfb2f 100644 --- a/drivers/net/ethernet/intel/libeth/rx.c +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -189,6 +189,7 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) return 0; err_mem: + xdp_unreg_page_pool(pool); kvfree(fqes); err_buf: page_pool_destroy(pool); @@ -205,6 +206,7 @@ void libeth_rx_fq_destroy(struct libeth_fq *fq) { xdp_unreg_page_pool(fq->pp); kvfree(fq->fqes); + page_pool_destroy(fq->pp); } EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH); From c1d15477aacd4c78bed109783b71095d00b56763 Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Wed, 18 Sep 2024 15:58:42 +0200 Subject: [PATCH 3/5] idpf: fix unloading the xdp program on rmmod Block the soft reset and vport re-configuration from "idpf_remove()" context. The XDP_SETUP_PROG command is normally called while the netdev is being unregistered. In such a case the IDPF driver should unload the XDP program and return success. Otherwise, a kernel warning will be shown in dmesg. 
Fixes: 35d653aee88a ("idpf: implement XDP_SETUP_PROG in ndo_bpf for splitq") Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/xdp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c index fbe112c42c7645..445a057fbf7e6c 100644 --- a/drivers/net/ethernet/intel/idpf/xdp.c +++ b/drivers/net/ethernet/intel/idpf/xdp.c @@ -475,7 +475,8 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct netdev_bpf *xdp) int ret; info = &vport->adapter->vport_config[vport->idx]->user_config.xdp; - reconfig = !!info->prog != !!prog; + reconfig = !!info->prog != !!prog && + !test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags); xdp_attachment_setup(info, xdp); From ffe9fc64f0cc0feebd5cc63066bcb0c9f3bd37ab Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Tue, 24 Sep 2024 17:00:33 +0200 Subject: [PATCH 4/5] idpf: stop mapping XDP queues with q_vector Fixes: 98e4247c8f63 ("idpf: prepare structures to support xdp") Fixes: f12c11e9b79d ("idpf: add vc functions to manage selected queues") Signed-off-by: Michal Kubiak --- .../net/ethernet/intel/idpf/idpf_virtchnl.c | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index ec3f3a9eb8f956..e4e36969491da9 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -2127,13 +2127,11 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, split = idpf_is_queue_model_split(qs->vport->txq_model); - for (u32 i = 0; i < qs->num; i++) { + for (u32 i = 0, j = 0; i < qs->num; i++) { const struct idpf_queue_ptr *q = &qs->qs[i]; const struct idpf_q_vector *vec; u32 qid, v_idx, itr_idx; - vqv[i].queue_type = cpu_to_le32(q->type); - switch (q->type) { case VIRTCHNL2_QUEUE_TYPE_RX: qid = q->rxq->q_id; @@ -2147,8 +2145,8 @@ 
idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, v_idx = vec->v_idx; itr_idx = vec->rx_itr_idx; } else { - v_idx = 0; - itr_idx = VIRTCHNL2_ITR_IDX_0; + params.num_chunks--; + continue; } break; case VIRTCHNL2_QUEUE_TYPE_TX: @@ -2167,17 +2165,19 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs, v_idx = vec->v_idx; itr_idx = vec->tx_itr_idx; } else { - v_idx = 0; - itr_idx = VIRTCHNL2_ITR_IDX_1; + params.num_chunks--; + continue; } break; default: return -EINVAL; } - vqv[i].queue_id = cpu_to_le32(qid); - vqv[i].vector_id = cpu_to_le16(v_idx); - vqv[i].itr_idx = cpu_to_le32(itr_idx); + vqv[j].queue_type = cpu_to_le32(q->type); + vqv[j].queue_id = cpu_to_le32(qid); + vqv[j].vector_id = cpu_to_le16(v_idx); + vqv[j].itr_idx = cpu_to_le32(itr_idx); + j++; } return idpf_send_chunked_msg(qs->vport, ¶ms); From 91d0f074131f3b5695a40274244a4fc79b82c3a6 Mon Sep 17 00:00:00 2001 From: Michal Kubiak Date: Mon, 30 Sep 2024 14:38:34 +0200 Subject: [PATCH 5/5] idpf: keep the current xdp program in the vport structure Signed-off-by: Michal Kubiak --- drivers/net/ethernet/intel/idpf/idpf.h | 6 ++---- drivers/net/ethernet/intel/idpf/idpf_txrx.c | 3 ++- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h index 941253b8c7b30a..3cad178fb11946 100644 --- a/drivers/net/ethernet/intel/idpf/idpf.h +++ b/drivers/net/ethernet/intel/idpf/idpf.h @@ -290,6 +290,7 @@ struct idpf_vport { struct idpf_tx_queue **txqs; bool crc_enable; + struct bpf_prog *xdp_prog; bool xdpq_share; u16 num_xdp_txq; u16 xdp_txq_offset; @@ -599,10 +600,7 @@ static inline int idpf_is_queue_model_split(u16 q_model) */ static inline bool idpf_xdp_is_prog_ena(const struct idpf_vport *vport) { - if (!vport->adapter) - return false; - - return !!vport->adapter->vport_config[vport->idx]->user_config.xdp.prog; + return !!vport->xdp_prog; } #define idpf_is_cap_ena(adapter, field, flag) \ 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index e0dcab4d7771c2..4f22d1b4ffdb88 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1585,6 +1585,7 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q); config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q); } + vport->xdp_prog = config_data->xdp.prog; if (idpf_is_queue_model_split(vport->txq_model)) vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); @@ -2098,7 +2099,7 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport) if (err) goto err_out; - prog = vport->adapter->vport_config[vport->idx]->user_config.xdp.prog; + prog = vport->xdp_prog; idpf_copy_xdp_prog_to_qs(vport, prog); return 0;