diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 90bcae479e0a0e..3cad178fb11946 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -290,6 +290,7 @@ struct idpf_vport {
 	struct idpf_tx_queue **txqs;
 	bool crc_enable;
 
+	struct bpf_prog *xdp_prog;
 	bool xdpq_share;
 	u16 num_xdp_txq;
 	u16 xdp_txq_offset;
@@ -599,10 +600,7 @@ static inline int idpf_is_queue_model_split(u16 q_model)
  */
 static inline bool idpf_xdp_is_prog_ena(const struct idpf_vport *vport)
 {
-	if (!vport->adapter)
-		return false;
-
-	return !!vport->adapter->vport_config[vport->idx]->user_config.xdp.prog;
+	return !!vport->xdp_prog;
 }
 
 #define idpf_is_cap_ena(adapter, field, flag) \
@@ -836,6 +834,7 @@ void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
 int idpf_intr_req(struct idpf_adapter *adapter);
 void idpf_intr_rel(struct idpf_adapter *adapter);
 u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
+int idpf_vport_open(struct idpf_vport *vport);
 int idpf_initiate_soft_reset(struct idpf_vport *vport,
 			     enum idpf_vport_reset_cause reset_cause);
 void idpf_deinit_task(struct idpf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index e875a3c24f74ba..9ba72e891b787b 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1314,7 +1314,7 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport)
  * idpf_vport_open - Bring up a vport
  * @vport: vport to bring up
  */
-static int idpf_vport_open(struct idpf_vport *vport)
+int idpf_vport_open(struct idpf_vport *vport)
 {
 	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
 	struct idpf_adapter *adapter = vport->adapter;
@@ -1901,7 +1901,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 				       new_vport->num_rxq, new_vport->num_bufq);
 	if (err)
-		goto err_reset;
+		goto free_vport;
 
 	/* Same comment as above regarding avoiding copying the wait_queues and
 	 * mutexes applies here. We do not want to mess with those if possible.
 	 */
@@ -1915,7 +1915,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 					       vport->num_txq - vport->num_xdp_txq,
 					       vport->num_xdp_txq);
 	if (err)
-		goto err_open;
+		goto free_vport;
 
 	if (current_state == __IDPF_VPORT_UP)
 		err = idpf_vport_open(vport);
@@ -1924,14 +1924,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
 
 	return err;
 
-err_reset:
-	idpf_send_add_queues_msg(vport, vport->num_txq, vport->num_complq,
-				 vport->num_rxq, vport->num_bufq);
-
-err_open:
-	if (current_state == __IDPF_VPORT_UP)
-		idpf_vport_open(vport);
-
 free_vport:
 	kfree(new_vport);
 
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index e0dcab4d7771c2..4f22d1b4ffdb88 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -1585,6 +1585,7 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport,
 		config_data->num_req_tx_qs = le16_to_cpu(vport_msg->num_tx_q);
 		config_data->num_req_rx_qs = le16_to_cpu(vport_msg->num_rx_q);
 	}
+	vport->xdp_prog = config_data->xdp.prog;
 
 	if (idpf_is_queue_model_split(vport->txq_model))
 		vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq);
@@ -2098,7 +2099,7 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	if (err)
 		goto err_out;
 
-	prog = vport->adapter->vport_config[vport->idx]->user_config.xdp.prog;
+	prog = vport->xdp_prog;
 	idpf_copy_xdp_prog_to_qs(vport, prog);
 
 	return 0;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index ec3f3a9eb8f956..e4e36969491da9 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2127,13 +2127,11 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
 
 	split = idpf_is_queue_model_split(qs->vport->txq_model);
 
-	for (u32 i = 0; i < qs->num; i++) {
+	for (u32 i = 0, j = 0; i < qs->num; i++) {
 		const struct idpf_queue_ptr *q = &qs->qs[i];
 		const struct idpf_q_vector *vec;
 		u32 qid, v_idx, itr_idx;
 
-		vqv[i].queue_type = cpu_to_le32(q->type);
-
 		switch (q->type) {
 		case VIRTCHNL2_QUEUE_TYPE_RX:
 			qid = q->rxq->q_id;
@@ -2147,8 +2145,8 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
 				v_idx = vec->v_idx;
 				itr_idx = vec->rx_itr_idx;
 			} else {
-				v_idx = 0;
-				itr_idx = VIRTCHNL2_ITR_IDX_0;
+				params.num_chunks--;
+				continue;
 			}
 			break;
 		case VIRTCHNL2_QUEUE_TYPE_TX:
@@ -2167,17 +2165,19 @@ idpf_send_map_unmap_queue_set_vector_msg(const struct idpf_queue_set *qs,
 				v_idx = vec->v_idx;
 				itr_idx = vec->tx_itr_idx;
 			} else {
-				v_idx = 0;
-				itr_idx = VIRTCHNL2_ITR_IDX_1;
+				params.num_chunks--;
+				continue;
 			}
 			break;
 		default:
 			return -EINVAL;
 		}
 
-		vqv[i].queue_id = cpu_to_le32(qid);
-		vqv[i].vector_id = cpu_to_le16(v_idx);
-		vqv[i].itr_idx = cpu_to_le32(itr_idx);
+		vqv[j].queue_type = cpu_to_le32(q->type);
+		vqv[j].queue_id = cpu_to_le32(qid);
+		vqv[j].vector_id = cpu_to_le16(v_idx);
+		vqv[j].itr_idx = cpu_to_le32(itr_idx);
+		j++;
 	}
 
 	return idpf_send_chunked_msg(qs->vport, &params);
diff --git a/drivers/net/ethernet/intel/idpf/xdp.c b/drivers/net/ethernet/intel/idpf/xdp.c
index 9b0de9a0d2d3f9..445a057fbf7e6c 100644
--- a/drivers/net/ethernet/intel/idpf/xdp.c
+++ b/drivers/net/ethernet/intel/idpf/xdp.c
@@ -467,13 +467,16 @@ void idpf_xdp_set_features(const struct idpf_vport *vport)
 static int
 idpf_xdp_setup_prog(struct idpf_vport *vport, struct netdev_bpf *xdp)
 {
+	struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+	enum idpf_vport_state current_state = np->state;
 	struct bpf_prog *prog = xdp->prog;
 	struct xdp_attachment_info *info;
 	bool reconfig;
 	int ret;
 
 	info = &vport->adapter->vport_config[vport->idx]->user_config.xdp;
-	reconfig = !!info->prog != !!prog;
+	reconfig = !!info->prog != !!prog &&
+		   !test_bit(IDPF_REMOVE_IN_PROG, vport->adapter->flags);
 
 	xdp_attachment_setup(info, xdp);
 
@@ -482,16 +485,32 @@ idpf_xdp_setup_prog(struct idpf_vport *vport, struct netdev_bpf *xdp)
 		return 0;
 	}
 
-	libeth_xdp_set_redirect(vport->netdev, prog);
-
 	ret = idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE);
 	if (ret) {
 		NL_SET_ERR_MSG_MOD(xdp->extack,
 				   "Could not reopen the vport after XDP setup");
-		return ret;
+		goto err_reset;
 	}
 
+	libeth_xdp_set_redirect(vport->netdev, prog);
 	return 0;
+
+err_reset:
+	if (info->prog)
+		bpf_prog_put(info->prog);
+	info->prog = NULL;
+	info->flags = 0;
+
+	if (idpf_initiate_soft_reset(vport, IDPF_SR_Q_CHANGE)) {
+		NL_SET_ERR_MSG_MOD(xdp->extack,
+				   "Could not restore the vport config after failed XDP setup");
+		return ret;
+	}
+
+	if (current_state == __IDPF_VPORT_UP)
+		idpf_vport_open(vport);
+
+	return ret;
 }
 
 /**
diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c
index 63ffa48e2f989f..4615cd8eccfb2f 100644
--- a/drivers/net/ethernet/intel/libeth/rx.c
+++ b/drivers/net/ethernet/intel/libeth/rx.c
@@ -189,6 +189,7 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi)
 	return 0;
 
 err_mem:
+	xdp_unreg_page_pool(pool);
 	kvfree(fqes);
 err_buf:
 	page_pool_destroy(pool);
@@ -205,6 +206,7 @@ void libeth_rx_fq_destroy(struct libeth_fq *fq)
 {
 	xdp_unreg_page_pool(fq->pp);
 	kvfree(fq->fqes);
+	page_pool_destroy(fq->pp);
 }
 EXPORT_SYMBOL_NS_GPL(libeth_rx_fq_destroy, LIBETH);
 