6 changes: 1 addition & 5 deletions drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,6 +37,7 @@ struct idpf_vport_max_q;
 #define IDPF_MB_MAX_ERR 20
 #define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
 	((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
+#define IDPF_WAIT_FOR_MARKER_TIMEO 500
 #define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
 #define IDPF_WAIT_FOR_EVENT_TIMEO 60000
 
@@ -291,13 +292,10 @@ enum idpf_vport_reset_cause {
 /**
  * enum idpf_vport_flags - Vport flags
  * @IDPF_VPORT_DEL_QUEUES: To send delete queues message
- * @IDPF_VPORT_SW_MARKER: Indicate TX pipe drain software marker packets
- *			  processing is done
  * @IDPF_VPORT_FLAGS_NBITS: Must be last
  */
 enum idpf_vport_flags {
 	IDPF_VPORT_DEL_QUEUES,
-	IDPF_VPORT_SW_MARKER,
 	IDPF_VPORT_FLAGS_NBITS,
 };
 
@@ -361,7 +359,6 @@ struct idpf_port_stats {
  * @vc_msg: Virtchnl message buffer
  * @vc_state: Virtchnl message state
  * @vchnl_wq: Wait queue for virtchnl messages
- * @sw_marker_wq: workqueue for marker packets
  * @vc_buf_lock: Lock to protect virtchnl buffer
  */
 struct idpf_vport {
@@ -419,7 +416,6 @@ struct idpf_vport {
 	DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
 
 	wait_queue_head_t vchnl_wq;
-	wait_queue_head_t sw_marker_wq;
 	struct mutex vc_buf_lock;
 };
 
1 change: 0 additions & 1 deletion drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -1565,7 +1565,6 @@ void idpf_init_task(struct work_struct *work)
 	index = vport->idx;
 	vport_config = adapter->vport_config[index];
 
-	init_waitqueue_head(&vport->sw_marker_wq);
 	init_waitqueue_head(&vport->vchnl_wq);
 
 	mutex_init(&vport->vc_buf_lock);
128 changes: 80 additions & 48 deletions drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -757,8 +757,8 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport)
  */
 int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model)
 {
-	struct device *dev = &rxq->vport->adapter->pdev->dev;
 	enum virtchnl2_queue_type type;
+	struct device *dev = rxq->dev;
 
 	if (bufq)
 		rxq->size = rxq->desc_count *
@@ -1395,6 +1395,7 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq)
 		}
 	}
 
+	rx_qgrp->splitq.num_bufq_sets = vport->num_bufqs_per_qgrp;
 	rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp,
 					    sizeof(struct idpf_bufq_set),
 					    GFP_KERNEL);
@@ -1524,21 +1525,21 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	if (err)
 		goto err_out;
 
-	err = idpf_tx_desc_alloc_all(vport);
+	err = idpf_vport_init_fast_path_txqs(vport);
 	if (err)
 		goto err_out;
+	idpf_vport_xdpq_get(vport);
 
-	err = idpf_rx_desc_alloc_all(vport);
+	err = idpf_tx_desc_alloc_all(vport);
 	if (err)
 		goto err_out;
 
-	err = idpf_vport_init_fast_path_txqs(vport);
+	err = idpf_rx_desc_alloc_all(vport);
 	if (err)
 		goto err_out;
 
 	prog = vport->adapter->vport_config[vport->idx]->user_config.xdp_prog;
 	idpf_copy_xdp_prog_to_qs(vport, prog);
-	idpf_vport_xdpq_get(vport);
 
 	return 0;
 
@@ -1548,31 +1549,6 @@ int idpf_vport_queues_alloc(struct idpf_vport *vport)
 	return err;
 }
 
-/**
- * idpf_tx_handle_sw_marker - Handle queue marker packet
- * @tx_q: tx queue to handle software marker
- */
-void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q)
-{
-	struct idpf_vport *vport = tx_q->vport;
-	int i;
-
-	clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
-	/* Hardware must write marker packets to all queues associated with
-	 * completion queues. So check if all queues received marker packets
-	 */
-	for (i = 0; i < vport->num_txq; i++)
-		/* If we're still waiting on any other TXQ marker completions,
-		 * just return now since we cannot wake up the marker_wq yet.
-		 */
-		if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags))
-			return;
-
-	/* Drain complete */
-	set_bit(IDPF_VPORT_SW_MARKER, vport->flags);
-	wake_up(&vport->sw_marker_wq);
-}
-
 /**
  * idpf_tx_clean_stashed_bufs - clean bufs that were stored for
  *			       out of order completions
@@ -1861,6 +1837,23 @@ idpf_tx_handle_rs_cmpl_fb(struct idpf_queue *txq,
 	idpf_tx_clean_stashed_bufs(txq, compl_tag, cleaned, budget);
 }
 
+/**
+ * idpf_tx_update_complq_indexes - update completion queue indexes
+ * @complq: completion queue being updated
+ * @ntc: current "next to clean" index value
+ * @gen_flag: current "generation" flag value
+ */
+static void idpf_tx_update_complq_indexes(struct idpf_queue *complq,
+					  int ntc, bool gen_flag)
+{
+	ntc += complq->desc_count;
+	complq->next_to_clean = ntc;
+	if (gen_flag)
+		set_bit(__IDPF_Q_GEN_CHK, complq->flags);
+	else
+		clear_bit(__IDPF_Q_GEN_CHK, complq->flags);
+}
+
 /**
  * idpf_tx_finalize_complq - Finalize completion queue cleaning
  * @complq: completion queue to finalize
@@ -1913,12 +1906,7 @@ static void idpf_tx_finalize_complq(struct idpf_queue *complq, int ntc,
 		tx_q->cleaned_pkts = 0;
 	}
 
-	ntc += complq->desc_count;
-	complq->next_to_clean = ntc;
-	if (gen_flag)
-		set_bit(__IDPF_Q_GEN_CHK, complq->flags);
-	else
-		clear_bit(__IDPF_Q_GEN_CHK, complq->flags);
+	idpf_tx_update_complq_indexes(complq, ntc, gen_flag);
 }
 
 /**
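Note: the ntc arithmetic above relies on idpf's biased next-to-clean convention, which is easy to miss on first read. While walking the completion ring, ntc runs from -desc_count up toward 0, so the wrap check is a cheap "did it hit zero?" test rather than a compare against desc_count, and the real ring index is recovered by adding desc_count back before storing it. A minimal standalone sketch of that convention (illustrative names only, not driver code):

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE	8

static void walk_ring(int start, int to_clean)
{
	int ntc = start - RING_SIZE;	/* bias into [-RING_SIZE, 0) */
	bool gen = true;		/* software's expected generation bit */

	while (to_clean--) {
		/* "clean" the descriptor at real index ntc + RING_SIZE */
		printf("clean idx %d, gen %d\n", ntc + RING_SIZE, gen);

		ntc++;
		if (!ntc) {		/* wrapped: re-bias and flip gen */
			ntc -= RING_SIZE;
			gen = !gen;
		}
	}

	/* store the real index, as idpf_tx_update_complq_indexes() does */
	printf("next_to_clean = %d\n", ntc + RING_SIZE);
}

int main(void)
{
	walk_ring(6, 5);	/* start near the end to demonstrate the wrap */
	return 0;
}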
@@ -1954,6 +1942,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 
 		ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q,
 					      gen_flag);
+
 		switch (ctype) {
 		case IDPF_TXD_COMPLT_RE:
 			if (unlikely(!flow))
@@ -1974,9 +1963,6 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 						    &cleaned_stats,
 						    budget);
 			break;
-		case IDPF_TXD_COMPLT_SW_MARKER:
-			idpf_tx_handle_sw_marker(tx_q);
-			break;
 		case -ENODATA:
 			goto exit_clean_complq;
 		case -EINVAL:
@@ -2019,6 +2005,60 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget,
 	return !!complq_budget;
 }
 
+/**
+ * idpf_wait_for_sw_marker_completion - wait for SW marker of disabled Tx queue
+ * @txq: disabled Tx queue
+ */
+void idpf_wait_for_sw_marker_completion(struct idpf_queue *txq)
+{
+	struct idpf_queue *complq = txq->txq_grp->complq;
+	struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+	s16 ntc = complq->next_to_clean;
+	unsigned long timeout;
+	bool flow, gen_flag;
+	u32 pos = ntc;
+
+	if (!test_bit(__IDPF_Q_SW_MARKER, txq->flags))
+		return;
+
+	flow = test_bit(__IDPF_Q_FLOW_SCH_EN, complq->flags);
+	gen_flag = test_bit(__IDPF_Q_GEN_CHK, complq->flags);
+
+	timeout = jiffies + msecs_to_jiffies(IDPF_WAIT_FOR_MARKER_TIMEO);
+	tx_desc = flow ? &complq->comp[pos].common : &complq->comp_4b[pos];
+	ntc -= complq->desc_count;
+
+	do {
+		struct idpf_queue *tx_q;
+		int ctype;
+
+		ctype = idpf_parse_compl_desc(tx_desc, complq, &tx_q,
+					      gen_flag);
+		if (ctype == IDPF_TXD_COMPLT_SW_MARKER) {
+			clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags);
+			if (txq == tx_q)
+				break;
+		} else if (ctype == -ENODATA) {
+			usleep_range(500, 1000);
+			continue;
+		}
+
+		pos++;
+		ntc++;
+		if (unlikely(!ntc)) {
+			ntc -= complq->desc_count;
+			pos = 0;
+			gen_flag = !gen_flag;
+		}
+
+		tx_desc = flow ? &complq->comp[pos].common :
+				 &complq->comp_4b[pos];
+		prefetch(tx_desc);
+	} while (time_before(jiffies, timeout));
+
+	idpf_tx_update_complq_indexes(complq, ntc, gen_flag);
+}
+
 /**
  * idpf_tx_splitq_build_ctb - populate command tag and size for queue
  * based scheduling descriptors
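Note: both the NAPI cleaner and the new busy-wait above key off idpf_parse_compl_desc() returning -ENODATA, which is how a completion ring with a generation bit says "nothing new yet". Hardware flips the GEN bit it writes each time it wraps the ring, software tracks the phase it expects (__IDPF_Q_GEN_CHK above), and a mismatch means the slot is stale. A small sketch of that test, with an invented descriptor layout (idpf's real fields and bit positions differ):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#define COMPL_GEN_BIT	0x1	/* invented bit position for this sketch */

struct fake_compl_desc {
	uint16_t qid_comptype_gen;	/* GEN kept in the low bit here */
	uint16_t q_head_compl_tag;
};

/* Return the completion type, or -ENODATA when the slot is stale. */
int parse_compl(const struct fake_compl_desc *desc, bool expect_gen)
{
	bool desc_gen = desc->qid_comptype_gen & COMPL_GEN_BIT;

	if (desc_gen != expect_gen)
		return -ENODATA;	/* HW has not written this slot yet */

	return (desc->qid_comptype_gen >> 1) & 0x7;	/* invented ctype field */
}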
@@ -3922,15 +3962,7 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget)
 	else
 		idpf_vport_intr_set_wb_on_itr(q_vector);
 
-	/* Switch to poll mode in the tear-down path after sending disable
-	 * queues virtchnl message, as the interrupts will be disabled after
-	 * that
-	 */
-	if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE,
-						   q_vector->tx[0]->flags)))
-		return budget;
-	else
-		return work_done;
+	return work_done;
 }
 
 /**
5 changes: 2 additions & 3 deletions drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -311,7 +311,6 @@ struct idpf_ptype_state {
  *		       and RFLGQ_GEN is the SW bit.
  * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling
  * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions
- * @__IDPF_Q_POLL_MODE: Enable poll mode
  * @__IDPF_Q_FLAGS_NBITS: Must be last
  * @__IDPF_Q_XSK: Queue used to handle the AF_XDP socket
  */
@@ -320,7 +319,6 @@ enum idpf_queue_flags_t {
 	__IDPF_RFLQ_GEN_CHK,
 	__IDPF_Q_FLOW_SCH_EN,
 	__IDPF_Q_SW_MARKER,
-	__IDPF_Q_POLL_MODE,
 	__IDPF_Q_XDP,
 	__IDPF_Q_XSK,
 
@@ -759,6 +757,7 @@ struct idpf_rxq_group {
 	} singleq;
 	struct {
 		u16 num_rxq_sets;
+		u16 num_bufq_sets;
 		struct idpf_rxq_set *rxq_sets[IDPF_LARGE_MAX_Q];
 		struct idpf_bufq_set *bufq_sets;
 	} splitq;
@@ -953,12 +952,12 @@ struct virtchnl2_rx_flex_desc_adv_nic_3;
 int idpf_rx_process_skb_fields(struct idpf_queue *rxq, struct sk_buff *skb,
 			       const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc);
 int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off);
-void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q);
 int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model);
 void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model);
 int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq);
 void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq);
 int idpf_rx_bufs_init(struct idpf_queue *rxbufq, enum libie_rx_buf_type type);
+void idpf_wait_for_sw_marker_completion(struct idpf_queue *txq);
 
 /**
  * idpf_xdpq_update_tail - Updates the XDP Tx queue tail register
40 changes: 11 additions & 29 deletions drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -760,21 +760,19 @@ static int idpf_wait_for_selected_marker_events(struct idpf_vport *vport,
 						struct idpf_queue **qs,
 						int num_qs)
 {
-	int event;
+	bool markers_rcvd = true;
 	int i;
 
-	for (i = 0; i < num_qs; i++)
-		set_bit(__IDPF_Q_SW_MARKER, qs[i]->flags);
+	for (i = 0; i < num_qs; i++) {
+		struct idpf_queue *txq = qs[i];
 
-	event = wait_event_timeout(vport->sw_marker_wq,
-				   test_and_clear_bit(IDPF_VPORT_SW_MARKER,
-						      vport->flags),
-				   msecs_to_jiffies(500));
+		set_bit(__IDPF_Q_SW_MARKER, txq->flags);
+		idpf_wait_for_sw_marker_completion(txq);
 
-	for (i = 0; i < num_qs; i++)
-		clear_bit(__IDPF_Q_POLL_MODE, qs[i]->flags);
+		markers_rcvd &= !test_bit(__IDPF_Q_SW_MARKER, txq->flags);
+	}
 
-	if (event)
+	if (markers_rcvd)
 		return 0;
 
 	dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n");
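Note: the loop above defines the new per-queue contract, which is worth spelling out. Distilled into a hypothetical helper (not part of the patch; every identifier except the helper's own name comes from this diff):

/* __IDPF_Q_SW_MARKER doubles as the result: the caller arms it, and
 * idpf_wait_for_sw_marker_completion() clears it only if the marker
 * completion shows up before IDPF_WAIT_FOR_MARKER_TIMEO expires.
 */
static bool idpf_txq_marker_drained(struct idpf_queue *txq)
{
	set_bit(__IDPF_Q_SW_MARKER, txq->flags);
	idpf_wait_for_sw_marker_completion(txq);

	/* bit still set => the marker never arrived within the timeout */
	return !test_bit(__IDPF_Q_SW_MARKER, txq->flags);
}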
@@ -2260,22 +2258,12 @@ int idpf_send_enable_queues_msg(struct idpf_vport *vport)
  */
 int idpf_send_disable_queues_msg(struct idpf_vport *vport)
 {
-	int err, i;
+	int err;
 
 	err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
 	if (err)
 		return err;
 
-	/* switch to poll mode as interrupts will be disabled after disable
-	 * queues virtchnl message is sent
-	 */
-	for (i = 0; i < vport->num_txq; i++)
-		set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags);
-
-	/* schedule the napi to receive all the marker packets */
-	for (i = 0; i < vport->num_q_vectors; i++)
-		napi_schedule(&vport->q_vectors[i].napi);
-
 	return idpf_wait_for_marker_event(vport);
 }

@@ -2323,15 +2311,9 @@ int idpf_send_disable_selected_queues_msg(struct idpf_vport *vport,
 	if (!tx_qs)
 		return -ENOMEM;
 
-	for (i = 0; i < num_q; i++) {
-		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX) {
-			set_bit(__IDPF_Q_POLL_MODE, qs[i]->flags);
+	for (i = 0; i < num_q; i++)
+		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX)
 			tx_qs[tx_idx++] = qs[i];
-		}
-
-		if (qs[i]->q_type == VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION)
-			napi_schedule(&qs[i]->q_vector->napi);
-	}
 
 	err = idpf_wait_for_selected_marker_events(vport, tx_qs, tx_idx);
 