int gve_clean_tx_done_dqo()

in build/gve_tx_dqo.c [1305:1391]
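
Cleans the TX completion ring for the DQO queue format. The loop walks completion descriptors until it reaches one the hardware has not yet written (detected via the generation bit), dispatching each by type: descriptor completions publish the hardware's TX head, packet completions finish normal transmits, and miss/reinjection completions cover packets whose original completion went missing. BQL and the u64 stats are settled once, after the loop. The LINUX_VERSION_CODE guard picks dma_rmb() where it exists (Linux 3.19 and later) and falls back to the stronger rmb() on older kernels, consistent with an out-of-tree compat build (note the build/ path).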


int gve_clean_tx_done_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
			  struct napi_struct *napi)
{
	u64 reinject_compl_bytes = 0;
	u64 reinject_compl_pkts = 0;
	int num_descs_cleaned = 0;
	u64 miss_compl_bytes = 0;
	u64 miss_compl_pkts = 0;
	u64 pkt_compl_bytes = 0;
	u64 pkt_compl_pkts = 0;

	/* Limit in order to avoid blocking for too long */
	while (!napi || pkt_compl_pkts < napi->weight) {
		struct gve_tx_compl_desc *compl_desc =
			&tx->dqo.compl_ring[tx->dqo_compl.head];
		u16 type;

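		/* Hardware writes each pass of the ring with the opposite
		 * generation value; a descriptor that still carries
		 * cur_gen_bit has not been written on this pass, so it and
		 * everything after it is stale.
		 */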
		if (compl_desc->generation == tx->dqo_compl.cur_gen_bit)
			break;

		/* Prefetch the next descriptor. */
		prefetch(&tx->dqo.compl_ring[(tx->dqo_compl.head + 1) &
				tx->dqo.complq_mask]);

		/* Do not read data until we own the descriptor */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0)
		dma_rmb();
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
		rmb();
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0) */
		type = compl_desc->type;

		if (type == GVE_COMPL_TYPE_DQO_DESC) {
			/* This is the last descriptor fetched by HW plus one */
			u16 tx_head = le16_to_cpu(compl_desc->tx_head);

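			/* Release pairing: the transmit path presumably reads
			 * hw_tx_head with acquire semantics when computing
			 * free descriptor-ring space.
			 */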
			atomic_set_release(&tx->dqo_compl.hw_tx_head, tx_head);
		} else if (type == GVE_COMPL_TYPE_DQO_PKT) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			if (compl_tag & GVE_ALT_MISS_COMPL_BIT) {
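				/* A miss completion can also arrive encoded
				 * as a packet completion with this flag set
				 * in the tag; strip the flag and take the
				 * miss path.
				 */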
				compl_tag &= ~GVE_ALT_MISS_COMPL_BIT;
				gve_handle_miss_completion(priv, tx, compl_tag,
							   &miss_compl_bytes,
							   &miss_compl_pkts);
			} else {
				gve_handle_packet_completion(priv, tx, !!napi,
							     compl_tag,
							     &pkt_compl_bytes,
							     &pkt_compl_pkts,
							     false);
			}
		} else if (type == GVE_COMPL_TYPE_DQO_MISS) {
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_miss_completion(priv, tx, compl_tag,
						   &miss_compl_bytes,
						   &miss_compl_pkts);
		} else if (type == GVE_COMPL_TYPE_DQO_REINJECTION) {
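			/* Completes a packet that previously got a miss
			 * completion; the trailing `true` distinguishes this
			 * from the normal packet-completion call above.
			 */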
			u16 compl_tag = le16_to_cpu(compl_desc->completion_tag);

			gve_handle_packet_completion(priv, tx, !!napi,
						     compl_tag,
						     &reinject_compl_bytes,
						     &reinject_compl_pkts,
						     true);
		}

		tx->dqo_compl.head =
			(tx->dqo_compl.head + 1) & tx->dqo.complq_mask;
		/* Flip the generation bit when we wrap around */
		tx->dqo_compl.cur_gen_bit ^= tx->dqo_compl.head == 0;
		num_descs_cleaned++;
	}

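	/* BQL is charged for normal and miss completions, while reinjection
	 * completions (the second half of the miss path) are charged to the
	 * stats below, keeping each packet counted once per mechanism.
	 */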
	netdev_tx_completed_queue(tx->netdev_txq,
				  pkt_compl_pkts + miss_compl_pkts,
				  pkt_compl_bytes + miss_compl_bytes);

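	/* Reap stale bookkeeping: miss completions whose reinjection
	 * completion never arrived, and completions that have outlived
	 * their timeout.
	 */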
	remove_miss_completions(priv, tx);
	remove_timed_out_completions(priv, tx);

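	/* Normal plus reinjected completions make up the ring's throughput
	 * stats; the miss leg was already reported to BQL above.
	 */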
	u64_stats_update_begin(&tx->statss);
	tx->bytes_done += pkt_compl_bytes + reinject_compl_bytes;
	tx->pkt_done += pkt_compl_pkts + reinject_compl_pkts;
	u64_stats_update_end(&tx->statss);
	return num_descs_cleaned;
}
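
The subtlest piece above is the generation-bit handshake, so here is a minimal single-threaded userspace sketch of the same ownership scheme. All names (struct entry, produce(), consume(), RING_SIZE) are hypothetical, the producer stands in for the NIC, and the DMA barriers of the real driver are elided.

/* gen_ring_demo.c - illustration only, not driver code. */
#include <stdio.h>
#include <stdint.h>

#define RING_SIZE 8			/* power of two, like the compl ring */
#define RING_MASK (RING_SIZE - 1)

struct entry {
	uint8_t generation;		/* flipped by the producer on each wrap */
	uint16_t value;			/* stand-in for the completion payload */
};

/* Zero-initialized: every slot starts with generation == 0, i.e. stale. */
static struct entry ring[RING_SIZE];

static struct { uint32_t head; uint8_t gen; } prod = { 0, 1 };
static struct { uint32_t head; uint8_t cur_gen_bit; } cons = { 0, 0 };

/* Producer (the "hardware"): write a slot, publishing the generation last. */
static void produce(uint16_t value)
{
	ring[prod.head].value = value;
	ring[prod.head].generation = prod.gen;
	prod.head = (prod.head + 1) & RING_MASK;
	prod.gen ^= (prod.head == 0);		/* flip on wrap */
}

/* Consumer: mirrors the loop in gve_clean_tx_done_dqo(). */
static int consume(void)
{
	int cleaned = 0;

	for (;;) {
		struct entry *e = &ring[cons.head];

		/* Slot still carries the stale generation: nothing new. */
		if (e->generation == cons.cur_gen_bit)
			break;

		printf("slot %u -> %u\n", (unsigned)cons.head,
		       (unsigned)e->value);
		cons.head = (cons.head + 1) & RING_MASK;
		cons.cur_gen_bit ^= (cons.head == 0);	/* flip on wrap */
		cleaned++;
	}
	return cleaned;
}

int main(void)
{
	int total = 0;
	uint16_t i;

	for (i = 0; i < 20; i++) {		/* wraps the ring twice */
		produce(i);
		total += consume();
	}
	printf("consumed %d entries\n", total);	/* prints 20 */
	return 0;
}

The payoff of the scheme is that the consumer never needs to read a producer index across the bus: a zeroed ring starts entirely stale, and each wrap flips which bit value means fresh, so the break condition in the driver's loop doubles as the ownership check.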