static int gve_clean_rx_done()

in build/gve_rx.c [1099:1187]


static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
			     netdev_features_t feat)
{
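	/* Snapshot the XDP action counters so that, after the processing
	 * loop, we can tell whether any XDP_TX or XDP_REDIRECT work was
	 * queued and needs to be flushed.
	 */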
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL)
	u64 xdp_redirects = rx->xdp_actions[XDP_REDIRECT];
	u64 xdp_txs = rx->xdp_actions[XDP_TX];
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */
	struct gve_rx_ctx *ctx = &rx->ctx;
	struct gve_priv *priv = rx->gve;
	struct gve_rx_cnts cnts = {0};
	struct gve_rx_desc *next_desc;
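	/* The descriptor ring size is a power of two; masking the
	 * free-running counters with rx->mask yields a ring index.
	 */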
	u32 idx = rx->cnt & rx->mask;
	u32 work_done = 0;

	struct gve_rx_desc *desc = &rx->desc.desc_ring[idx];

	/* Exceed budget only if (and until) the in-flight packet is consumed. */
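	/* A sequence number match means the NIC has written this
	 * descriptor; a mismatch means we have caught up with the device.
	 */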
	while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
	       (work_done < budget || ctx->frag_cnt)) {
		next_desc = &rx->desc.desc_ring[(idx + 1) & rx->mask];
		prefetch(next_desc);

		/* Process one descriptor; packets spanning multiple
		 * descriptors accumulate state in rx->ctx until complete.
		 */
		gve_rx(rx, feat, desc, idx, &cnts);

		rx->cnt++;
		idx = rx->cnt & rx->mask;
		desc = &rx->desc.desc_ring[idx];
		rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
		work_done++;
	}

	/* The device only sends whole packets; a leftover fragment means
	 * the descriptor sequence broke mid-packet, so schedule a reset.
	 */
	if (unlikely(ctx->frag_cnt)) {
		struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;

		napi_free_frags(napi);
		gve_rx_ctx_clear(&rx->ctx);
		netdev_warn(priv->dev, "Unexpected seq number %d with incomplete packet, expected %d, scheduling reset",
			    GVE_SEQNO(desc->flags_seq), rx->desc.seqno);
		gve_schedule_reset(priv);
	}

	/* Nothing was processed and the number of posted buffers is still
	 * above the doorbell threshold: no stats or doorbell work needed.
	 */
	if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
		return 0;

	if (work_done) {
		u64_stats_update_begin(&rx->statss);
		rx->rpackets += cnts.ok_pkt_cnt;
		rx->rbytes += cnts.ok_pkt_bytes;
		rx->rx_cont_packet_cnt += cnts.cont_pkt_cnt;
		rx->rx_desc_err_dropped_pkt += cnts.desc_err_pkt_cnt;
		u64_stats_update_end(&rx->statss);
	}

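	/* Flush XDP work batched during this poll: kick the XDP TX queue
	 * once if any frames were queued, and complete any pending
	 * redirects with xdp_do_flush().
	 */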
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL)
	if (xdp_txs != rx->xdp_actions[XDP_TX])
		gve_xdp_tx_flush(priv, rx->q_num);

	if (xdp_redirects != rx->xdp_actions[XDP_REDIRECT])
		xdp_do_flush();
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(5,14,0)) || defined(KUNIT_KERNEL) */

	/* restock ring slots */
	if (!rx->data.raw_addressing) {
		/* In QPL mode, buffers are refilled as the descriptors are processed. */
		rx->fill_cnt += work_done;
	} else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
		/* In raw addressing mode, buffers are only refilled if the
		 * number available falls below the doorbell threshold.
		 */
		if (!gve_rx_refill_buffers(priv, rx))
			return 0;

		/* If we could not completely refill buffers, return the full
		 * budget so NAPI will schedule this queue again to finish
		 * refilling.
		 */
		if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
			gve_rx_write_doorbell(priv, rx);
			return budget;
		}
	}

	gve_rx_write_doorbell(priv, rx);
	return cnts.total_pkt_cnt;
}
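
For context, here is a minimal sketch of how a NAPI poll handler might consume this return value. The handler name and the assumption that the notify block carries a pointer to its rx ring are illustrative only; this is not the driver's actual poll path.

static int example_rx_poll(struct napi_struct *napi, int budget)
{
	/* Hypothetical caller sketch. gve_notify_block is assumed to embed
	 * the napi struct (as the ntfy_blocks usage above suggests) and to
	 * expose a pointer to its rx ring.
	 */
	struct gve_notify_block *block =
		container_of(napi, struct gve_notify_block, napi);
	struct gve_rx_ring *rx = block->rx;
	int work_done;

	/* gve_clean_rx_done() returns 0 when there is nothing to do,
	 * budget when the queue should be polled again (e.g. a refill is
	 * still pending), or the number of packets processed.
	 */
	work_done = gve_clean_rx_done(rx, budget, rx->gve->dev->features);
	if (work_done < budget)
		napi_complete_done(napi, work_done);
	return work_done;
}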