in build/gve_rx_dqo.c [625:770]
static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
const struct gve_rx_compl_desc_dqo *compl_desc,
u32 desc_idx, int queue_idx)
{
const u16 buffer_id = le16_to_cpu(compl_desc->buf_id);
const bool hbo = compl_desc->header_buffer_overflow;
const bool eop = compl_desc->end_of_packet != 0;
const bool hsplit = compl_desc->split_header;
struct gve_rx_buf_state_dqo *buf_state;
struct gve_priv *priv = rx->gve;
u16 buf_len;
u16 hdr_len;
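/* Validate the buffer ID from the completion descriptor before using it
 * to index the buffer state table.
 */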
if (unlikely(buffer_id >= rx->dqo.num_buf_states)) {
net_err_ratelimited("%s: Invalid RX buffer_id=%u\n",
priv->dev->name, buffer_id);
return -EINVAL;
}
buf_state = &rx->dqo.buf_states[buffer_id];
if (unlikely(!gve_buf_state_is_allocated(rx, buf_state))) {
net_err_ratelimited("%s: RX buffer_id is not allocated: %u\n",
priv->dev->name, buffer_id);
return -EINVAL;
}
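/* The device flagged this packet with an error: release the buffer and
 * drop the packet.
 */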
if (unlikely(compl_desc->rx_error)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
gve_free_buffer(rx, buf_state);
#else
gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
buf_state);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
return -EINVAL;
}
buf_len = compl_desc->packet_len;
hdr_len = compl_desc->header_len;
/* The page might not have been used for a while and was likely last
 * written by a different thread, so prefetch it before reading.
 */
prefetch(buf_state->page_info.page);
/* If the header was split into the header buffer, copy it into the skb. */
if (hsplit) {
int unsplit = 0;
if (hdr_len && !hbo) {
rx->ctx.skb_head = gve_rx_copy_data(priv->dev, napi,
rx->dqo.hdr_bufs.data +
desc_idx * priv->header_buf_size,
hdr_len);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
} else {
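/* The header buffer overflowed or no header was present, so this
 * packet was not actually split.
 */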
unsplit = 1;
}
u64_stats_update_begin(&rx->statss);
rx->rx_hsplit_pkt++;
rx->rx_hsplit_unsplit_pkt += unsplit;
rx->rx_hsplit_bytes += hdr_len;
u64_stats_update_end(&rx->statss);
}
/* Sync the portion of the DMA buffer that the CPU is about to read. */
dma_sync_single_range_for_cpu(&priv->pdev->dev, buf_state->addr,
buf_state->page_info.page_offset,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
if (rx->ctx.skb_head) {
if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
priv) != 0))
goto error;
return 0;
}
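/* Small, complete packets are copied into a fresh skb (copybreak) so the
 * receive buffer can be released immediately.
 */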
if (eop && buf_len <= priv->rx_copybreak) {
rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
rx->rx_copybreak_pkt++;
u64_stats_update_end(&rx->statss);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
gve_free_buffer(rx, buf_state);
#else
gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states,
buf_state);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
return 0;
}
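/* Otherwise build a fragment-based skb via the napi GRO frags API. */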
rx->ctx.skb_head = napi_get_frags(napi);
if (unlikely(!rx->ctx.skb_head))
goto error;
rx->ctx.skb_tail = rx->ctx.skb_head;
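/* Copy the payload out instead of attaching the buffer as a frag when the
 * ring needs the buffer back (copy on demand).
 */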
if (gve_rx_should_trigger_copy_ondemand(rx)) {
if (gve_rx_copy_ondemand(rx, buf_state, buf_len) < 0)
goto error;
return 0;
}
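/* Attach the receive buffer to the skb as a page fragment. The truesize
 * passed to skb_add_rx_frag differs across kernel versions, and page-pool
 * pages are marked for recycling on newer kernels.
 */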
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
if (rx->dqo.page_pool)
skb_mark_for_recycle(rx->ctx.skb_head);

skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
buf_state->page_info.buf_size);
#else
skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
priv->data_buffer_size_dqo);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
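/* Hand the buffer back for reuse: older kernels use the page refcount
 * bias scheme, newer ones the common reuse helper.
 */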
#if (LINUX_VERSION_CODE < KERNEL_VERSION(6,7,0))
gve_dec_pagecnt_bias(&buf_state->page_info);
gve_try_recycle_buf(priv, rx, buf_state);
#else
gve_reuse_buffer(rx, buf_state);
#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(6,7,0)) */
return 0;
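/* On failure, release the buffer and report the allocation error to the
 * caller.
 */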
error:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0))
gve_free_buffer(rx, buf_state);
#else
gve_enqueue_buf_state(rx, &rx->dqo.recycled_buf_states, buf_state);
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(6,7,0)) */
return -ENOMEM;
}