in build/gve_tx.c [665:764]
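/* Post a packet to the TX ring without copying: the skb's linear portion and
 * each page fragment are DMA-mapped in place and described to the device.
 * Returns the number of descriptors written on success, or 0 if the packet
 * was dropped because a DMA mapping failed.
 */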
static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
                                  struct sk_buff *skb)
{
        const struct skb_shared_info *shinfo = skb_shinfo(skb);
        int hlen, num_descriptors, l4_hdr_offset;
        union gve_tx_desc *pkt_desc, *mtd_desc, *seg_desc;
        struct gve_tx_buffer_state *info;
        int mtd_desc_nr = !!skb->l4_hash;
        bool is_gso = skb_is_gso(skb);
        u32 idx = tx->req & tx->mask;
        u64 addr;
        u32 len;
        int i;

        info = &tx->info[idx];
        pkt_desc = &tx->desc[idx];

        l4_hdr_offset = skb_checksum_start_offset(skb);
        /* If the skb is gso, then we want only up to the tcp header in the
         * first segment to efficiently replicate on each segment; otherwise
         * we want the linear portion of the skb (which will contain the
         * checksum, because skb->csum_start and skb->csum_offset are given
         * relative to skb->head) in the first segment.
         */
        hlen = is_gso ? l4_hdr_offset + tcp_hdrlen(skb) : skb_headlen(skb);
        len = skb_headlen(skb);

        info->skb = skb;
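
        /* Map the skb's linear portion for device access; on failure, count
         * the error and drop the packet.
         */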
        addr = dma_map_single(tx->dev, skb->data, len, DMA_TO_DEVICE);
        if (unlikely(dma_mapping_error(tx->dev, addr))) {
                tx->dma_mapping_error++;
                goto drop;
        }
        dma_unmap_len_set(info, len, len);
        dma_unmap_addr_set(info, dma, addr);
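
        /* Descriptor budget: one packet descriptor plus one segment
         * descriptor per frag; the linear tail beyond hlen and the optional
         * metadata descriptor each take one more slot.
         */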
        num_descriptors = 1 + shinfo->nr_frags;
        if (hlen < len)
                num_descriptors++;
        if (mtd_desc_nr)
                num_descriptors++;

        gve_tx_fill_pkt_desc(pkt_desc, skb->csum_offset, skb->ip_summed,
                             is_gso, l4_hdr_offset,
                             num_descriptors, hlen, addr, skb->len);
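
        /* Optional metadata descriptor, emitted only when the skb carries an
         * L4 hash (likely so the device can keep the flow on a consistent
         * path).
         */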
        if (mtd_desc_nr) {
                idx = (idx + 1) & tx->mask;
                mtd_desc = &tx->desc[idx];
                gve_tx_fill_mtd_desc(mtd_desc, skb);
        }

        if (hlen < len) {
                /* For gso the rest of the linear portion of the skb needs to
                 * be in its own descriptor.
                 */
                len -= hlen;
                addr += hlen;
                idx = (idx + 1) & tx->mask;
                seg_desc = &tx->desc[idx];
                gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
                                     skb_shinfo(skb)->gso_size,
                                     skb_is_gso_v6(skb), is_gso, len, addr);
        }
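
        /* DMA-map each page fragment and post a segment descriptor for it.
         * Frag slots store their own unmap info, with info->skb left NULL to
         * mark them as page mappings.
         */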
        for (i = 0; i < shinfo->nr_frags; i++) {
                const skb_frag_t *frag = &shinfo->frags[i];

                idx = (idx + 1) & tx->mask;
                seg_desc = &tx->desc[idx];
                len = skb_frag_size(frag);
                addr = skb_frag_dma_map(tx->dev, frag, 0, len, DMA_TO_DEVICE);
                if (unlikely(dma_mapping_error(tx->dev, addr))) {
                        tx->dma_mapping_error++;
                        goto unmap_drop;
                }
                tx->info[idx].skb = NULL;
                dma_unmap_len_set(&tx->info[idx], len, len);
                dma_unmap_addr_set(&tx->info[idx], dma, addr);
                gve_tx_fill_seg_desc(seg_desc, skb_network_offset(skb),
                                     skb_shinfo(skb)->gso_size,
                                     skb_is_gso_v6(skb), is_gso, len, addr);
        }

        return num_descriptors;
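
        /* Unwind path: i counts the frags mapped so far; widen it by the
         * non-frag descriptors (packet, optional extra seg, optional
         * metadata) so the walk-back below visits every slot written.
         */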
unmap_drop:
        i += num_descriptors - shinfo->nr_frags;
        while (i--) {
                /* Skip metadata descriptor, if set */
                if (i == 1 && mtd_desc_nr == 1)
                        continue;
                idx--;
                gve_tx_unmap_buf(tx->dev, &tx->info[idx & tx->mask]);
        }
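        /* Count the drop; the caller sees a zero descriptor count. */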
drop:
        tx->dropped_pkt++;
        return 0;
}