in virtio_ring.c [1166:1316]
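/*
 * Add a chain of buffers to a packed-format virtqueue.  sgs[] holds
 * out_sgs device-readable scatterlists followed by in_sgs
 * device-writable ones; data is the caller's opaque token, handed
 * back by virtqueue_get_buf() on completion.  Returns 0 on success,
 * -EIO if the queue is broken or a DMA mapping fails, and -ENOSPC
 * when too few free descriptors remain.
 */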
static inline int virtqueue_add_packed(struct virtqueue *_vq,
                                       struct scatterlist *sgs[],
                                       unsigned int total_sg,
                                       unsigned int out_sgs,
                                       unsigned int in_sgs,
                                       void *data,
                                       void *ctx,
                                       gfp_t gfp)
{
        struct vring_virtqueue *vq = to_vvq(_vq);
        struct vring_packed_desc *desc;
        struct scatterlist *sg;
        unsigned int i, n, c, descs_used, err_idx;
        __le16 head_flags, flags;
        u16 head, id, prev, curr, avail_used_flags;
        int err;

        START_USE(vq);

        BUG_ON(data == NULL);
        BUG_ON(ctx && vq->indirect);

        if (unlikely(vq->broken)) {
                END_USE(vq);
                return -EIO;
        }

        LAST_ADD_TIME_UPDATE(vq);

        BUG_ON(total_sg == 0);
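
        /*
         * Large requests are first tried as an indirect descriptor
         * table; fall back to direct descriptors only if allocating
         * that table fails (-ENOMEM).
         */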
        if (virtqueue_use_indirect(_vq, total_sg)) {
                err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
                                                    in_sgs, data, gfp);
                if (err != -ENOMEM) {
                        END_USE(vq);
                        return err;
                }

                /* fall back on direct */
        }
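
        /*
         * Note the starting slot and the current avail/used wrap flags
         * so the unmap_release path can restore them if a mapping
         * fails partway through.
         */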
        head = vq->packed.next_avail_idx;
        avail_used_flags = vq->packed.avail_used_flags;

        WARN_ON_ONCE(total_sg > vq->packed.vring.num && !vq->indirect);

        desc = vq->packed.vring.desc;
        i = head;
        descs_used = total_sg;

        if (unlikely(vq->vq.num_free < descs_used)) {
                pr_debug("Can't add buf len %i - avail = %i\n",
                         descs_used, vq->vq.num_free);
                END_USE(vq);
                return -ENOSPC;
        }
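
        /* Take a free buffer ID; it will tag every descriptor in the chain. */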
        id = vq->free_head;
        BUG_ON(id == vq->packed.vring.num);

        curr = id;
        c = 0;
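
        /*
         * Write one descriptor per scatterlist element, walking the
         * device-readable lists first and the device-writable ones after.
         */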
        for (n = 0; n < out_sgs + in_sgs; n++) {
                for (sg = sgs[n]; sg; sg = sg_next(sg)) {
                        dma_addr_t addr = vring_map_one_sg(vq, sg, n < out_sgs ?
                                        DMA_TO_DEVICE : DMA_FROM_DEVICE);

                        if (vring_mapping_error(vq, addr))
                                goto unmap_release;
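
                        /*
                         * Every descriptor except the last carries
                         * VRING_DESC_F_NEXT; in_sgs entries are additionally
                         * marked VRING_DESC_F_WRITE.  The current avail/used
                         * wrap flags are folded into each descriptor.
                         */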
                        flags = cpu_to_le16(vq->packed.avail_used_flags |
                                    (++c == total_sg ? 0 : VRING_DESC_F_NEXT) |
                                    (n < out_sgs ? 0 : VRING_DESC_F_WRITE));
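                        /*
                         * The head's flags are held back and only written
                         * after the barrier below, so the device never sees
                         * a partially available chain.
                         */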
                        if (i == head)
                                head_flags = flags;
                        else
                                desc[i].flags = flags;

                        desc[i].addr = cpu_to_le64(addr);
                        desc[i].len = cpu_to_le32(sg->length);
                        desc[i].id = cpu_to_le16(id);
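
                        /*
                         * When the DMA API is in use, shadow the mapping in
                         * desc_extra so it can be unmapped later.
                         */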
                        if (unlikely(vq->use_dma_api)) {
                                vq->packed.desc_extra[curr].addr = addr;
                                vq->packed.desc_extra[curr].len = sg->length;
                                vq->packed.desc_extra[curr].flags =
                                        le16_to_cpu(flags);
                        }
                        prev = curr;
                        curr = vq->packed.desc_extra[curr].next;
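
                        /*
                         * Past the end of the ring: wrap to slot 0 and flip
                         * the avail/used bits that the following descriptors
                         * will carry.
                         */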
                        if ((unlikely(++i >= vq->packed.vring.num))) {
                                i = 0;
                                vq->packed.avail_used_flags ^=
                                        1 << VRING_PACKED_DESC_F_AVAIL |
                                        1 << VRING_PACKED_DESC_F_USED;
                        }
                }
        }
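
        /* If the chain wrapped past the end of the ring, flip the wrap counter. */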
        if (i < head)
                vq->packed.avail_wrap_counter ^= 1;

        /* We're using some buffers from the free list. */
        vq->vq.num_free -= descs_used;

        /* Update free pointer */
        vq->packed.next_avail_idx = i;
        vq->free_head = curr;

        /* Store token. */
        vq->packed.desc_state[id].num = descs_used;
        vq->packed.desc_state[id].data = data;
        vq->packed.desc_state[id].indir_desc = ctx;
        vq->packed.desc_state[id].last = prev;

        /*
         * A driver MUST NOT make the first descriptor in the list
         * available before all subsequent descriptors comprising
         * the list are made available.
         */
        virtio_wmb(vq->weak_barriers);
        vq->packed.vring.desc[head].flags = head_flags;
        vq->num_added += descs_used;

        pr_debug("Added buffer head %i to %p\n", head, vq);
        END_USE(vq);

        return 0;
unmap_release:
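        /*
         * A mapping failed partway through: restore the saved wrap
         * flags and unmap everything mapped so far.  The head's flags
         * were never published, so the device never saw this chain.
         */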
        err_idx = i;
        i = head;
        curr = vq->free_head;

        vq->packed.avail_used_flags = avail_used_flags;

        for (n = 0; n < total_sg; n++) {
                if (i == err_idx)
                        break;
                vring_unmap_state_packed(vq,
                                         &vq->packed.desc_extra[curr]);
                curr = vq->packed.desc_extra[curr].next;
                i++;
                if (i >= vq->packed.vring.num)
                        i = 0;
        }

        END_USE(vq);
        return -EIO;
}
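
For orientation, a hedged sketch of how a driver typically reaches this
function: callers use virtqueue_add_sgs() (or one of its wrappers), which
dispatches to virtqueue_add_packed() when VIRTIO_F_RING_PACKED was
negotiated. The buffer names (req, resp, my_token) and their lengths are
hypothetical, not from virtio_ring.c:

        struct scatterlist out, in, *sgs[2];
        int err;

        sg_init_one(&out, req, req_len);   /* device-readable */
        sg_init_one(&in, resp, resp_len);  /* device-writable */
        sgs[0] = &out;
        sgs[1] = &in;

        /* One readable list, one writable; my_token is returned by
         * virtqueue_get_buf() once the device marks the chain used. */
        err = virtqueue_add_sgs(vq, sgs, 1, 1, my_token, GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vq);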