in src/devices/src/virtio/net/device.rs [303:367]
// Copies the frame currently held in `self.rx_frame_buf` into the guest via the
// RX queue's next available descriptor chain.
fn do_write_frame_to_guest(&mut self) -> std::result::Result<(), FrontendError> {
    let mut result: std::result::Result<(), FrontendError> = Ok(());
    // This is safe since we checked in the event handler that the device is activated.
    let mem = self.device_state.mem().unwrap();

    let queue = &mut self.queues[RX_INDEX];
    let head_descriptor = queue.pop(mem).ok_or_else(|| {
        METRICS.net.no_rx_avail_buffer.inc();
        FrontendError::EmptyQueue
    })?;
    let head_index = head_descriptor.index;

    let mut frame_slice = &self.rx_frame_buf[..self.rx_bytes_read];
    let frame_len = frame_slice.len();
    let mut maybe_next_descriptor = Some(head_descriptor);
    // Walk the descriptor chain, copying a chunk of the frame into each write-only
    // descriptor until the frame is exhausted or the chain ends.
    while let Some(descriptor) = &maybe_next_descriptor {
        if frame_slice.is_empty() {
            break;
        }

        if !descriptor.is_write_only() {
            result = Err(FrontendError::ReadOnlyDescriptor);
            break;
        }

        let len = std::cmp::min(frame_slice.len(), descriptor.len as usize);
        match mem.write_slice(&frame_slice[..len], descriptor.addr) {
            Ok(()) => {
                METRICS.net.rx_count.inc();
                frame_slice = &frame_slice[len..];
            }
            Err(e) => {
                error!("Failed to write slice: {:?}", e);
                match e {
                    GuestMemoryError::PartialBuffer { .. } => &METRICS.net.rx_partial_writes,
                    _ => &METRICS.net.rx_fails,
                }
                .inc();
                result = Err(FrontendError::GuestMemory(e));
                break;
            }
        };

        maybe_next_descriptor = descriptor.next_descriptor();
    }

    // The chain ended before the whole frame was written.
    if result.is_ok() && !frame_slice.is_empty() {
        warn!("Receiving buffer is too small to hold frame of current size");
        METRICS.net.rx_fails.inc();
        result = Err(FrontendError::DescriptorChainTooSmall);
    }

    // Mark the descriptor chain as used. If an error occurred, skip the descriptor chain.
    let used_len = if result.is_err() { 0 } else { frame_len as u32 };
    queue.add_used(mem, head_index, used_len).map_err(|e| {
        error!("Failed to add available descriptor {}: {}", head_index, e);
        FrontendError::AddUsed
    })?;

    // Record that the guest needs to be notified; the RX interrupt is signalled later.
    self.rx_deferred_irqs = true;

    if result.is_ok() {
        METRICS.net.rx_bytes_count.add(frame_len);
        METRICS.net.rx_packets_count.inc();
    }
    result
}