fn test_read_write()

in src/devices/src/virtio/block/device.rs [964:1257]


    #[test]
    fn test_read_write() {
        let mut block = default_block(default_engine_type_for_kv());
        let mem = default_mem();
        let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
        set_queue(&mut block, 0, vq.create_queue());
        block.activate(mem.clone()).unwrap();
        initialize_virtqueue(&vq);

        let request_type_addr = GuestAddress(vq.dtable[0].addr.get());
        let data_addr = GuestAddress(vq.dtable[1].addr.get());
        let status_addr = GuestAddress(vq.dtable[2].addr.get());

        let empty_data = vec![0; 512];
        let rand_data = utils::rand::rand_alphanumerics(1024).as_bytes().to_vec();

        // Write with invalid data len (not a multiple of 512).
        {
            mem.write_obj::<u32>(VIRTIO_BLK_T_OUT, request_type_addr)
                .unwrap();
            // Make data read only, 511 bytes in len, and set the actual value to be written.
            vq.dtable[1].flags.set(VIRTQ_DESC_F_NEXT);
            vq.dtable[1].len.set(511);
            mem.write_slice(&rand_data[..511], data_addr).unwrap();

            simulate_queue_and_async_completion_events(&mut block, true);

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);
            assert_eq!(vq.used.ring[0].get().len, 0);

            // Check that the data wasn't written to the file
            let mut buf = [0u8; 512];
            block.disk.file().seek(SeekFrom::Start(0)).unwrap();
            block.disk.file().read_exact(&mut buf).unwrap();
            assert_eq!(buf, empty_data.as_slice());
        }

        // Write from a valid address, with an overflowing length.
        {
            let mut block = default_block(default_engine_type_for_kv());

            // Default mem size is 0x10000
            let mem = default_mem();
            let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
            set_queue(&mut block, 0, vq.create_queue());
            block.activate(mem.clone()).unwrap();
            initialize_virtqueue(&vq);
            let request_type_addr = GuestAddress(vq.dtable[0].addr.get());

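            // The data descriptor starts at 0xff00 with a length of 0x1000, which runs past
            // the end of the 0x10000 guest memory.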
            vq.dtable[1].set(0xff00, 0x1000, VIRTQ_DESC_F_NEXT, 2);
            mem.write_obj::<u32>(VIRTIO_BLK_T_OUT, request_type_addr)
                .unwrap();

            // Mark the next available descriptor.
            vq.avail.idx.set(1);
            vq.used.idx.set(0);

            check_metric_after_block!(
                &METRICS.block.invalid_reqs_count,
                1,
                simulate_queue_and_async_completion_events(&mut block, true)
            );

            let used_idx = vq.used.idx.get();
            assert_eq!(used_idx, 1);

            let status_addr = GuestAddress(vq.dtable[2].addr.get());
            assert_eq!(
                mem.read_obj::<u8>(status_addr).unwrap(),
                VIRTIO_BLK_S_IOERR as u8
            );
        }

        // Write.
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_OUT, request_type_addr)
                .unwrap();
            // Make data read only, 512 bytes in len, and set the actual value to be written.
            vq.dtable[1].flags.set(VIRTQ_DESC_F_NEXT);
            vq.dtable[1].len.set(512);
            mem.write_slice(&rand_data[..512], data_addr).unwrap();

            check_metric_after_block!(
                &METRICS.block.write_count,
                1,
                simulate_queue_and_async_completion_events(&mut block, true)
            );

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);
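            // For a write request the device only writes the status byte, hence len == 1.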
            assert_eq!(vq.used.ring[0].get().len, 1);
            assert_eq!(mem.read_obj::<u32>(status_addr).unwrap(), VIRTIO_BLK_S_OK);
        }

        // Read with invalid data len (not a multiple of 512).
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
            vq.dtable[1].len.set(511);
            mem.write_slice(empty_data.as_slice(), data_addr).unwrap();

            simulate_queue_and_async_completion_events(&mut block, true);

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);
            // The descriptor should have been discarded.
            assert_eq!(vq.used.ring[0].get().len, 0);

            // Check that no data was read.
            let mut buf = [0u8; 512];
            mem.read_slice(&mut buf, data_addr).unwrap();
            assert_eq!(buf, empty_data.as_slice());
        }

        // Read.
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
            vq.dtable[1].len.set(512);
            mem.write_slice(empty_data.as_slice(), data_addr).unwrap();

            check_metric_after_block!(
                &METRICS.block.read_count,
                1,
                simulate_queue_and_async_completion_events(&mut block, true)
            );

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);
            // Added status byte length.
            assert_eq!(vq.used.ring[0].get().len, vq.dtable[1].len.get() + 1);
            assert_eq!(mem.read_obj::<u32>(status_addr).unwrap(), VIRTIO_BLK_S_OK);

            // Check that the data is the same as what we wrote before
            let mut buf = [0u8; 512];
            mem.read_slice(&mut buf, data_addr).unwrap();
            assert_eq!(buf, &rand_data[..512]);
        }

        // Read with error.
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
            mem.write_slice(empty_data.as_slice(), data_addr).unwrap();

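            // Truncate the backing file to half its size, then request sector 10,
            // which now lies past the end of the file.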
            let size = block.disk.file().seek(SeekFrom::End(0)).unwrap();
            block.disk.file().set_len(size / 2).unwrap();
            mem.write_obj::<u64>(10, GuestAddress(request_type_addr.0 + 8))
                .unwrap();

            simulate_queue_and_async_completion_events(&mut block, true);

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);
            // The descriptor should have been discarded.
            assert_eq!(vq.used.ring[0].get().len, 0);

            // Check that no data was read.
            let mut buf = [0u8; 512];
            mem.read_slice(&mut buf, data_addr).unwrap();
            assert_eq!(buf, empty_data.as_slice());
        }

        // Partial buffer error on read.
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);

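            // Truncate the file again and request sector 5, which now lies past the end
            // of the file.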
            let size = block.disk.file().seek(SeekFrom::End(0)).unwrap();
            block.disk.file().set_len(size / 2).unwrap();
            // Update sector number: stored at `request_type_addr.0 + 8`
            mem.write_obj::<u64>(5, GuestAddress(request_type_addr.0 + 8))
                .unwrap();

            // This will attempt to read past end of file.
            simulate_queue_and_async_completion_events(&mut block, true);

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);

            // No data since we can't read past the end of the file; only the status byte length counts.
            assert_eq!(vq.used.ring[0].get().len, 1);
            assert_eq!(
                mem.read_obj::<u32>(status_addr).unwrap(),
                VIRTIO_BLK_S_IOERR
            );

            // Check that no data was read since we can't read past the end of the file.
            let mut buf = [0u8; 512];
            mem.read_slice(&mut buf, data_addr).unwrap();
            assert_eq!(buf, empty_data.as_slice());
        }

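        // Partial read: the file is shorter than the requested data length.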
        {
            vq.used.idx.set(0);
            set_queue(&mut block, 0, vq.create_queue());

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);
            vq.dtable[1].len.set(1024);

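            // Request a 1024-byte read starting at sector 1.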
            mem.write_obj::<u64>(1, GuestAddress(request_type_addr.0 + 8))
                .unwrap();

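            // Fill the file's second sector with known data so the partially read
            // contents can be verified below.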
            block.disk.file().seek(SeekFrom::Start(512)).unwrap();
            block.disk.file().write_all(&rand_data[512..]).unwrap();

            simulate_queue_and_async_completion_events(&mut block, true);

            assert_eq!(vq.used.idx.get(), 1);
            assert_eq!(vq.used.ring[0].get().id, 0);

            // File has 2 sectors and we try to read from the second sector, which means we will
            // read 512 bytes (instead of 1024).
            assert_eq!(vq.used.ring[0].get().len, 513);
            assert_eq!(
                mem.read_obj::<u32>(status_addr).unwrap(),
                VIRTIO_BLK_S_IOERR
            );

            // Check that we correctly read the second file sector.
            let mut buf = [0u8; 512];
            mem.read_slice(&mut buf, data_addr).unwrap();
            assert_eq!(buf, rand_data[512..]);
        }

        // Read from a valid address, with an overflowing length.
        {
            // Default mem size is 0x10000
            let mem = default_mem();
            let vq = VirtQueue::new(GuestAddress(0), &mem, 16);
            set_queue(&mut block, 0, vq.create_queue());
            block.activate(mem.clone()).unwrap();
            initialize_virtqueue(&vq);
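            // The data descriptor starts at 0xff00 with a length of 0x1000, which runs past
            // the end of the 0x10000 guest memory.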
            vq.dtable[1].set(0xff00, 0x1000, VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE, 2);

            let request_type_addr = GuestAddress(vq.dtable[0].addr.get());

            // Mark the next available descriptor.
            vq.avail.idx.set(1);
            vq.used.idx.set(0);

            mem.write_obj::<u32>(VIRTIO_BLK_T_IN, request_type_addr)
                .unwrap();
            vq.dtable[1]
                .flags
                .set(VIRTQ_DESC_F_NEXT | VIRTQ_DESC_F_WRITE);

            check_metric_after_block!(
                &METRICS.block.invalid_reqs_count,
                1,
                simulate_queue_and_async_completion_events(&mut block, true)
            );

            let used_idx = vq.used.idx.get();
            assert_eq!(used_idx, 1);

            let status_addr = GuestAddress(vq.dtable[2].addr.get());
            assert_eq!(
                mem.read_obj::<u8>(status_addr).unwrap(),
                VIRTIO_BLK_S_IOERR as u8
            );
        }
    }