in kvm/guest.c [1011:1095]
long kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
                                struct kvm_arm_copy_mte_tags *copy_tags)
{
        gpa_t guest_ipa = copy_tags->guest_ipa;
        size_t length = copy_tags->length;
        void __user *tags = copy_tags->addr;
        gpa_t gfn;
        bool write = !(copy_tags->flags & KVM_ARM_TAGS_FROM_GUEST);
        int ret = 0;
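
        /* Only valid for VMs created with the MTE capability enabled */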
        if (!kvm_has_mte(kvm))
                return -EINVAL;

        if (copy_tags->reserved[0] || copy_tags->reserved[1])
                return -EINVAL;

        if (copy_tags->flags & ~KVM_ARM_TAGS_FROM_GUEST)
                return -EINVAL;

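        /* Both the length and the base IPA must be page aligned */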
        if (length & ~PAGE_MASK || guest_ipa & ~PAGE_MASK)
                return -EINVAL;

        gfn = gpa_to_gfn(guest_ipa);

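        /* Hold slots_lock so the memslot layout cannot change mid-walk */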
        mutex_lock(&kvm->slots_lock);

        while (length > 0) {
                kvm_pfn_t pfn = gfn_to_pfn_prot(kvm, gfn, write, NULL);
                void *maddr;
                unsigned long num_tags;
                struct page *page;

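                /* No memslot backs this gfn, or the page could not be mapped */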
                if (is_error_noslot_pfn(pfn)) {
                        ret = -EFAULT;
                        goto out;
                }

                page = pfn_to_online_page(pfn);
                if (!page) {
                        /* Reject ZONE_DEVICE memory */
                        ret = -EFAULT;
                        goto out;
                }

                maddr = page_address(page);

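                /*
                 * KVM_ARM_TAGS_FROM_GUEST (!write): copy the page's tags out
                 * to userspace, or zeros if the page was never tagged.
                 */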
                if (!write) {
                        if (test_bit(PG_mte_tagged, &page->flags))
                                num_tags = mte_copy_tags_to_user(tags, maddr,
                                                        MTE_GRANULES_PER_PAGE);
                        else
                                /* No tags in memory, so write zeros */
                                num_tags = MTE_GRANULES_PER_PAGE -
                                        clear_user(tags, MTE_GRANULES_PER_PAGE);
                        kvm_release_pfn_clean(pfn);
                } else {
                        num_tags = mte_copy_tags_from_user(maddr, tags,
                                                        MTE_GRANULES_PER_PAGE);

                        /*
                         * Set the flag after checking the write
                         * completed fully
                         */
                        if (num_tags == MTE_GRANULES_PER_PAGE)
                                set_bit(PG_mte_tagged, &page->flags);

                        kvm_release_pfn_dirty(pfn);
                }

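                /* A short copy in either direction is a fault in the user buffer */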
                if (num_tags != MTE_GRANULES_PER_PAGE) {
                        ret = -EFAULT;
                        goto out;
                }

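                /* One tag byte per 16-byte granule, one page per iteration */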
                gfn++;
                tags += num_tags;
                length -= PAGE_SIZE;
        }

out:
        mutex_unlock(&kvm->slots_lock);

        /* If some data has been copied report the number of bytes copied */
        if (length != copy_tags->length)
                return copy_tags->length - length;
        return ret;
}
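
For context, a minimal userspace sketch of driving this ioctl follows. The
helper name read_page_tags, the vm_fd parameter, GUEST_IPA, and the buffer
sizing are illustrative assumptions, not taken from the excerpt; it also
assumes an arm64 host (so <linux/kvm.h> pulls in the arm64 <asm/kvm.h>
definitions), 4 KiB pages, and a VM created with KVM_CAP_ARM_MTE enabled.

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

#define GUEST_IPA       0x80000000UL    /* hypothetical page-aligned, memslot-backed IPA */
#define PAGE_SZ         4096UL          /* assumes 4 KiB pages */
#define TAGS_PER_PAGE   (PAGE_SZ / 16)  /* one tag byte per 16-byte MTE granule */

/* tag_buf must have room for TAGS_PER_PAGE bytes. */
static long read_page_tags(int vm_fd, uint8_t *tag_buf)
{
        struct kvm_arm_copy_mte_tags ct = {
                .guest_ipa = GUEST_IPA,
                .length    = PAGE_SZ,                   /* page-aligned, like guest_ipa */
                .addr      = tag_buf,
                .flags     = KVM_ARM_TAGS_FROM_GUEST,   /* guest -> userspace */
        };

        /*
         * On success the return value is the number of guest bytes whose
         * tags were copied (PAGE_SZ here); -1 with errno set on failure.
         */
        return ioctl(vm_fd, KVM_ARM_MTE_COPY_TAGS, &ct);
}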