in vfio_iommu_type1.c [646:755]
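
/*
 * Pin a run of pages with contiguous pfns starting at vaddr and account
 * them against the task's RLIMIT_MEMLOCK.  Returns the number of pages
 * pinned (the first pfn comes back through *pfn_base) or a negative errno.
 */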
static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
				  long npage, unsigned long *pfn_base,
				  unsigned long limit, struct vfio_batch *batch)
{
	unsigned long pfn;
	struct mm_struct *mm = current->mm;
	long ret, pinned = 0, lock_acct = 0;
	bool rsvd;
	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;

	/* This code path is only user initiated */
	if (!mm)
		return -ENODEV;

	if (batch->size) {
		/* Leftover pages in batch from an earlier call. */
		*pfn_base = page_to_pfn(batch->pages[batch->offset]);
		pfn = *pfn_base;
		rsvd = is_invalid_reserved_pfn(*pfn_base);
	} else {
		*pfn_base = 0;
	}
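
	/*
	 * Pin in batches: refill the batch via vaddr_get_pfns() whenever it
	 * empties, then walk it page by page, extending the contiguous run
	 * for as long as the pfns stay sequential.
	 */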
	while (npage) {
		if (!batch->size) {
			/* Empty batch, so refill it. */
			long req_pages = min_t(long, npage, batch->capacity);

			ret = vaddr_get_pfns(mm, vaddr, req_pages, dma->prot,
					     &pfn, batch->pages);
			if (ret < 0)
				goto unpin_out;

			batch->size = ret;
			batch->offset = 0;

			if (!*pfn_base) {
				*pfn_base = pfn;
				rsvd = is_invalid_reserved_pfn(*pfn_base);
			}
		}

		/*
		 * pfn is preset for the first iteration of this inner loop and
		 * updated at the end to handle a VM_PFNMAP pfn.  In that case,
		 * batch->pages isn't valid (there's no struct page), so allow
		 * batch->pages to be touched only when there's more than one
		 * pfn to check, which guarantees the pfns are from a
		 * !VM_PFNMAP vma.
		 */
		while (true) {
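			/*
			 * The contiguous run ends at the first pfn that is
			 * discontiguous or whose reserved-ness differs; fall
			 * out to commit what has been pinned so far.
			 */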
			if (pfn != *pfn_base + pinned ||
			    rsvd != is_invalid_reserved_pfn(pfn))
				goto out;

			/*
			 * Reserved pages aren't counted against the user,
			 * externally pinned pages are already counted against
			 * the user.
			 */
			if (!rsvd && !vfio_find_vpfn(dma, iova)) {
				if (!dma->lock_cap &&
				    mm->locked_vm + lock_acct + 1 > limit) {
					pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
						__func__, limit << PAGE_SHIFT);
					ret = -ENOMEM;
					goto unpin_out;
				}
				lock_acct++;
			}
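
			/*
			 * Advance one page in both the user and IOVA address
			 * spaces and consume one slot from the batch.
			 */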
			pinned++;
			npage--;
			vaddr += PAGE_SIZE;
			iova += PAGE_SIZE;
			batch->offset++;
			batch->size--;

			if (!batch->size)
				break;

			pfn = page_to_pfn(batch->pages[batch->offset]);
		}

		if (unlikely(disable_hugepages))
			break;
	}
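
	/* Success or clean stop: commit the memlock accounting. */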
out:
	ret = vfio_lock_acct(dma, lock_acct, false);

unpin_out:
	if (batch->size == 1 && !batch->offset) {
		/* May be a VM_PFNMAP pfn, which the batch can't remember. */
		put_pfn(pfn, dma->prot);
		batch->size = 0;
	}
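
	/*
	 * On failure, unwind: drop the references taken on the contiguous
	 * run pinned so far (reserved pfns hold no page reference) and on
	 * any pages still sitting in the batch.
	 */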
	if (ret < 0) {
		if (pinned && !rsvd) {
			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
				put_pfn(pfn, dma->prot);
		}
		vfio_batch_unpin(batch, dma);

		return ret;
	}

	return pinned;
}
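
/*
 * For reference, a sketch of the vfio_batch fields consumed above,
 * reconstructed from how they are used in this function; it is not
 * necessarily the in-tree definition, which lives elsewhere in
 * vfio_iommu_type1.c.
 */
struct vfio_batch {
	struct page **pages;	/* pinned pages backing the batch */
	int capacity;		/* length of the pages array */
	int size;		/* pages currently held in the batch */
	int offset;		/* next unconsumed entry in pages */
};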