static int vfio_iommu_replay()

in vfio_iommu_type1.c [1694:1840]


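/*
 * Replay every existing DMA mapping from iommu->dma_list into a newly
 * attached domain.  Ranges already mapped through an existing domain are
 * looked up with iommu_iova_to_phys(); anything else is pinned here.
 * On failure, all progress is unwound.
 */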
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_batch batch;
	struct vfio_domain *d = NULL;
	struct rb_node *n;
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	int ret;

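	/* Wait until every vfio_dma has a valid vaddr before replaying */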
	ret = vfio_wait_all_valid(iommu);
	if (ret < 0)
		return ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	if (!list_empty(&iommu->domain_list))
		d = list_first_entry(&iommu->domain_list,
				     struct vfio_domain, next);

	vfio_batch_init(&batch);

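	/* Walk every tracked DMA range and map it into the new domain */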
	n = rb_first(&iommu->dma_list);

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys;
			size_t size;

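			/*
			 * Already mapped through an existing domain, so the
			 * pages are pinned; look up the physical address
			 * there instead of pinning again.
			 */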
			if (dma->iommu_mapped) {
				phys_addr_t p;
				dma_addr_t i;

				if (WARN_ON(!d)) { /* mapped w/o a domain?! */
					ret = -EINVAL;
					goto unwind;
				}

				phys = iommu_iova_to_phys(d->domain, iova);

				if (WARN_ON(!phys)) {
					iova += PAGE_SIZE;
					continue;
				}

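				/* Coalesce physically contiguous pages into one mapping */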
				size = PAGE_SIZE;
				p = phys + size;
				i = iova + size;
				while (i < dma->iova + dma->size &&
				       p == iommu_iova_to_phys(d->domain, i)) {
					size += PAGE_SIZE;
					p += PAGE_SIZE;
					i += PAGE_SIZE;
				}
			} else {
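				/* Not yet mapped anywhere: pin the pages now */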
				unsigned long pfn;
				unsigned long vaddr = dma->vaddr +
						     (iova - dma->iova);
				size_t n = dma->iova + dma->size - iova;
				long npage;

				npage = vfio_pin_pages_remote(dma, vaddr,
							      n >> PAGE_SHIFT,
							      &pfn, limit,
							      &batch);
				if (npage <= 0) {
					WARN_ON(!npage);
					ret = (int)npage;
					goto unwind;
				}

				phys = pfn << PAGE_SHIFT;
				size = npage << PAGE_SHIFT;
			}

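			/* Map the (possibly coalesced) range into the new domain */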
			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret) {
				if (!dma->iommu_mapped) {
					vfio_unpin_pages_remote(dma, iova,
							phys >> PAGE_SHIFT,
							size >> PAGE_SHIFT,
							true);
					vfio_batch_unpin(&batch, dma);
				}
				goto unwind;
			}

			iova += size;
		}
	}

	/* All dmas are now mapped, defer to second tree walk for unwind */
	for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);

		dma->iommu_mapped = true;
	}

	vfio_batch_fini(&batch);
	return 0;

unwind:
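	/* Walk back from the failing entry, undoing maps and pins */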
	for (; n; n = rb_prev(n)) {
		struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
		dma_addr_t iova;

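		/*
		 * Mapped via another domain: the pages remain pinned there,
		 * only the new domain's mapping needs to be removed.
		 */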
		if (dma->iommu_mapped) {
			iommu_unmap(domain->domain, dma->iova, dma->size);
			continue;
		}

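		/* Pinned by this replay: unmap and unpin in contiguous chunks */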
		iova = dma->iova;
		while (iova < dma->iova + dma->size) {
			phys_addr_t phys, p;
			size_t size;
			dma_addr_t i;

			phys = iommu_iova_to_phys(domain->domain, iova);
			if (!phys) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;
			p = phys + size;
			i = iova + size;
			while (i < dma->iova + dma->size &&
			       p == iommu_iova_to_phys(domain->domain, i)) {
				size += PAGE_SIZE;
				p += PAGE_SIZE;
				i += PAGE_SIZE;
			}

			iommu_unmap(domain->domain, iova, size);
			vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
						size >> PAGE_SHIFT, true);
		}
	}

	vfio_batch_fini(&batch);
	return ret;
}