static int vfio_iommu_type1_attach_group()

in vfio_iommu_type1.c [2156:2342]


static int vfio_iommu_type1_attach_group(void *iommu_data,
		struct iommu_group *iommu_group, enum vfio_group_type type)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_iommu_group *group;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	bool resv_msi, msi_remap;
	phys_addr_t resv_msi_base = 0;
	struct iommu_domain_geometry *geo;
	LIST_HEAD(iova_copy);
	LIST_HEAD(group_resv_regions);
	int ret = -EINVAL;

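	/*
	 * iommu->lock serializes attach/detach against DMA map/unmap and
	 * protects the container's domain, group and iova lists throughout.
	 */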
	mutex_lock(&iommu->lock);

	/* Check for duplicates */
	if (vfio_iommu_find_iommu_group(iommu, iommu_group))
		goto out_unlock;

	ret = -ENOMEM;
	group = kzalloc(sizeof(*group), GFP_KERNEL);
	if (!group)
		goto out_unlock;
	group->iommu_group = iommu_group;

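	/*
	 * Emulated IOMMU groups (e.g. mdev devices) are not backed by a
	 * hardware IOMMU: track them on their own list, no domain needed.
	 */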
	if (type == VFIO_EMULATED_IOMMU) {
		list_add(&group->next, &iommu->emulated_iommu_groups);
		/*
	 * An emulated IOMMU group cannot dirty memory directly; it can
		 * only use interfaces that provide dirty tracking.
		 * The iommu scope can only be promoted with the addition of a
		 * dirty tracking group.
		 */
		group->pinned_page_dirty_scope = true;
		ret = 0;
		goto out_unlock;
	}

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free_group;

	ret = -ENOMEM;
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		goto out_free_group;

	ret = -EIO;
	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain)
		goto out_free_domain;

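	/*
	 * Nesting was requested with VFIO_TYPE1_NESTING_IOMMU at SET_IOMMU
	 * time; configure it on the fresh domain before attaching the group.
	 */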
	if (iommu->nesting) {
		ret = iommu_enable_nesting(domain->domain);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, group->iommu_group);
	if (ret)
		goto out_domain;

	/* Get aperture info */
	geo = &domain->domain->geometry;
	if (vfio_iommu_aper_conflict(iommu, geo->aperture_start,
				     geo->aperture_end)) {
		ret = -EINVAL;
		goto out_detach;
	}

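	/*
	 * Reserved regions (MSI windows, direct-mapped ranges, etc.) are
	 * unusable for DMA; fail if an existing mapping overlaps one.
	 */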
	ret = iommu_get_group_resv_regions(iommu_group, &group_resv_regions);
	if (ret)
		goto out_detach;

	if (vfio_iommu_resv_conflict(iommu, &group_resv_regions)) {
		ret = -EINVAL;
		goto out_detach;
	}

	/*
	 * Work on a copy of the iova list: it is modified below, and on
	 * failure the original list must be retained untouched.
	 */
	ret = vfio_iommu_iova_get_copy(iommu, &iova_copy);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_aper_resize(&iova_copy, geo->aperture_start,
				     geo->aperture_end);
	if (ret)
		goto out_detach;

	ret = vfio_iommu_resv_exclude(&iova_copy, &group_resv_regions);
	if (ret)
		goto out_detach;

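	/* Check for a software-managed MSI window; the cookie is set up below */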
	resv_msi = vfio_iommu_has_sw_msi(&group_resv_regions, &resv_msi_base);

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

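	/*
	 * Without interrupt remapping a user-owned device could forge MSI
	 * messages to arbitrary host vectors, so refuse to continue unless
	 * the admin has explicitly opted in via allow_unsafe_interrupts.
	 */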
	msi_remap = irq_domain_check_msi_remap() ||
		    iommu_capable(bus, IOMMU_CAP_INTR_REMAP);

	if (!allow_unsafe_interrupts && !msi_remap) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

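	/* Map with IOMMU_CACHE if the IOMMU enforces DMA cache coherency */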
	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, group->iommu_group);
			if (!iommu_attach_group(d->domain,
						group->iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				goto done;
			}

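			/*
			 * Sharing failed; fall back to the domain allocated
			 * above.  If this re-attach fails too, the group is
			 * attached nowhere, so skip out_detach's detach.
			 */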
			ret = iommu_attach_group(domain->domain,
						 group->iommu_group);
			if (ret)
				goto out_domain;
		}
	}

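	/*
	 * Probe how the IOMMU driver handles contiguous mappings vs.
	 * superpages; the result tunes the unmap strategy used later.
	 */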
	vfio_test_domain_fgsp(domain);

	/* Replay existing DMA mappings on the new domain */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

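	/*
	 * Install an MSI cookie so MSI doorbells can be mapped into the
	 * reserved window; -ENODEV (no cookie support) is tolerated.
	 */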
	if (resv_msi) {
		ret = iommu_get_msi_cookie(domain->domain, resv_msi_base);
		if (ret && ret != -ENODEV)
			goto out_detach;
	}

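	/*
	 * The new domain is fully initialized: publish it and recompute
	 * the page-size bitmap shared by all domains in the container.
	 */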
	list_add(&domain->next, &iommu->domain_list);
	vfio_update_pgsize_bitmap(iommu);
done:
	/* Swap the old iova list for the updated copy */
	vfio_iommu_iova_insert_copy(iommu, &iova_copy);

	/*
	 * An IOMMU-backed group can dirty memory directly, so it demotes
	 * the container's dirty-tracking scope until it declares itself
	 * capable of dirty tracking via the page pinning interface.
	 */
	iommu->num_non_pinned_groups++;
	mutex_unlock(&iommu->lock);
	vfio_iommu_resv_free(&group_resv_regions);

	return 0;

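	/* Error unwind: each label below undoes one more setup stage */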
out_detach:
	iommu_detach_group(domain->domain, group->iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
	vfio_iommu_iova_free(&iova_copy);
	vfio_iommu_resv_free(&group_resv_regions);
out_free_domain:
	kfree(domain);
out_free_group:
	kfree(group);
out_unlock:
	mutex_unlock(&iommu->lock);
	return ret;
}