static int alloc_iommu(struct dmar_drhd_unit *drhd)

in intel/dmar.c [1052:1161]


static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
	struct intel_iommu *iommu;
	u32 ver, sts;
	int agaw = -1;
	int msagaw = -1;
	int err;

	if (!drhd->reg_base_addr) {
		warn_invalid_dmar(0, "");
		return -EINVAL;
	}

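	/* Allocate and zero the descriptor for this remapping hardware unit. */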
	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return -ENOMEM;

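	/* Reserve a unique sequence id, which also yields the "dmar%d" unit name. */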
	if (dmar_alloc_seq_id(iommu) < 0) {
		pr_err("Failed to allocate seq_id\n");
		err = -ENOSPC;
		goto error;
	}

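	/* Map the register window and cache the cap/ecap registers. */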
	err = map_iommu(iommu, drhd->reg_base_addr);
	if (err) {
		pr_err("Failed to map %s\n", iommu->name);
		goto error_free_seq_id;
	}

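	/*
	 * A unit that reports no supported address widths cannot be
	 * used for DMA translation; mark it ignored.
	 */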
	err = -EINVAL;
	if (cap_sagaw(iommu->cap) == 0) {
		pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
			iommu->name);
		drhd->ignored = 1;
	}

	if (!drhd->ignored) {
		agaw = iommu_calculate_agaw(iommu);
		if (agaw < 0) {
			pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
		}
	}
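	/* A valid maximum supported AGAW is likewise required for translation. */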
	if (!drhd->ignored) {
		msagaw = iommu_calculate_max_sagaw(iommu);
		if (msagaw < 0) {
			pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
			       iommu->seq_id);
			drhd->ignored = 1;
			agaw = -1;
		}
	}
	iommu->agaw = agaw;
	iommu->msagaw = msagaw;
	iommu->segment = drhd->segment;

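	/* NUMA affinity, if known, is filled in later from the ACPI RHSA entry. */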
	iommu->node = NUMA_NO_NODE;

	ver = readl(iommu->reg + DMAR_VER_REG);
	pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
		iommu->name,
		(unsigned long long)drhd->reg_base_addr,
		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
		(unsigned long long)iommu->cap,
		(unsigned long long)iommu->ecap);

	/*
	 * Reflect any state already enabled by firmware or a previous
	 * kernel in the cached gcmd value, so that later writes to the
	 * global command register do not inadvertently clear it.
	 */
	sts = readl(iommu->reg + DMAR_GSTS_REG);
	if (sts & DMA_GSTS_IRES)
		iommu->gcmd |= DMA_GCMD_IRE;
	if (sts & DMA_GSTS_TES)
		iommu->gcmd |= DMA_GCMD_TE;
	if (sts & DMA_GSTS_QIES)
		iommu->gcmd |= DMA_GCMD_QIE;

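	/* register_lock serializes access to this unit's registers. */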
	raw_spin_lock_init(&iommu->register_lock);

	/*
	 * This is only for hotplug; at boot time intel_iommu_enabled won't
	 * be set yet. When intel_iommu_init() runs, it registers the units
	 * present at boot time, then sets intel_iommu_enabled.
	 */
	if (intel_iommu_enabled && !drhd->ignored) {
		err = iommu_device_sysfs_add(&iommu->iommu, NULL,
					     intel_iommu_groups,
					     "%s", iommu->name);
		if (err)
			goto err_unmap;

		err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
		if (err)
			goto err_sysfs;
	}

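	/* Cross-link the DRHD description and the new IOMMU instance. */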
	drhd->iommu = iommu;
	iommu->drhd = drhd;

	return 0;

err_sysfs:
	iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
	unmap_iommu(iommu);
error_free_seq_id:
	dmar_free_seq_id(iommu);
error:
	kfree(iommu);
	return err;
}