static int __init init_iommu_one()

in amd/init.c [1529:1646]
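
Initializes a single IOMMU instance from its IVHD entry in the ACPI IVRS table: the IOMMU is registered in the driver's global list and array, its MMIO region is sized from the reported Extended Feature Register (EFR) bits and mapped, its command and event buffers are allocated, and interrupt remapping is wired up when enabled.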


static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
{
	int ret;

	raw_spin_lock_init(&iommu->lock);
	iommu->cmd_sem_val = 0;

	/* Add IOMMU to internal data structures */
	list_add_tail(&iommu->list, &amd_iommu_list);
	iommu->index = amd_iommus_present++;

	if (unlikely(iommu->index >= MAX_IOMMUS)) {
		WARN(1, "System has more IOMMUs than supported by this driver\n");
		return -ENOSYS;
	}

	/* Index is fine - add IOMMU to the array */
	amd_iommus[iommu->index] = iommu;

	/*
	 * Copy data from ACPI table entry to the iommu struct
	 */
	iommu->devid   = h->devid;
	iommu->cap_ptr = h->cap_ptr;
	iommu->pci_seg = h->pci_seg;
	iommu->mmio_phys = h->mmio_phys;

	switch (h->type) {
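	/*
	 * IVHD type 10h exposes a subset of the EFR bits in efr_attr;
	 * types 11h and 40h carry a full EFR register image in efr_reg.
	 */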
	case 0x10:
		/* Check if IVHD EFR contains proper max banks/counters */
		if ((h->efr_attr != 0) &&
		    ((h->efr_attr & (0xF << 13)) != 0) &&
		    ((h->efr_attr & (0x3F << 17)) != 0))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support,
		 * and GAM in turn requires GA mode. Therefore, check for
		 * cmpxchg16b support before enabling it.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_attr & (0x1 << IOMMU_FEAT_GASUP_SHIFT)) == 0))
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
		break;
	case 0x11:
	case 0x40:
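		/* EFR bit 9 (PCSup): performance counter registers are present */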
		if (h->efr_reg & (1 << 9))
			iommu->mmio_phys_end = MMIO_REG_END_OFFSET;
		else
			iommu->mmio_phys_end = MMIO_CNTR_CONF_OFFSET;

		/*
		 * Note: GA (128-bit IRTE) mode requires cmpxchg16b support,
		 * and XT and GAM in turn require GA mode. Therefore, check
		 * for cmpxchg16b support before enabling them.
		 */
		if (!boot_cpu_has(X86_FEATURE_CX16) ||
		    ((h->efr_reg & (0x1 << IOMMU_EFR_GASUP_SHIFT)) == 0)) {
			amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_LEGACY;
			break;
		}

		/* XTSup: interrupts can target x2APIC (32-bit) destination IDs */
		if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT))
			amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE;

		early_iommu_features_init(iommu, h);

		break;
	default:
		return -EINVAL;
	}

	/* Map only up to mmio_phys_end, as sized by the EFR checks above */
	iommu->mmio_base = iommu_map_mmio_space(iommu->mmio_phys,
						iommu->mmio_phys_end);
	if (!iommu->mmio_base)
		return -ENOMEM;

	/* Allocate the completion-wait write-back (cwwb) semaphore buffer */
	if (alloc_cwwb_sem(iommu))
		return -ENOMEM;

	if (alloc_command_buffer(iommu))
		return -ENOMEM;

	if (alloc_event_buffer(iommu))
		return -ENOMEM;

	iommu->int_enabled = false;

	/*
	 * Detect whether firmware or a previous kernel left translation
	 * enabled; outside of kdump, disable it and start from a clean state.
	 */
	init_translation_status(iommu);
	if (translation_pre_enabled(iommu) && !is_kdump_kernel()) {
		iommu_disable(iommu);
		clear_translation_pre_enabled(iommu);
		pr_warn("Translation was enabled for IOMMU:%d but we are not in kdump mode\n",
			iommu->index);
	}
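	/* amd_iommu_pre_enabled stays true only if every IOMMU was left enabled */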
	if (amd_iommu_pre_enabled)
		amd_iommu_pre_enabled = translation_pre_enabled(iommu);

	ret = init_iommu_from_acpi(iommu, h);
	if (ret)
		return ret;

	if (amd_iommu_irq_remap) {
		ret = amd_iommu_create_irq_domain(iommu);
		if (ret)
			return ret;
	}

	/*
	 * Make sure the IOMMU is not considered to translate itself. The
	 * IVRS table says it does, but that is a lie!
	 */
	amd_iommu_rlookup_table[iommu->devid] = NULL;

	return 0;
}
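
For context, this function runs once per IVHD entry as the driver walks the ACPI IVRS table. The sketch below is a simplified, illustrative model of that caller, loosely based on init_iommu_all() in the same file; error unwinding and the exact IVHD-type filtering are elided, so treat the details as assumptions rather than the verbatim kernel code:

static int __init init_iommu_all(struct acpi_table_header *table)
{
	u8 *p = (u8 *)table, *end = (u8 *)table + table->length;
	struct ivhd_header *h;
	struct amd_iommu *iommu;
	int ret;

	p += IVRS_HEADER_LENGTH;	/* skip the fixed IVRS table header */

	while (p < end) {
		h = (struct ivhd_header *)p;
		if (h->type == amd_iommu_target_ivhd_type) {
			/* one amd_iommu instance per matching IVHD entry */
			iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
			if (!iommu)
				return -ENOMEM;

			ret = init_iommu_one(iommu, h);
			if (ret)
				return ret;
		}
		p += h->length;		/* IVHD entries are variable-length */
	}

	return 0;
}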