static int __init early_amd_iommu_init(void)

in amd/init.c [2686:2825]
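
Early boot-time setup for the AMD IOMMU driver: locate and validate the ACPI IVRS table, size and allocate the global device/alias/rlookup tables and the protection-domain bitmap, scan the table to initialize each IOMMU, and optionally set up the interrupt-remapping structures.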


static int __init early_amd_iommu_init(void)
{
	struct acpi_table_header *ivrs_base;
	int i, remap_cache_sz, ret;
	acpi_status status;

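	/* amd_iommu_detected is set during early detection (amd_iommu_detect()) */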
	if (!amd_iommu_detected)
		return -ENODEV;

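	/*
	 * The ACPI IVRS (I/O Virtualization Reporting Structure) table
	 * describes the IOMMUs and the devices they translate; a missing
	 * table simply means this machine has no AMD IOMMU.
	 */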
	status = acpi_get_table("IVRS", 0, &ivrs_base);
	if (status == AE_NOT_FOUND)
		return -ENODEV;
	else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);
		pr_err("IVRS table error: %s\n", err);
		return -EINVAL;
	}

	/*
	 * Validate checksum here so we don't need to do it when
	 * we actually parse the table
	 */
	ret = check_ivrs_checksum(ivrs_base);
	if (ret)
		goto out;

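	/* Cache the global IVinfo field from the IVRS header for later feature checks */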
	ivinfo_init(ivrs_base);

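	/*
	 * An IVRS table can carry several IVHD layouts (types 0x10, 0x11,
	 * 0x40); parse only the richest one this driver understands.
	 */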
	amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base);
	DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type);

	/*
	 * First parse the ACPI tables to find the largest Bus/Dev/Func
	 * we need to handle. Based on this information the shared data
	 * structures for the IOMMUs in the system will be allocated.
	 */
	ret = find_last_devid_acpi(ivrs_base);
	if (ret)
		goto out;

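	/* All three tables are indexed by the 16-bit device ID, so they scale with amd_iommu_last_bdf (see tbl_size() in this file) */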
	dev_table_size     = tbl_size(DEV_TABLE_ENTRY_SIZE);
	alias_table_size   = tbl_size(ALIAS_TABLE_ENTRY_SIZE);
	rlookup_table_size = tbl_size(RLOOKUP_TABLE_ENTRY_SIZE);

	/* Device table - directly used by all IOMMUs; GFP_DMA32 keeps it below 4 GiB */
	ret = -ENOMEM;
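	/* Every failed allocation below exits through 'out' with -ENOMEM */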
	amd_iommu_dev_table = (void *)__get_free_pages(
				      GFP_KERNEL | __GFP_ZERO | GFP_DMA32,
				      get_order(dev_table_size));
	if (amd_iommu_dev_table == NULL)
		goto out;

	/*
	 * Alias table - map PCI Bus/Dev/Func to the Bus/Dev/Func the
	 * IOMMU sees for that device
	 */
	amd_iommu_alias_table = (void *)__get_free_pages(GFP_KERNEL,
			get_order(alias_table_size));
	if (amd_iommu_alias_table == NULL)
		goto out;

	/* IOMMU rlookup table - find the IOMMU for a specific device */
	amd_iommu_rlookup_table = (void *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(rlookup_table_size));
	if (amd_iommu_rlookup_table == NULL)
		goto out;

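	/* Bitmap of in-use protection domain IDs: one bit per possible domain */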
	amd_iommu_pd_alloc_bitmap = (void *)__get_free_pages(
					    GFP_KERNEL | __GFP_ZERO,
					    get_order(MAX_DOMAIN_ID/8));
	if (amd_iommu_pd_alloc_bitmap == NULL)
		goto out;

	/*
	 * Let all alias entries point to themselves initially
	 */
	for (i = 0; i <= amd_iommu_last_bdf; ++i)
		amd_iommu_alias_table[i] = i;

	/*
	 * Never allocate domain 0 because it's used as the non-allocated
	 * and error value placeholder
	 */
	__set_bit(0, amd_iommu_pd_alloc_bitmap);

	/*
	 * Now that the data structures are allocated and basically
	 * initialized, start the real ACPI table scan
	 */
	ret = init_iommu_all(ivrs_base);
	if (ret)
		goto out;

	/* Disable any previously enabled IOMMUs */
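	/*
	 * (Except in a kdump kernel: there the IOMMUs are left as the
	 * crashed kernel programmed them, so in-flight DMA keeps being
	 * translated rather than hitting random memory.)
	 */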
	if (!is_kdump_kernel() || amd_iommu_disabled)
		disable_iommus();

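	/* Keep interrupt remapping enabled only if the IVRS covers every IOAPIC in the system */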
	if (amd_iommu_irq_remap)
		amd_iommu_irq_remap = check_ioapic_information();

	if (amd_iommu_irq_remap) {
		/*
		 * Interrupt remapping enabled, create kmem_cache for the
		 * remapping tables.
		 */
		ret = -ENOMEM;
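		/* Legacy IRTEs are 32 bits each; guest-virtual-APIC (GA) mode uses 128-bit entries */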
		if (!AMD_IOMMU_GUEST_IR_GA(amd_iommu_guest_ir))
			remap_cache_sz = MAX_IRQS_PER_TABLE * sizeof(u32);
		else
			remap_cache_sz = MAX_IRQS_PER_TABLE * (sizeof(u64) * 2);
		amd_iommu_irq_cache = kmem_cache_create("irq_remap_cache",
							remap_cache_sz,
							DTE_INTTAB_ALIGNMENT,
							0, NULL);
		if (!amd_iommu_irq_cache)
			goto out;

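		/*
		 * Per-device-ID pointer to its remapping table, sized like
		 * the rlookup table. kmemleak_alloc() below just registers
		 * the page allocation with kmemleak; it safely ignores NULL.
		 */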
		irq_lookup_table = (void *)__get_free_pages(
				GFP_KERNEL | __GFP_ZERO,
				get_order(rlookup_table_size));
		kmemleak_alloc(irq_lookup_table, rlookup_table_size,
			       1, GFP_KERNEL);
		if (!irq_lookup_table)
			goto out;
	}

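	/* Parse IVMD entries: unity-mapped and exclusion ranges requested by the firmware */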
	ret = init_memory_definitions(ivrs_base);
	if (ret)
		goto out;

	/* Init the device table (with irq remapping on, this flags each DTE's interrupt-table-enable bit) */
	init_device_table();

out:
	/* Don't leak any ACPI memory */
	acpi_put_table(ivrs_base);

	return ret;
}
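
The function leans on a handful of small helpers defined elsewhere in amd/init.c. Two of them do most of the validation and sizing work above; the following is a minimal sketch of what they do, based on the standard ACPI checksum rule (all bytes of a table sum to zero mod 256) and on the sizing pattern visible in the allocations (everything scales with amd_iommu_last_bdf). Treat it as an illustration, not the authoritative definitions.

/*
 * Sketch of the IVRS checksum rule: ACPI tables carry a checksum byte
 * chosen so that all bytes of the table sum to zero (mod 256).
 */
static int __init check_ivrs_checksum(struct acpi_table_header *table)
{
	int i;
	u8 checksum = 0, *p = (u8 *)table;

	for (i = 0; i < table->length; ++i)
		checksum += p[i];
	if (checksum != 0) {
		/* ACPI table corrupt */
		pr_err(FW_BUG "IVRS invalid checksum\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * Sketch of the table sizing: one entry per possible device ID up to
 * amd_iommu_last_bdf, rounded up to a power-of-two number of pages so
 * the result can be fed to get_order()/__get_free_pages().
 */
static inline unsigned long tbl_size(int entry_size)
{
	unsigned shift = PAGE_SHIFT +
			 get_order(((int)amd_iommu_last_bdf + 1) * entry_size);

	return 1UL << shift;
}

For example, with 32-byte device table entries (the DTE size in the AMD IOMMU spec) and a full 16-bit device-ID space, the device table comes out at 2 MiB.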