static int __nfit_mem_init()

in nfit/core.c [1076:1205]


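/*
 * Populate / update nfit_mem (dimm) objects for the memdev entries that
 * map @spa, or, when @spa is NULL, for memdev entries with a zero
 * range_index (unmapped dimms), linking in the matching control region,
 * interleave table and flush hint descriptors.
 */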
static int __nfit_mem_init(struct acpi_nfit_desc *acpi_desc,
		struct acpi_nfit_system_address *spa)
{
	struct nfit_mem *nfit_mem, *found;
	struct nfit_memdev *nfit_memdev;
	int type = spa ? nfit_spa_type(spa) : 0;

	switch (type) {
	case NFIT_SPA_DCR:
	case NFIT_SPA_PM:
		break;
	default:
		if (spa)
			return 0;
	}

	/*
	 * This loop runs in two modes: when a dimm is mapped the loop
	 * adds memdev associations to an existing dimm, or creates a
	 * dimm. In the unmapped dimm case this loop sweeps for memdev
	 * instances with an invalid / zero range_index and adds those
	 * dimms without spa associations.
	 */
	list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
		struct nfit_flush *nfit_flush;
		struct nfit_dcr *nfit_dcr;
		u32 device_handle;
		u16 dcr;

		if (spa && nfit_memdev->memdev->range_index != spa->range_index)
			continue;
		if (!spa && nfit_memdev->memdev->range_index)
			continue;
		found = NULL;
		dcr = nfit_memdev->memdev->region_index;
		device_handle = nfit_memdev->memdev->device_handle;
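		/* look up an existing dimm object for this device handle */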
		list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
			if (__to_nfit_memdev(nfit_mem)->device_handle
					== device_handle) {
				found = nfit_mem;
				break;
			}

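		/* reuse the dimm if found, otherwise allocate and track a new one */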
		if (found)
			nfit_mem = found;
		else {
			nfit_mem = devm_kzalloc(acpi_desc->dev,
					sizeof(*nfit_mem), GFP_KERNEL);
			if (!nfit_mem)
				return -ENOMEM;
			INIT_LIST_HEAD(&nfit_mem->list);
			nfit_mem->acpi_desc = acpi_desc;
			list_add(&nfit_mem->list, &acpi_desc->dimms);
		}

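		/* find the control region (DCR) named by this memdev's region_index */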
		list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
			if (nfit_dcr->dcr->region_index != dcr)
				continue;
			/*
			 * Record the control region for the dimm.  For
			 * the ACPI 6.1 case, where there are separate
			 * control regions for the pmem vs blk
			 * interfaces, be sure to record the extended
			 * blk details.
			 */
			if (!nfit_mem->dcr)
				nfit_mem->dcr = nfit_dcr->dcr;
			else if (nfit_mem->dcr->windows == 0
					&& nfit_dcr->dcr->windows)
				nfit_mem->dcr = nfit_dcr->dcr;
			break;
		}

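		/*
		 * Record the flush hint table for this dimm and stage its
		 * write-pending-queue (WPQ) flush hint addresses as resources.
		 */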
		list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
			struct acpi_nfit_flush_address *flush;
			u16 i;

			if (nfit_flush->flush->device_handle != device_handle)
				continue;
			nfit_mem->nfit_flush = nfit_flush;
			flush = nfit_flush->flush;
			nfit_mem->flush_wpq = devm_kcalloc(acpi_desc->dev,
					flush->hint_count,
					sizeof(struct resource),
					GFP_KERNEL);
			if (!nfit_mem->flush_wpq)
				return -ENOMEM;
			for (i = 0; i < flush->hint_count; i++) {
				struct resource *res = &nfit_mem->flush_wpq[i];

				res->start = flush->hint_address[i];
				res->end = res->start + 8 - 1;
			}
			break;
		}

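		/* a memdev that names a control region must have a matching DCR */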
		if (dcr && !nfit_mem->dcr) {
			dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
					spa->range_index, dcr);
			return -ENODEV;
		}

		if (type == NFIT_SPA_DCR) {
			struct nfit_idt *nfit_idt;
			u16 idt_idx;

			/* multiple dimms may share a SPA when interleaved */
			nfit_mem->spa_dcr = spa;
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
			idt_idx = nfit_memdev->memdev->interleave_index;
			list_for_each_entry(nfit_idt, &acpi_desc->idts, list) {
				if (nfit_idt->idt->interleave_index != idt_idx)
					continue;
				nfit_mem->idt_dcr = nfit_idt->idt;
				break;
			}
			nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
		} else if (type == NFIT_SPA_PM) {
			/*
			 * A single dimm may belong to multiple SPA-PM
			 * ranges; record at least one in addition to
			 * any SPA-DCR range.
			 */
			nfit_mem->memdev_pmem = nfit_memdev->memdev;
		} else
			nfit_mem->memdev_dcr = nfit_memdev->memdev;
	}

	return 0;
}
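
For context, a minimal sketch of how this helper is typically driven, assuming the caller is nfit_mem_init() in the same file: it walks every SPA range and then makes one extra pass with a NULL spa so that unmapped dimms (memdev entries with a zero range_index) still get nfit_mem objects. The sketch omits the caller's follow-up handling (e.g. sorting the dimm list) and is not a verbatim copy of core.c.

static int nfit_mem_init(struct acpi_nfit_desc *acpi_desc)
{
	struct nfit_spa *nfit_spa;
	int rc;

	/* one pass per SPA range: associates mapped dimms */
	list_for_each_entry(nfit_spa, &acpi_desc->spas, list) {
		rc = __nfit_mem_init(acpi_desc, nfit_spa->spa);
		if (rc)
			return rc;
	}

	/* final pass with a NULL spa: picks up unmapped dimms */
	return __nfit_mem_init(acpi_desc, NULL);
}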