static void __init setup_bootmem()

in mm/init.c [109:336]
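
Boot-time memory setup for parisc: sorts the firmware-reported physical memory ranges, trims them according to MAX_GAP (FLATMEM only) and any "mem=" limit, registers the remaining RAM with memblock and the resource tree, and reserves the regions that must stay out of the allocator: PDC/firmware memory, the kernel image, FLATMEM holes, and the initrd.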


static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */

	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn)
				break;
			swap(pmem_ranges[j-1], pmem_ranges[j]);
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
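	 * With FLATMEM the single mem_map would have to span any hole,
	 * wasting memory on struct pages for nonexistent RAM.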
	 */

	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges and register each as a System RAM resource */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */

	mem_limit_func();       /* check for "mem=" argument */
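
	/*
	 * Walk the ranges, accumulating sizes until mem_limit is hit:
	 * the range that straddles the limit is shrunk, and any ranges
	 * wholly beyond it are dropped.
	 */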

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages =   (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
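	/* Everything is folded into one range; the holes are recorded so
	 * they can be memblock_reserve()d further down. */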
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */

	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory and the kernel text/data/bss */

#define PDC_CONSOLE_IO_IODC_SIZE 32768
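	/* The extra 32 KB leaves room for the PDC console's IODC area
	 * beyond the firmware memory reported in PAGE0->mem_free. */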

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				(pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
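			/* Clamp the reservation when the initrd extends past
			 * the (possibly "mem="-truncated) end of memory. */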
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif
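
	/* The kernel image itself: code_resource covers _text up to
	 * data_start, data_resource covers data_start through _end. */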

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();
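
	/* Memory is fully registered: let the memblock arrays resize
	 * themselves, and dump the final layout if memblock debugging
	 * is enabled. */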

	memblock_allow_resize();
	memblock_dump_all();
}
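
The sort-and-trim step at the top of the function is self-contained enough to demonstrate on its own. Below is a minimal userspace sketch of that logic; physmem_range_t, MAX_GAP, and sort_and_trim() are simplified stand-ins chosen for this example (the MAX_GAP value assumes 4 KB pages), not the kernel's actual definitions.

#include <stdio.h>

/* Simplified stand-ins for the kernel's physmem_range_t and MAX_GAP;
 * the value below assumes 4 KB pages and is only an example. */
typedef struct {
	unsigned long start_pfn;
	unsigned long pages;
} physmem_range_t;

#define MAX_GAP (0x40000000UL >> 12)

static void swap_range(physmem_range_t *a, physmem_range_t *b)
{
	physmem_range_t t = *a;
	*a = *b;
	*b = t;
}

/* Same shape as above: insertion-sort the ranges by start_pfn, then
 * truncate the list at the first hole wider than MAX_GAP pages.
 * Returns the new range count (what the kernel keeps in npmem_ranges). */
static int sort_and_trim(physmem_range_t *r, int n)
{
	int i, j;

	for (i = 1; i < n; i++)
		for (j = i; j > 0 && r[j-1].start_pfn >= r[j].start_pfn; j--)
			swap_range(&r[j-1], &r[j]);

	for (i = 1; i < n; i++) {
		unsigned long prev_end = r[i-1].start_pfn + r[i-1].pages;

		if (r[i].start_pfn - prev_end > MAX_GAP)
			return i;	/* ranges i.. are dropped */
	}
	return n;
}

int main(void)
{
	/* Two out-of-order ranges with a modest hole between them. */
	physmem_range_t r[] = {
		{ .start_pfn = 0x80000, .pages = 0x10000 },
		{ .start_pfn = 0x00000, .pages = 0x40000 },
	};
	int n = sort_and_trim(r, 2);

	printf("%d usable range(s); first starts at pfn 0x%lx\n",
	       n, r[0].start_pfn);
	return 0;
}

Note that the trim only shortens the count; like the kernel, which simply rewrites npmem_ranges, nothing is freed or moved.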