static void __init map_pages()

in mm/init.c [340:437]


static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

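	/*
	 * Physical bounds consulted below when picking protections:
	 * [ro_start, ro_end) covers the kernel text, [kernel_start,
	 * kernel_end) the part of the kernel image managed here.
	 */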
	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	/* for a 2-level configuration PTRS_PER_PMD is 1, so the
	 * (PTRS_PER_PMD - 1) mask is 0 and start_pmd will be 0 */
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
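	/*
	 * Walk the page tables for the requested range, allocating any
	 * missing intermediate tables from memblock and installing one
	 * PTE per page.
	 */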
	while (address < end_paddr) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);

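		/* with 2 levels the PMD is folded, so nothing to allocate */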
#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
					     PAGE_SIZE << PMD_TABLE_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif

		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

			pg_table = pte_offset_kernel(pmd, vaddr);
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

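				/*
				 * Pick this page's protection: the caller's
				 * pgprot when forced, otherwise based on
				 * whether the page backs the kernel image and
				 * whether the kernel has been marked
				 * read-only; kernel-image pages are also
				 * marked huge.
				 */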
				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

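				/* don't install a PTE past the requested range */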
				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
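			/* PTE tables after the first are filled from index 0 */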
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
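
map_pages() walks the kernel page tables and installs one PTE per page for the physical range [start_paddr, start_paddr + size) starting at virtual address start_vaddr, picking per-page protections unless force is non-zero. For context, a minimal caller sketch follows; it assumes a pmem_ranges[]/npmem_ranges table of discovered physical memory ranges of the kind used by pagetable_init() in this file, and those names and fields are assumptions rather than part of the listing above.

/*
 * Hypothetical caller sketch (pmem_ranges, npmem_ranges and their
 * fields are assumed, not taken from the listing above): map each
 * discovered physical memory range into the kernel's linear mapping.
 */
static void __init map_physical_ranges_sketch(void)
{
	int i;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_paddr = pmem_ranges[i].start_pfn << PAGE_SHIFT;
		unsigned long size = pmem_ranges[i].pages << PAGE_SHIFT;

		/* force == 0: map_pages() chooses the protections itself */
		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}
}

Passing force == 1 together with an explicit pgprot bypasses the protection selection in the inner loop; the non-forced path is what keeps kernel text mapped executable and, once kernel_set_to_readonly is set, read-only.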