in mm/init.c [50:96]
/*
 * map_ram() - build the kernel linear mapping for physical RAM.
 *
 * Walks the kernel page tables from the start of DRAM up to high_memory,
 * allocating one page of PTEs for each top-level (pgd) slot and mapping
 * every physical page at its __va() address with PAGE_KERNEL permissions.
 *
 * Panics if the page-table layout is not the folded two-level scheme this
 * code hardcodes, or if a PTE page cannot be allocated (both are fatal
 * this early in boot — there is no way to continue without a mapping).
 */
static void __init map_ram(void)
{
unsigned long v, p, e;
pgd_t *pge;
p4d_t *p4e;
pud_t *pue;
pmd_t *pme;
pte_t *pte;
/*
 * p/e bound the physical range to map: from the start of DRAM up to
 * whichever comes first of the end of DRAM or __pa(high_memory) (the
 * top of the lowmem linear map).  v tracks the matching virtual
 * address.  (An earlier comment here mentioned read-only kernel page
 * extents from vmlinux.lds.S — that does not describe these values.)
 */
p = (u32) memblock_start_of_DRAM() & PAGE_MASK;
e = min((u32) memblock_end_of_DRAM(), (u32) __pa(high_memory));
v = (u32) __va(p);
pge = pgd_offset_k(v);
while (p < e) {
int j;
p4e = p4d_offset(pge, v);
pue = pud_offset(p4e, v);
pme = pmd_offset(pue, v);
/*
 * This code assumes the p4d/pud/pmd levels are all folded into
 * the pgd (two-level paging), i.e. the offset helpers return the
 * pgd entry itself.  If they don't, the set_pmd/pge++ logic below
 * would be wrong, so refuse to continue.
 */
if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
panic("%s: Kernel hardcoded for "
"two-level page tables", __func__);
}
/* Alloc one page for holding PTE's... */
pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (!pte)
panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
__func__, PAGE_SIZE, PAGE_SIZE);
/* Point this pgd/pmd slot at the freshly allocated PTE page. */
set_pmd(pme, __pmd(__pa(pte) + _PAGE_KERNEL_TABLE));
/* Fill the newly allocated page with PTE'S */
/*
 * p, v, j and pte advance in lockstep: one PTE per PAGE_SIZE of
 * physical/virtual address.  The loop ends when either the whole
 * range is mapped (p >= e) or this PTE page is full
 * (j == PTRS_PER_PTE), in which case the outer loop moves on to
 * the next pgd slot.
 */
for (j = 0; p < e && j < PTRS_PER_PTE;
v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
/* Create mapping between p and v. */
/* TODO: more fine grant for page access permission */
set_pte(pte, __pte(p + pgprot_val(PAGE_KERNEL)));
}
pge++;
}
}