static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)

in kernel/tlb.c [351:453]
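
flush_tlb_kernel_range_common() walks the kernel page tables covering
[start, end), batches the host munmap/mmap/mprotect operations needed to
bring the host address space back in sync, issues them through do_ops(),
and returns whether anything had to be changed. Any failure while updating
the host mappings is treated as fatal and panics.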


static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err = 0, force = 0, userspace = 0;
	struct host_vm_change hvc;

	mm = &init_mm;
	hvc = INIT_HVC(mm, force, userspace);
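	/*
	 * Kernel mappings live in init_mm.  Walk its page tables over
	 * [start, end): levels that are not present are skipped in one
	 * step, entries flagged as new have their stale host mappings
	 * queued for removal, and leaf ptes are remapped or reprotected
	 * as needed.  hvc batches the resulting host mmap/munmap/mprotect
	 * calls so they can be issued together.
	 */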
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
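			/*
			 * ADD_ROUND advances addr to the next PGDIR_SIZE
			 * boundary, clamped to end.  A "newpage" entry means
			 * the host may still map part of this range, so queue
			 * a munmap for it.  The p4d, pud and pmd levels below
			 * repeat the same pattern at their own step sizes.
			 */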
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(*p4d)) {
			last = ADD_ROUND(addr, P4D_SIZE);
			if (last > end)
				last = end;
			if (p4d_newpage(*p4d)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(p4d, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
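			/*
			 * The page is gone or has been replaced: unmap it on
			 * the host and, if a pte is still present, map the
			 * new physical page at the same virtual address.
			 */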
			updated = 1;
			err = add_munmap(addr, PAGE_SIZE, &hvc);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, 0, &hvc);
		}
		else if (pte_newprot(*pte)) {
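			/*
			 * The page itself is unchanged; only its protection
			 * needs to be refreshed on the host.
			 */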
			updated = 1;
			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
		}
		addr += PAGE_SIZE;
	}
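	/* Flush whatever operations are still batched in hvc to the host. */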
	if (!err)
		err = do_ops(&hvc, hvc.index, 1);

	if (err < 0)
		panic("flush_tlb_kernel failed, errno = %d\n", err);
	return updated;
}
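
For context, a minimal sketch of how this helper is typically driven. The
flush_tlb_kernel_range() and flush_tlb_kernel_vm() wrappers and the
start_vm/end_vm bounds are assumed from the surrounding UML code and are
not part of the excerpt above.

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	/* Resync the host mappings for an arbitrary kernel virtual range. */
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	/* Resync the kernel VM area, assuming start_vm/end_vm bounds. */
	flush_tlb_kernel_range_common(start_vm, end_vm);
}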