/* from mm/tlb.c, lines 52-93 */
/*
 * flush_tlb_range() - invalidate TLB entries for [start, end) of @vma's
 * address space, restricted to the mm's ASID so other address spaces'
 * translations are left intact.
 *
 * @vma:   VMA whose mm supplies the ASID used to tag/match entries
 * @start: first virtual address to invalidate (rounded down below)
 * @end:   end of the range, exclusive (rounded up below)
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
/* ASID tag for this mm; or'ed into each address written/probed below. */
unsigned long newpid = cpu_asid(vma->vm_mm);
/* Round start down and end up to TLB-entry-sized boundaries so the
 * whole range is covered even when callers pass unaligned addresses. */
start &= TLB_ENTRY_SIZE_MASK;
end += TLB_ENTRY_SIZE - 1;
end &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
/* Hardware TLB-invalidate instruction available: issue one tlbi.vas
 * per entry.  NOTE(review): sync_is()/sync.i presumably order the
 * invalidations against surrounding memory accesses — confirm against
 * the C-SKY architecture manual. */
sync_is();
while (start < end) {
asm volatile(
"tlbi.vas %0 \n"
:
: "r" (start | newpid)
: "memory");
/* Step by two pages: each TLB entry appears to map a page pair
 * (i.e. TLB_ENTRY_SIZE == 2*PAGE_SIZE) — TODO confirm. */
start += 2*PAGE_SIZE;
}
asm volatile("sync.i\n");
#else
/* Software probe-and-invalidate fallback via the MMU registers.
 * Must run with irqs off: EntryHi is clobbered per iteration and an
 * interrupt taking a TLB refill mid-sequence would see a stale ASID. */
{
unsigned long flags, oldpid;
local_irq_save(flags);
/* Remember the ASID currently live in EntryHi so it can be restored. */
oldpid = read_mmu_entryhi() & ASID_MASK;
while (start < end) {
int idx;
/* Load the (address | ASID) key, then probe for a matching entry. */
write_mmu_entryhi(start | newpid);
start += 2*PAGE_SIZE;
tlb_probe();
idx = read_mmu_index();
/* Negative index means no match; only invalidate on a hit. */
if (idx >= 0)
tlb_invalid_indexed();
}
/* Restore the previous ASID and drop any stale micro-TLB entries. */
restore_asid_inv_utlb(oldpid, newpid);
local_irq_restore(flags);
}
#endif
}