in mm/tlb.c [135:166]
/*
 * flush_tlb_page - invalidate the TLB entry covering a single page of a
 * process address space.
 * @vma:  the VMA the page belongs to; used only to obtain the owning mm
 *        (and thus its ASID) via cpu_asid().
 * @addr: virtual address inside the page whose translation must be dropped.
 *
 * The address is combined with the mm's ASID so only this address space's
 * mapping is invalidated, not other processes' mappings of the same VA.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
/* ASID (hardware address-space ID) of the mm owning this VMA. */
int newpid = cpu_asid(vma->vm_mm);
/*
 * Round the address down to the TLB-entry granule so it can be OR'ed
 * with the ASID below (low bits carry the ASID in entryhi / tlbi.vas).
 */
addr &= TLB_ENTRY_SIZE_MASK;
#ifdef CONFIG_CPU_HAS_TLBI
/*
 * Hardware broadcast-invalidate path: tlbi.vas invalidates by
 * VA+ASID; sync_is()/sync.i order the invalidation against
 * surrounding memory accesses. NOTE(review): exact ordering
 * semantics of sync.i are ISA-defined — see the C-SKY manual.
 */
sync_is();
asm volatile(
"tlbi.vas %0 \n"
"sync.i \n"
:
: "r" (addr | newpid)
: "memory");
#else
{
/*
 * Software probe/invalidate path (MIPS-style TLB interface).
 * Must run with IRQs off: entryhi is clobbered during the probe
 * and an interrupt taking a TLB refill mid-sequence would see
 * inconsistent MMU state.
 */
int oldpid, idx;
unsigned long flags;
local_irq_save(flags);
/* Save the current ASID so it can be restored afterwards. */
oldpid = read_mmu_entryhi() & ASID_MASK;
/* Load the VA+ASID to search for, then probe the TLB. */
write_mmu_entryhi(addr | newpid);
tlb_probe();
idx = read_mmu_index();
/* A negative index means "no matching entry" — nothing to drop. */
if (idx >= 0)
tlb_invalid_indexed();
/*
 * Restore the previous ASID; presumably this also flushes the
 * micro-TLB, per the helper's name — confirm against its
 * definition.
 */
restore_asid_inv_utlb(oldpid, newpid);
local_irq_restore(flags);
}
#endif
}