in mm/tlb.c [222:262]
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
			read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
			read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	/*
	 * A live entry (non-zero ASID) must pair user addresses
	 * (VPN < TASK_SIZE) with a user ASID and kernel addresses with
	 * the kernel ASID (1); any other combination is broken.
	 */
	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
		       dtlb ? 'D' : 'I', w, e, vpn,
		       kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	/* Only entries tagged with the current mm's ASID can be checked against the page table. */
	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			/* The cached translation disagrees with the PTE. */
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
			       dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
				       page_count(p),
				       page_mapcount(p));
				/*
				 * A stale entry pointing at a freed page is
				 * definitely broken; one pointing at a page
				 * still mapped elsewhere is only suspicious.
				 */
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
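
For reference, a per-entry checker like this is normally driven over every way/entry pair of both the data and instruction TLBs with interrupts disabled, and the OR-ed result decides whether to warn or panic. The sketch below is only an illustration of such a caller, not taken from this excerpt: the wrapper name, the way/entry bound macros (DTLB_ARF_WAYS, ITLB_ARF_WAYS, XCHAL_DTLB_ARF_ENTRIES_LOG2, XCHAL_ITLB_ARF_ENTRIES_LOG2), and the report text are assumptions.

/* Hypothetical caller; the loop-bound macros and the function name are assumed. */
static void example_check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	/* Keep the hardware TLB stable while it is being walked. */
	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	local_irq_restore(flags);

	if (bug & TLB_INSANE)
		panic("TLB state is insane\n");
	else if (bug & TLB_SUSPICIOUS)
		pr_warn("TLB state is suspicious\n");
}

Disabling interrupts for the duration of the scan keeps the TLB contents from changing underneath the checker, so a mismatch reported here reflects a real inconsistency rather than a race with a concurrent flush.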