in arch/parisc/mm/fault.c [260:427]
void do_page_fault(struct pt_regs *regs, unsigned long code,
                   unsigned long address)
{
        struct vm_area_struct *vma, *prev_vma;
        struct task_struct *tsk;
        struct mm_struct *mm;
        unsigned long acc_type;
        vm_fault_t fault = 0;
        unsigned int flags;
        char *msg;

        tsk = current;
        mm = tsk->mm;
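
        /*
         * No mm means the fault came from interrupt or kernel-thread
         * context, where there is no user address space to search.
         */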
        if (!mm) {
                msg = "Page fault: no context";
                goto no_context;
        }

        flags = FAULT_FLAG_DEFAULT;
        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;
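
        /*
         * parisc_acctyp() decodes the faulting instruction (saved in the
         * IIR, the Interruption Instruction Register) into the
         * VM_READ/VM_WRITE/VM_EXEC access type the fault is checked against.
         */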
        acc_type = parisc_acctyp(code, regs->iir);
        if (acc_type & VM_WRITE)
                flags |= FAULT_FLAG_WRITE;

        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
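
        /*
         * The VMA tree is searched under the mmap read lock; if the fault
         * must be retried (e.g. after waiting for page I/O) we come back
         * here and take the lock again.
         */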
retry:
        mmap_read_lock(mm);
        vma = find_vma_prev(mm, address, &prev_vma);
        if (!vma || address < vma->vm_start)
                goto check_expansion;

        /*
         * Ok, we have a good vm_area for this memory access. We still need
         * to check the access permissions.
         */
good_area:
        if ((vma->vm_flags & acc_type) != acc_type)
                goto bad_area;

        /*
         * If for any reason at all we couldn't handle the fault, make
         * sure we exit gracefully rather than endlessly redo the
         * fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);
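
        /*
         * On the retry path handle_mm_fault() has already dropped the mmap
         * lock, so when a fatal signal interrupted the fault we can simply
         * return without unlocking.
         */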
        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                /*
                 * We hit a shared mapping outside of the file, or some
                 * other thing happened to us that made us unable to
                 * handle the page fault gracefully.
                 */
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON |
                                  VM_FAULT_HWPOISON_LARGE))
                        goto bad_area;
                BUG();
        }

        if (fault & VM_FAULT_RETRY) {
                /*
                 * No need to mmap_read_unlock(mm) as we would
                 * have already released it in __lock_page_or_retry
                 * in mm/filemap.c.
                 */
                flags |= FAULT_FLAG_TRIED;
                goto retry;
        }
        mmap_read_unlock(mm);
        return;
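
        /*
         * On parisc the stack grows upward (CONFIG_STACK_GROWSUP), so a
         * fault just past the end of the preceding VMA may be a stack
         * reference; try to grow that VMA to cover the address.
         */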
check_expansion:
        vma = prev_vma;
        if (vma && (expand_stack(vma, address) == 0))
                goto good_area;

        /*
         * Something tried to access memory that isn't in our memory map..
         */
bad_area:
        mmap_read_unlock(mm);

        if (user_mode(regs)) {
                int signo, si_code;
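
                /*
                 * "code" is the hardware trap number the PA-RISC CPU
                 * delivered with the fault; it determines which kind of
                 * access failed and therefore which signal to raise.
                 */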
                switch (code) {
                case 15:        /* Data TLB miss fault / data page fault */
                        /* send SIGSEGV when outside of vma */
                        if (!vma ||
                            address < vma->vm_start || address >= vma->vm_end) {
                                signo = SIGSEGV;
                                si_code = SEGV_MAPERR;
                                break;
                        }

                        /* send SIGSEGV for wrong permissions */
                        if ((vma->vm_flags & acc_type) != acc_type) {
                                signo = SIGSEGV;
                                si_code = SEGV_ACCERR;
                                break;
                        }

                        /* probably address is outside of mapped file */
                        fallthrough;
                case 17:        /* NA data TLB miss / page fault */
                case 18:        /* Unaligned access - PCXS only */
                        signo = SIGBUS;
                        si_code = (code == 18) ? BUS_ADRALN : BUS_ADRERR;
                        break;
                case 16:        /* Non-access instruction TLB miss fault */
                case 26:        /* PCXL: Data memory access rights trap */
                default:
                        signo = SIGSEGV;
                        si_code = (code == 26) ? SEGV_ACCERR : SEGV_MAPERR;
                        break;
                }
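
                /*
                 * A hardware memory failure overrides whatever signal was
                 * chosen above: the task gets SIGBUS with si_code
                 * BUS_MCEERR_AR and the size of the poisoned granule in
                 * si_addr_lsb.
                 */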
#ifdef CONFIG_MEMORY_FAILURE
                if (fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE)) {
                        unsigned int lsb = 0;

                        printk(KERN_ERR
                                "MCE: Killing %s:%d due to hardware memory corruption fault at %08lx\n",
                                tsk->comm, tsk->pid, address);
                        /*
                         * Either a small page or a large page may be
                         * poisoned, never both: VM_FAULT_HWPOISON_LARGE and
                         * VM_FAULT_HWPOISON are mutually exclusive.
                         */
                        if (fault & VM_FAULT_HWPOISON_LARGE)
                                lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
                        else if (fault & VM_FAULT_HWPOISON)
                                lsb = PAGE_SHIFT;

                        force_sig_mceerr(BUS_MCEERR_AR, (void __user *) address,
                                         lsb);
                        return;
                }
#endif

                show_signal_msg(regs, code, address, tsk, vma);
                force_sig_fault(signo, si_code, (void __user *) address);
                return;
        }
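
        /* The fault happened in kernel mode: fall through to the fixup path. */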
msg = "Page fault: bad address";
no_context:
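        /*
         * Kernel faults on user addresses are expected in helpers such as
         * copy_from_user(); the exception table provides a fixup branch
         * for those. Anything without a fixup entry is fatal.
         */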
        if (!user_mode(regs) && fixup_exception(regs))
                return;

        parisc_terminate(msg, regs, code, address);
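
        /*
         * handle_mm_fault() reported VM_FAULT_OOM. Kernel-mode faults are
         * treated as fatal; for user-mode faults we let the OOM machinery
         * decide what to kill.
         */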
out_of_memory:
        mmap_read_unlock(mm);
        if (!user_mode(regs)) {
                msg = "Page fault: out of memory";
                goto no_context;
        }
        pagefault_out_of_memory();
}