in kernel/nmi.c [373:496]
int notrace s390_do_machine_check(struct pt_regs *regs)
{
	static int ipd_count;
	static DEFINE_SPINLOCK(ipd_lock);
	static unsigned long long last_ipd;
	struct mcck_struct *mcck;
	unsigned long long tmp;
	union mci mci;
	unsigned long mcck_dam_code;
	int mcck_pending = 0;
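
	/*
	 * Machine checks are handled in NMI context: only the per-CPU
	 * mcck_struct is filled in here, and the bulk of the handling is
	 * deferred via schedule_mcck_handler() whenever possible.
	 */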
	nmi_enter();

	if (user_mode(regs))
		update_timer_mcck();
	inc_irq_stat(NMI_NMI);
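	/*
	 * The machine check interruption code (MCIC) is stored by the
	 * hardware in the lowcore.
	 */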
	mci.val = S390_lowcore.mcck_interruption_code;
	mcck = this_cpu_ptr(&cpu_mcck);

	/*
	 * Reinject instruction-processing-damage machine checks, including
	 * the Delayed Access Exception, into the guest instead of damaging
	 * the host if they happen in the guest.
	 */
	if (mci.pd && !test_cpu_flag(CIF_MCCK_GUEST)) {
		if (mci.b) {
			/* Processing backup -> verify if we can survive this */
			u64 z_mcic, o_mcic, t_mcic;
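			/*
			 * z_mcic: MCIC bits that must be zero, o_mcic: bits
			 * (mostly validity flags) that must all be one; any
			 * other combination is treated as fatal damage.
			 */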
			z_mcic = (1ULL<<63 | 1ULL<<59 | 1ULL<<29);
			o_mcic = (1ULL<<43 | 1ULL<<42 | 1ULL<<41 | 1ULL<<40 |
				  1ULL<<36 | 1ULL<<35 | 1ULL<<34 | 1ULL<<32 |
				  1ULL<<30 | 1ULL<<21 | 1ULL<<20 | 1ULL<<17 |
				  1ULL<<16);
			t_mcic = mci.val;

			if (((t_mcic & z_mcic) != 0) ||
			    ((t_mcic & o_mcic) != o_mcic)) {
				s390_handle_damage();
			}

			/*
			 * This is a nullifying exigent condition, therefore we
			 * might be able to retry the instruction.
			 */
			spin_lock(&ipd_lock);
			tmp = get_tod_clock();
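			/*
			 * Count events that occur within MAX_IPD_TIME of the
			 * previous one; a run of MAX_IPD_COUNT closely spaced
			 * instruction-processing-damage checks is fatal.
			 */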
			if (((tmp - last_ipd) >> 12) < MAX_IPD_TIME)
				ipd_count++;
			else
				ipd_count = 1;
			last_ipd = tmp;
			if (ipd_count == MAX_IPD_COUNT)
				s390_handle_damage();
			spin_unlock(&ipd_lock);
		} else {
			/* Processing damage -> stopping machine */
			s390_handle_damage();
		}
	}
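	/*
	 * s390_validate_registers() restores register contents from the
	 * machine check save areas; a non-zero result means some of that
	 * state could not be recovered.
	 */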
	if (s390_validate_registers(mci, user_mode(regs))) {
		/*
		 * Couldn't restore all register contents for the
		 * user space process -> mark task for termination.
		 */
		mcck->kill_task = 1;
		mcck->mcck_code = mci.val;
		mcck_pending = 1;
	}

	/*
	 * Back up the machine check's info if it happened while the guest
	 * was running.
	 */
	if (test_cpu_flag(CIF_MCCK_GUEST))
		s390_backup_mcck_info(regs);

	if (mci.cd) {
		/* Timing facility damage */
		s390_handle_damage();
	}
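	/*
	 * STP (Server Time Protocol) sync check and island conditions are
	 * remembered in stp_queue so that the deferred machine check
	 * handler can deal with them outside of NMI context.
	 */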
	if (mci.ed && mci.ec) {
		/* External damage */
		if (S390_lowcore.external_damage_code & (1U << ED_STP_SYNC))
			mcck->stp_queue |= stp_sync_check();
		if (S390_lowcore.external_damage_code & (1U << ED_STP_ISLAND))
			mcck->stp_queue |= stp_island_check();
		mcck_pending = 1;
	}

	if (mci.cp) {
		/* Channel report word pending */
		mcck->channel_report = 1;
		mcck_pending = 1;
	}
	if (mci.w) {
		/* Warning pending */
		mcck->warning = 1;
		mcck_pending = 1;
	}

	/*
	 * If there are only Channel Report Pending and External Damage
	 * machine checks, they will not be reinjected into the guest
	 * because they refer to host conditions only.
	 */
	mcck_dam_code = (mci.val & MCIC_SUBCLASS_MASK);
	if (test_cpu_flag(CIF_MCCK_GUEST) &&
	    (mcck_dam_code & MCCK_CODE_NO_GUEST) != mcck_dam_code) {
		/* Set exit reason code for host's later handling */
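		/*
		 * gprs[15] is the stack pointer of the interrupted SIE
		 * caller; storing -EINTR in the SIE reason slot of that
		 * stack frame tells KVM, once sie64a() returns, that the
		 * machine check needs to be reinjected into the guest.
		 */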
		*((long *)(regs->gprs[15] + __SF_SIE_REASON)) = -EINTR;
	}
	clear_cpu_flag(CIF_MCCK_GUEST);

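	/*
	 * A non-zero return value tells the low-level machine check entry
	 * code that there is pending work to handle before returning to
	 * user space; otherwise it is deferred via schedule_mcck_handler().
	 */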
	if (user_mode(regs) && mcck_pending) {
		nmi_exit();
		return 1;
	}

	if (mcck_pending)
		schedule_mcck_handler();

	nmi_exit();
	return 0;
}