static int handle_pfmf()

in kvm/priv.c [1016:1126]


static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

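	/* PFMF is only available with the enhanced-DAT facility 1 (EDAT-1) */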
	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

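	/* PFMF is a privileged instruction */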
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

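	/* Reserved bits in the R1 field must be zero */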
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

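	/* Extract the access key, the NQ control and the start address */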
	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

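	/* Clearing the frame must respect low-address protection */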
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

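	/* The frame-size code selects a 4K, 1M or 2G frame */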
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
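		/* 1M frame */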
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/*
		 * only support 2G frame size if EDAT2 is available and we are
		 * not in 24-bit addressing mode
		 */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

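	/* Process the frame one 4K page at a time */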
	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

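		/* Zero out the page contents if the clear-frame control is set */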
		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

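		/* Set the storage key if the set-key control is set */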
		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			mmap_read_lock(current->mm);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
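			/* Fault the page in and retry if the key could not be set */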
			if (rc < 0) {
				rc = fixup_user_fault(current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			mmap_read_unlock(current->mm);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
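	/* For 1M and 2G frames, write the ending address back to R2 */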
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
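			/* 24/31-bit mode: replace only the low 32 bits of R2 */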
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}