int kvm_riscv_vcpu_set_reg_fp()

in kvm/vcpu_fp.c [123:167]

Handles the KVM_SET_ONE_REG path for the RISC-V F and D floating-point register groups: it validates the register id and access size against the vCPU's ISA extensions, then copies the user-supplied value into the guest FP context.

int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
			      const struct kvm_one_reg *reg,
			      unsigned long rtype)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long isa = vcpu->arch.isa;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
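	/* Strip the arch, size, and type fields to get the register index. */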
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    rtype);
	void *reg_val;

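	/* Single-precision (F) group: all accesses must be 32-bit wide. */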
	if ((rtype == KVM_REG_RISCV_FP_F) &&
	    riscv_isa_extension_available(&isa, f)) {
		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
			return -EINVAL;
		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
			reg_val = &cntx->fp.f.fcsr;
		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
			reg_val = &cntx->fp.f.f[reg_num];
		else
			return -EINVAL;
	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
		   riscv_isa_extension_available(&isa, d)) {
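		/* Double-precision (D) group: fcsr is 32-bit, f[0]..f[31] are 64-bit. */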
		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
				return -EINVAL;
			reg_val = &cntx->fp.d.fcsr;
		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
				return -EINVAL;
			reg_val = &cntx->fp.d.f[reg_num];
		} else
			return -EINVAL;
	} else
		return -EINVAL;

	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
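
For context, a minimal userspace sketch of how this handler is reached (not part of the kernel tree). The helper name set_guest_f0() and the vcpu_fd parameter are assumptions for illustration; vcpu_fd is taken to be a vCPU file descriptor obtained via KVM_CREATE_VCPU, and the snippet builds only against RISC-V KVM UAPI headers. On failure the ioctl returns -1 with errno set (e.g. EINVAL or EFAULT from the checks above).

/*
 * Illustrative only: set the guest's f[0] register to a 32-bit value via
 * KVM_SET_ONE_REG. Register index 0 is what KVM_REG_RISCV_FP_F_REG(f[0])
 * evaluates to in the RISC-V UAPI headers.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int set_guest_f0(int vcpu_fd, uint32_t value)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U32 |
		      KVM_REG_RISCV_FP_F | 0,	/* index 0 => f[0] */
		.addr = (uintptr_t)&value,
	};

	/*
	 * The ioctl lands in kvm_riscv_vcpu_set_reg_fp() with
	 * rtype == KVM_REG_RISCV_FP_F; on success the 32-bit value is
	 * copied into cntx->fp.f.f[0], provided the guest ISA has F.
	 */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}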