static int svm_set_msr()

in kvm/svm/svm.c [2916:3103]


static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int r;

	u32 ecx = msr->index;
	u64 data = msr->data;

	switch (ecx) {
	case MSR_AMD64_TSC_RATIO:
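		/*
		 * The TSC ratio is a fixed-point multiplier; per AMD's APM
		 * the integer part occupies bits 39:32 and the fraction
		 * bits 31:0, with the bits above caught by TSC_RATIO_RSVD.
		 */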
		if (!msr->host_initiated && !svm->tsc_scaling_enabled)
			return 1;

		if (data & TSC_RATIO_RSVD)
			return 1;

		svm->tsc_ratio_msr = data;

		if (svm->tsc_scaling_enabled && is_guest_mode(vcpu))
			nested_svm_update_tsc_ratio_msr(vcpu);

		break;
	case MSR_IA32_CR_PAT:
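		/*
		 * Besides the copy in vcpu->arch for common code, PAT is
		 * mirrored into the VMCB's g_pat field, which supplies the
		 * guest PAT under NPT; hence the VMCB_NPT dirty bit below.
		 */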
		if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
			return 1;
		vcpu->arch.pat = data;
		svm->vmcb01.ptr->save.g_pat = data;
		if (is_guest_mode(vcpu))
			nested_vmcb02_compute_g_pat(svm);
		vmcb_mark_dirty(svm->vmcb, VMCB_NPT);
		break;
	case MSR_IA32_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_has_spec_ctrl_msr(vcpu))
			return 1;

		if (kvm_spec_ctrl_test_value(data))
			return 1;

		if (boot_cpu_has(X86_FEATURE_V_SPEC_CTRL))
			svm->vmcb->save.spec_ctrl = data;
		else
			svm->spec_ctrl = data;
		if (!data)
			break;

		/*
		 * For non-nested:
		 * The first time the MSR is written with a non-zero value,
		 * pass it through to the guest.
		 *
		 * For nested:
		 * The handling of the MSR bitmap for L2 guests is done in
		 * nested_svm_vmrun_msrpm.
		 * We update the L1 MSR bit as well since it will end up
		 * touching the MSR anyway now.
		 */
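		/*
		 * The trailing (1, 1) arguments clear the read and write
		 * intercepts respectively, giving the guest direct access
		 * to the MSR from here on.
		 */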
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
		break;
	case MSR_IA32_PRED_CMD:
		if (!msr->host_initiated &&
		    !guest_has_pred_cmd_msr(vcpu))
			return 1;

		if (data & ~PRED_CMD_IBPB)
			return 1;
		if (!boot_cpu_has(X86_FEATURE_IBPB))
			return 1;
		if (!data)
			break;

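		/*
		 * Perform the barrier on the host right away, then let the
		 * guest write PRED_CMD directly from now on.  Reads stay
		 * intercepted (the 0 below): PRED_CMD is a write-only
		 * command MSR.
		 */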
		wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
		break;
	case MSR_AMD64_VIRT_SPEC_CTRL:
		if (!msr->host_initiated &&
		    !guest_cpuid_has(vcpu, X86_FEATURE_VIRT_SSBD))
			return 1;

		if (data & ~SPEC_CTRL_SSBD)
			return 1;

		svm->virt_spec_ctrl = data;
		break;
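	/*
	 * The SYSCALL/SYSENTER-related MSRs below live in the VMCB save
	 * area; hardware transfers them with VMLOAD/VMSAVE, so KVM only
	 * needs to keep L1's copy in vmcb01 current.
	 */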
	case MSR_STAR:
		svm->vmcb01.ptr->save.star = data;
		break;
#ifdef CONFIG_X86_64
	case MSR_LSTAR:
		svm->vmcb01.ptr->save.lstar = data;
		break;
	case MSR_CSTAR:
		svm->vmcb01.ptr->save.cstar = data;
		break;
	case MSR_KERNEL_GS_BASE:
		svm->vmcb01.ptr->save.kernel_gs_base = data;
		break;
	case MSR_SYSCALL_MASK:
		svm->vmcb01.ptr->save.sfmask = data;
		break;
#endif
	case MSR_IA32_SYSENTER_CS:
		svm->vmcb01.ptr->save.sysenter_cs = data;
		break;
	case MSR_IA32_SYSENTER_EIP:
		svm->vmcb01.ptr->save.sysenter_eip = (u32)data;
		/*
		 * We only intercept the MSR_IA32_SYSENTER_{EIP|ESP} MSRs
		 * when we spoof an Intel vendor ID (for cross-vendor
		 * migration).  In that case this intercept is used to track
		 * the high 32 bits of these MSRs in order to support Intel's
		 * implementation of SYSENTER/SYSEXIT.
		 */
		svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_IA32_SYSENTER_ESP:
		svm->vmcb01.ptr->save.sysenter_esp = (u32)data;
		svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0;
		break;
	case MSR_TSC_AUX:
		/*
		 * TSC_AUX is usually changed only during boot and never read
		 * directly.  Intercept TSC_AUX instead of exposing it to the
		 * guest via direct_access_msrs, and switch it via user return.
		 */
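		/*
		 * kvm_set_user_return_msr() updates per-CPU state and writes
		 * the MSR on the current CPU, so preemption must stay
		 * disabled across the call.
		 */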
		preempt_disable();
		r = kvm_set_user_return_msr(tsc_aux_uret_slot, data, -1ull);
		preempt_enable();
		if (r)
			return 1;

		svm->tsc_aux = data;
		break;
	case MSR_IA32_DEBUGCTLMSR:
		if (!lbrv) {
			vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
				    __func__, data);
			break;
		}
		if (data & DEBUGCTL_RESERVED_BITS)
			return 1;

		svm->vmcb->save.dbgctl = data;
		vmcb_mark_dirty(svm->vmcb, VMCB_LBR);
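		/* Bit 0 (DEBUGCTLMSR_LBR) toggles last-branch recording. */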
		if (data & DEBUGCTLMSR_LBR)
			svm_enable_lbrv(vcpu);
		else
			svm_disable_lbrv(vcpu);
		break;
	case MSR_VM_HSAVE_PA:
		/*
		 * Old kernels did not validate the value written to
		 * MSR_VM_HSAVE_PA.  Let KVM_SET_MSR store an invalid value
		 * so that buggy or malicious guests originating from those
		 * kernels can still be live migrated.
		 */
		if (!msr->host_initiated && !page_address_valid(vcpu, data))
			return 1;

		svm->nested.hsave_msr = data & PAGE_MASK;
		break;
	case MSR_VM_CR:
		return svm_set_vm_cr(vcpu, data);
	case MSR_VM_IGNNE:
		vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
		break;
	case MSR_F10H_DECFG: {
		struct kvm_msr_entry msr_entry;

		msr_entry.index = msr->index;
		if (svm_get_msr_feature(&msr_entry))
			return 1;

		/* Check the supported bits */
		if (data & ~msr_entry.data)
			return 1;

		/* Don't allow the guest to change a bit, #GP */
		if (!msr->host_initiated && (data ^ msr_entry.data))
			return 1;

		svm->msr_decfg = data;
		break;
	}
	default:
		return kvm_set_msr_common(vcpu, msr);
	}
	return 0;
}
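
For context, writes arrive here either from a guest WRMSR exit (msr->host_initiated is false, so the CPUID-based gates above apply) or from userspace via the KVM_SET_MSRS ioctl (host_initiated is true, bypassing them). Below is a minimal userspace sketch of the latter path; the helper name and vcpu_fd are illustrative, and a vCPU fd obtained via KVM_CREATE_VCPU is assumed:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative sketch: host-initiated write of a single MSR to a vCPU.
 * struct kvm_msrs ends in a flexible array of entries, so a wrapper
 * struct reserves room for exactly one. */
static int set_one_msr(int vcpu_fd, __u32 index, __u64 value)
{
	struct {
		struct kvm_msrs hdr;
		struct kvm_msr_entry entry;
	} req;

	memset(&req, 0, sizeof(req));
	req.hdr.nmsrs = 1;
	req.entry.index = index;
	req.entry.data = value;

	/* KVM_SET_MSRS returns the number of MSRs successfully set. */
	return ioctl(vcpu_fd, KVM_SET_MSRS, &req) == 1 ? 0 : -1;
}

A call such as set_one_msr(vcpu_fd, 0xc0000081, value), with 0xc0000081 being MSR_STAR, would then land in the MSR_STAR case above.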