static u64 read_id_reg()

in kvm/sys_regs.c [1063:1120]
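
This is the common read path for the ID registers KVM emulates: start from the host's sanitised view of the register, then mask or cap the fields that must not leak into the guest (SVE, AMU, CSV2/CSV3, GIC, MTE, pointer authentication, and the debug/PMU/SPE versions).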


static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		struct sys_reg_desc const *r, bool raz)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	/* RAZ registers read as zero, regardless of what the host implements. */
	if (raz)
		return 0;

	/* Start from the system-wide sanitised view of the feature register. */
	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
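		/* Advertise SVE only if this vCPU was configured with it. */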
		if (!vcpu_has_sve(vcpu))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_SVE);
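		/* The AMU is not virtualised; never advertise it to guests. */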
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_AMU);
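		/* CSV2/CSV3 mirror the per-VM values, which userspace may have downgraded. */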
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
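		/* With an in-kernel GICv3, advertise the sysreg CPU interface (GIC = 1). */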
		if (irqchip_in_kernel(vcpu->kvm) &&
		    vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_GIC);
			val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_GIC), 1);
		}
		break;
	case SYS_ID_AA64PFR1_EL1:
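		/* Hide MTE unless it was enabled for this VM. */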
		if (!kvm_has_mte(vcpu->kvm))
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
		break;
	case SYS_ID_AA64ISAR1_EL1:
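		/* No ptrauth: hide both address (APA/API) and generic (GPA/GPI) keys. */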
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_GPI));
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 (DebugVer == 0b0110) */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER);
		val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_AA64DFR0_PMUVER_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
		/* Hide SPE from guests */
		val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
		break;
	}

	return val;
}
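
Two idioms recur above. Each ID register feature field is architecturally four bits wide, so ARM64_FEATURE_MASK() yields a four-bit mask for the named field and every fix-up follows the same clear-then-insert pattern; cpuid_feature_cap_perfmon_field() additionally treats the IMPLEMENTATION DEFINED encoding (0xf) as "not implemented" before clamping. Below is a minimal user-space sketch of both patterns. MASK(), PREP(), set_field() and cap_field() are stand-ins written for this note, not kernel API; the register value is invented, and __builtin_ctzll() assumes gcc/clang.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's GENMASK_ULL()/FIELD_PREP() helpers. */
#define MASK(hi, lo)	(((~0ULL) >> (63 - (hi))) & ((~0ULL) << (lo)))
#define PREP(mask, v)	(((uint64_t)(v) << __builtin_ctzll(mask)) & (mask))

/* The clear-then-insert idiom used for CSV2, CSV3, GIC and DEBUGVER. */
static uint64_t set_field(uint64_t val, uint64_t mask, uint64_t v)
{
	val &= ~mask;
	return val | PREP(mask, v);
}

/*
 * Roughly the capping behaviour applied to PMUVER/PERFMON: squash the
 * IMP DEF encoding (0xf), then clamp the field to the given ceiling.
 */
static uint64_t cap_field(uint64_t val, unsigned int shift, uint64_t cap)
{
	uint64_t field = (val >> shift) & 0xf;

	if (field == 0xf)
		field = 0;
	if (field > cap)
		field = cap;
	return set_field(val, MASK(shift + 3, shift), field);
}

int main(void)
{
	uint64_t reg = 0xf21;	/* made-up: [11:8]=0xf, [7:4]=0x2, [3:0]=0x1 */

	reg = set_field(reg, MASK(7, 4), 1);	/* force bits [7:4] to 1 */
	reg = cap_field(reg, 8, 4);		/* IMP DEF reads as absent */
	printf("0x%llx\n", (unsigned long long)reg);	/* prints 0x11 */
	return 0;
}

The clear-before-OR ordering matters: FIELD_PREP() only shifts the new value into position, so stale bits in the field would survive a bare OR.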