static int kvm_sbi_ext_rfence_handler()

in kvm/vcpu_sbi_replace.c [79:129]

static int kvm_sbi_ext_rfence_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
				      unsigned long *out_val,
				      struct kvm_cpu_trap *utrap, bool *exit)
{
	int ret = 0;
	unsigned long i;
	struct cpumask cm, hm;
	struct kvm_vcpu *tmp;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	unsigned long hmask = cp->a0;
	unsigned long hbase = cp->a1;
	unsigned long funcid = cp->a6;

	cpumask_clear(&cm);
	cpumask_clear(&hm);
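	/*
	 * Collect the host CPUs currently running the vCPUs selected by the
	 * guest's hart mask (hbase == -1UL selects all harts); vCPUs not
	 * loaded on any host CPU (tmp->cpu < 0) are skipped.
	 */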
	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		if (hbase != -1UL) {
			if (tmp->vcpu_id < hbase)
				continue;
			if (!(hmask & (1UL << (tmp->vcpu_id - hbase))))
				continue;
		}
		if (tmp->cpu < 0)
			continue;
		cpumask_set_cpu(tmp->cpu, &cm);
	}

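	/* Convert logical CPU IDs into the hart ID mask expected by SBI. */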
	riscv_cpuid_to_hartid_mask(&cm, &hm);

	switch (funcid) {
	case SBI_EXT_RFENCE_REMOTE_FENCE_I:
		ret = sbi_remote_fence_i(cpumask_bits(&hm));
		break;
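	/*
	 * Guest SFENCE.VMA requests are serviced with HFENCE.VVMA so that
	 * only the guest's VS-stage translations for the given address
	 * range (cp->a2, cp->a3) are flushed on the remote harts.
	 */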
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
		ret = sbi_remote_hfence_vvma(cpumask_bits(&hm), cp->a2, cp->a3);
		break;
	case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
		ret = sbi_remote_hfence_vvma_asid(cpumask_bits(&hm), cp->a2,
						  cp->a3, cp->a4);
		break;
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
	case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
	/* TODO: implement for nested hypervisor case */
	default:
		ret = -EOPNOTSUPP;
	}

	return ret;
}
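
For reference, a minimal guest-side sketch of the SBI call shape that lands in this handler. Per the SBI calling convention, the extension ID ("RFNC", 0x52464E43) goes in a7, the function ID in a6, and hart_mask/hart_mask_base in a0/a1, matching the cp->a0, cp->a1 and cp->a6 reads above. The function name below is illustrative and not taken from the kernel source.

#define SBI_EXT_RFENCE			0x52464E43	/* "RFNC" */
#define SBI_EXT_RFENCE_REMOTE_FENCE_I	0

/* Issue a remote FENCE.I request to the SBI implementation (here: KVM). */
static long guest_sbi_remote_fence_i(unsigned long hart_mask,
				     unsigned long hart_mask_base)
{
	register unsigned long a0 asm("a0") = hart_mask;
	register unsigned long a1 asm("a1") = hart_mask_base;
	register unsigned long a6 asm("a6") = SBI_EXT_RFENCE_REMOTE_FENCE_I;
	register unsigned long a7 asm("a7") = SBI_EXT_RFENCE;

	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a6), "r" (a7)
		     : "memory");

	return a0;	/* SBI error code; 0 on success */
}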