in kvm/vcpu_sbi.c [114:170]
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret = 1;
        bool next_sepc = true;
        bool userspace_exit = false;
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
        const struct kvm_vcpu_sbi_extension *sbi_ext;
        struct kvm_cpu_trap utrap = { 0 };
        unsigned long out_val = 0;
        bool ext_is_v01 = false;

        sbi_ext = kvm_vcpu_sbi_find_ext(cp->a7);
        if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
                if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
                    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
                        ext_is_v01 = true;
#endif
                ret = sbi_ext->handler(vcpu, run, &out_val, &utrap, &userspace_exit);
        } else {
                /* Return an error for unsupported SBI calls */
                cp->a0 = SBI_ERR_NOT_SUPPORTED;
                goto ecall_done;
        }

        /* Handle the special cases, i.e. trap, exit, or userspace forward */
        if (utrap.scause) {
                /* No need to increment sepc or exit the ioctl loop */
                ret = 1;
                utrap.sepc = cp->sepc;
                kvm_riscv_vcpu_trap_redirect(vcpu, &utrap);
                next_sepc = false;
                goto ecall_done;
        }

        /* Exit the ioctl loop or propagate the error code to the guest */
        if (userspace_exit) {
                next_sepc = false;
                ret = 0;
        } else {
                /*
                 * SBI extension handlers always return a Linux error code.
                 * Convert it to the SBI-specific error code that can be
                 * propagated to the SBI caller.
                 */
                ret = kvm_linux_err_map_sbi(ret);
                cp->a0 = ret;
                ret = 1;
        }

ecall_done:
        if (next_sepc)
                cp->sepc += 4;
        if (!ext_is_v01)
                cp->a1 = out_val;

        return ret;
}
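
The dispatcher above looks up the extension by the SBI extension ID the guest placed in a7 and invokes its handler. The handler returns a Linux error code (mapped into a0 by kvm_linux_err_map_sbi), writes its result through out_val (copied to the guest's a1, except for legacy v0.1 calls), and can either redirect a trap back to the guest via utrap or request an exit to userspace. The sketch below is illustrative only: the handler prototype mirrors the sbi_ext->handler() call site shown above, the extid_start/extid_end fields assume the kvm_vcpu_sbi_extension layout used by kvm_vcpu_sbi_find_ext, and the extension ID, function IDs, and values are hypothetical.

/*
 * Hypothetical handler for an imaginary extension, for illustration only.
 * The prototype mirrors the sbi_ext->handler() call in the dispatcher;
 * the extension and function IDs below are made up.
 */
static int kvm_sbi_ext_demo_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
                                    unsigned long *out_val,
                                    struct kvm_cpu_trap *utrap, bool *exit)
{
        struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

        switch (cp->a6) {               /* SBI function ID */
        case 0:
                *out_val = 42;          /* reaches the guest via a1 */
                return 0;               /* mapped to a success code in a0 */
        case 1:
                /* A real handler would also fill *run before exiting. */
                *exit = true;           /* forward this call to userspace */
                return 0;
        default:
                return -EOPNOTSUPP;     /* mapped to an SBI error code */
        }
}

static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_demo = {
        .extid_start = 0x0A000000,      /* hypothetical extension ID range */
        .extid_end   = 0x0A000000,
        .handler     = kvm_sbi_ext_demo_handler,
};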