diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 8f145733ce..b0bd82f801 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -276,6 +276,8 @@ struct kvm_run;
 
 struct hax_vcpu_state;
 
+struct qemu_vcpu; /* Used by NVMM */
+
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)
 
@@ -454,6 +456,9 @@ struct CPUState {
 
     struct hax_vcpu_state *hax_vcpu;
 
+    /* Used by NVMM */
+    struct qemu_vcpu *qemu_vcpu;
+
     int hvf_fd;
 
     /* track IOMMUs whose translations we've cached in the TCG TLB */
diff --git a/target/i386/nvmm-all.c b/target/i386/nvmm-all.c
index a21908f46a..d76b05cc24 100644
--- a/target/i386/nvmm-all.c
+++ b/target/i386/nvmm-all.c
@@ -52,7 +52,7 @@ static struct qemu_machine qemu_mach;
 static struct qemu_vcpu *
 get_qemu_vcpu(CPUState *cpu)
 {
-    return (struct qemu_vcpu *)cpu->hax_vcpu;
+    return cpu->qemu_vcpu;
 }
 
 static struct nvmm_machine *
@@ -63,6 +63,34 @@ get_nvmm_mach(void)
 
 /* -------------------------------------------------------------------------- */
 
+static void nvmm_cpu_kick(CPUState *cpu)
+{
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+
+    nvmm_vcpu_immediate_exit(vcpu, true);
+}
+
+static void nvmm_cpu_kick_self(void)
+{
+    nvmm_cpu_kick(current_cpu);
+}
+
+static void nvmm_eat_signals(CPUState *cpu)
+{
+    struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
+    struct nvmm_vcpu *vcpu = &qcpu->vcpu;
+
+    nvmm_vcpu_immediate_exit(vcpu, false);
+
+    /* Write immediate_exit before the cpu->exit_request write in
+     * nvmm_vcpu_loop.
+     */
+    smp_wmb();
+}
+
+/* -------------------------------------------------------------------------- */
+
 static void
 nvmm_set_segment(struct nvmm_x64_state_seg *nseg, const SegmentCache *qseg)
 {
@@ -749,9 +777,19 @@ nvmm_vcpu_loop(CPUState *cpu)
         nvmm_vcpu_pre_run(cpu);
 
         if (atomic_read(&cpu->exit_request)) {
-            qemu_cpu_kick_self();
+            /*
+             * NVMM requires us to reenter the kernel after I/O exits to
+             * complete instruction emulation. This self-signal ensures
+             * that we leave again as soon as possible.
+             */
+            nvmm_cpu_kick_self();
         }
 
+        /* Read cpu->exit_request before nvmm_vcpu_run() reads immediate_exit.
+         * The matching barrier is in nvmm_eat_signals().
+         */
+        smp_rmb();
+
         ret = nvmm_vcpu_run(mach, vcpu);
         if (ret == -1) {
             error_report("NVMM: Failed to exec a virtual processor,"
@@ -764,6 +802,10 @@ nvmm_vcpu_loop(CPUState *cpu)
         switch (exit->reason) {
         case NVMM_VCPU_EXIT_NONE:
             break;
+        case NVMM_VCPU_EXIT_IMMEDIATE:
+            nvmm_eat_signals(cpu);
+            ret = EXCP_INTERRUPT;
+            break;
         case NVMM_VCPU_EXIT_MEMORY:
             ret = nvmm_handle_mem(mach, vcpu);
             break;
@@ -810,7 +852,7 @@ nvmm_vcpu_loop(CPUState *cpu)
 
     atomic_set(&cpu->exit_request, false);
 
-    return ret < 0;
+    return ret;
 }
 
 /* -------------------------------------------------------------------------- */
@@ -871,11 +913,8 @@ static Error *nvmm_migration_blocker;
 static void
 nvmm_ipi_signal(int sigcpu)
 {
-    struct qemu_vcpu *qcpu;
-
     if (current_cpu) {
-        qcpu = get_qemu_vcpu(current_cpu);
-        qcpu->stop = true;
+        nvmm_cpu_kick(current_cpu);
     }
 }
 
@@ -974,7 +1013,7 @@ nvmm_init_vcpu(CPUState *cpu)
     }
 
     cpu->vcpu_dirty = true;
-    cpu->hax_vcpu = (struct hax_vcpu_state *)qcpu;
+    cpu->qemu_vcpu = qcpu;
 
     return 0;
 }
@@ -982,7 +1021,7 @@ nvmm_init_vcpu(CPUState *cpu)
 int
 nvmm_vcpu_exec(CPUState *cpu)
 {
-    int ret, fatal;
+    int ret;
 
     while (1) {
         if (cpu->exception_index >= EXCP_INTERRUPT) {
@@ -991,9 +1030,12 @@ nvmm_vcpu_exec(CPUState *cpu)
             break;
         }
 
-        fatal = nvmm_vcpu_loop(cpu);
+        ret = nvmm_vcpu_loop(cpu);
+        if (ret > 0) {
+            break;
+        }
 
-        if (fatal) {
+        if (ret < 0) {
             error_report("NVMM: Failed to execute a VCPU.");
             abort();
         }
@@ -1009,7 +1051,7 @@ nvmm_destroy_vcpu(CPUState *cpu)
     struct qemu_vcpu *qcpu = get_qemu_vcpu(cpu);
 
     nvmm_vcpu_destroy(mach, &qcpu->vcpu);
-    g_free(cpu->hax_vcpu);
+    g_free(cpu->qemu_vcpu);
 }
 
 /* -------------------------------------------------------------------------- */
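As a sanity check of the ordering argument, here is a minimal single-threaded
user-space sketch of the kick protocol the patch implements. It is not QEMU
code: the names (fake_vcpu, kick, eat_signals, vcpu_iteration) are hypothetical
stand-ins, and C11 atomics and fences approximate QEMU's atomic_read()/
atomic_set() and smp_rmb()/smp_wmb(). It models the pairing the comments
describe: nvmm_eat_signals() must publish its immediate_exit clear before the
later cpu->exit_request clear, and the vcpu loop must read cpu->exit_request
before the kernel samples immediate_exit, so a kick arriving between the check
and nvmm_vcpu_run() is never lost.

/*
 * Minimal single-threaded model of the kick protocol, NOT QEMU code.
 * fake_vcpu, kick(), eat_signals() and vcpu_iteration() are hypothetical
 * stand-ins; C11 fences approximate smp_wmb()/smp_rmb().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_vcpu {
    atomic_bool exit_request;   /* models cpu->exit_request */
    atomic_bool immediate_exit; /* models the NVMM immediate-exit flag */
};

/* Models qemu_cpu_kick() -> nvmm_ipi_signal() -> nvmm_cpu_kick(). */
static void kick(struct fake_vcpu *v)
{
    atomic_store_explicit(&v->exit_request, true, memory_order_relaxed);
    atomic_store_explicit(&v->immediate_exit, true, memory_order_release);
}

/* Models nvmm_eat_signals(). */
static void eat_signals(struct fake_vcpu *v)
{
    atomic_store_explicit(&v->immediate_exit, false, memory_order_relaxed);
    /* smp_wmb(): order this clear before the exit_request clear below. */
    atomic_thread_fence(memory_order_release);
}

/* Models one pass through nvmm_vcpu_loop(); true means EXCP_INTERRUPT. */
static bool vcpu_iteration(struct fake_vcpu *v)
{
    bool interrupted = false;

    if (atomic_load_explicit(&v->exit_request, memory_order_relaxed)) {
        /* nvmm_cpu_kick_self(): force the "run" below to exit at once. */
        atomic_store_explicit(&v->immediate_exit, true, memory_order_relaxed);
    }
    /* smp_rmb(): read exit_request before immediate_exit is sampled. */
    atomic_thread_fence(memory_order_acquire);

    /* Stands in for nvmm_vcpu_run(): exits at once if the flag is set. */
    if (atomic_load_explicit(&v->immediate_exit, memory_order_acquire)) {
        eat_signals(v);     /* the NVMM_VCPU_EXIT_IMMEDIATE path */
        interrupted = true; /* ret = EXCP_INTERRUPT */
    }

    atomic_store_explicit(&v->exit_request, false, memory_order_relaxed);
    return interrupted;
}

int main(void)
{
    struct fake_vcpu v = { false, false };

    kick(&v);
    printf("kicked vcpu left the loop: %s\n",
           vcpu_iteration(&v) ? "yes" : "no");
    return 0;
}

The design point this mirrors is that the immediate-exit flag, not a pending
signal, is what forces nvmm_vcpu_run() back out to user space: that is why
nvmm_cpu_kick_self() replaces qemu_cpu_kick_self() and why the new
NVMM_VCPU_EXIT_IMMEDIATE reason maps to EXCP_INTERRUPT.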