Joerg Roedel
2021-Jun-16 18:49 UTC
[PATCH v6 1/2] x86/sev: Make sure IRQs are disabled while GHCB is active
From: Joerg Roedel <jroedel at suse.de>

The #VC handler only cares about IRQs being disabled while the GHCB is
active, as it must not be interrupted by something which could cause
another #VC while it holds the GHCB (NMI is the exception for which the
backup GHCB exists).

Make sure nothing interrupts the code path while the GHCB is active by
requiring that callers of __sev_get_ghcb() disable local interrupts
before taking the GHCB and keep them disabled until after
__sev_put_ghcb(). Both helpers now warn when entered with IRQs enabled.

Signed-off-by: Joerg Roedel <jroedel at suse.de>
---
 arch/x86/kernel/sev.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index 8178db07a06a..e0d12728bcb7 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -12,7 +12,6 @@
 #include <linux/sched/debug.h>	/* For show_regs() */
 #include <linux/percpu-defs.h>
 #include <linux/mem_encrypt.h>
-#include <linux/lockdep.h>
 #include <linux/printk.h>
 #include <linux/mm_types.h>
 #include <linux/set_memory.h>
@@ -192,11 +191,19 @@ void noinstr __sev_es_ist_exit(void)
 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
 }

-static __always_inline struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+/*
+ * Nothing shall interrupt this code path while holding the per-CPU
+ * GHCB. The backup GHCB is only for NMIs interrupting this path.
+ *
+ * Callers must disable local interrupts around it.
+ */
+static __always_inline struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
 {
 	struct sev_es_runtime_data *data;
 	struct ghcb *ghcb;

+	WARN_ON(!irqs_disabled());
+
 	data = this_cpu_read(runtime_data);
 	ghcb = &data->ghcb_page;

@@ -486,11 +493,13 @@ static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt
 /* Include code shared with pre-decompression boot stage */
 #include "sev-shared.c"

-static __always_inline void sev_es_put_ghcb(struct ghcb_state *state)
+static __always_inline void __sev_put_ghcb(struct ghcb_state *state)
 {
 	struct sev_es_runtime_data *data;
 	struct ghcb *ghcb;

+	WARN_ON(!irqs_disabled());
+
 	data = this_cpu_read(runtime_data);
 	ghcb = &data->ghcb_page;

@@ -514,7 +523,7 @@ void noinstr __sev_es_nmi_complete(void)
 	struct ghcb_state state;
 	struct ghcb *ghcb;

-	ghcb = sev_es_get_ghcb(&state);
+	ghcb = __sev_get_ghcb(&state);

 	vc_ghcb_invalidate(ghcb);
 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
@@ -524,7 +533,7 @@ void noinstr __sev_es_nmi_complete(void)
 	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
 	VMGEXIT();

-	sev_es_put_ghcb(&state);
+	__sev_put_ghcb(&state);
 }

 static u64 get_jump_table_addr(void)
@@ -536,7 +545,7 @@ static u64 get_jump_table_addr(void)

 	local_irq_save(flags);

-	ghcb = sev_es_get_ghcb(&state);
+	ghcb = __sev_get_ghcb(&state);

 	vc_ghcb_invalidate(ghcb);
 	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
@@ -550,7 +559,7 @@ static u64 get_jump_table_addr(void)
 	    ghcb_sw_exit_info_2_is_valid(ghcb))
 		ret = ghcb->save.sw_exit_info_2;

-	sev_es_put_ghcb(&state);
+	__sev_put_ghcb(&state);

 	local_irq_restore(flags);

@@ -673,9 +682,12 @@ static bool __init sev_es_setup_ghcb(void)
 static void sev_es_ap_hlt_loop(void)
 {
 	struct ghcb_state state;
+	unsigned long flags;
 	struct ghcb *ghcb;

-	ghcb = sev_es_get_ghcb(&state);
+	local_irq_save(flags);
+
+	ghcb = __sev_get_ghcb(&state);

 	while (true) {
 		vc_ghcb_invalidate(ghcb);
@@ -692,7 +704,9 @@ static void sev_es_ap_hlt_loop(void)
 		break;
 	}

-	sev_es_put_ghcb(&state);
+	__sev_put_ghcb(&state);
+
+	local_irq_restore(flags);
 }

 /*
@@ -1351,7 +1365,6 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 	}

 	irq_state = irqentry_nmi_enter(regs);
-	lockdep_assert_irqs_disabled();
 	instrumentation_begin();

 	/*
@@ -1360,7 +1373,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 	 * keep the IRQs disabled to protect us against concurrent TLB flushes.
 	 */

-	ghcb = sev_es_get_ghcb(&state);
+	ghcb = __sev_get_ghcb(&state);

 	vc_ghcb_invalidate(ghcb);
 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
@@ -1368,7 +1381,7 @@ DEFINE_IDTENTRY_VC_SAFE_STACK(exc_vmm_communication)
 	if (result == ES_OK)
 		result = vc_handle_exitcode(&ctxt, ghcb, error_code);

-	sev_es_put_ghcb(&state);
+	__sev_put_ghcb(&state);

 	/* Done - now check the result */
 	switch (result) {
-- 
2.31.1
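
The calling convention the patch establishes, condensed into a minimal
sketch (all names are taken from the diff above; the critical section
itself is elided):

	struct ghcb_state state;
	struct ghcb *ghcb;
	unsigned long flags;

	local_irq_save(flags);		/* callers must disable IRQs first */

	ghcb = __sev_get_ghcb(&state);	/* WARN_ON(!irqs_disabled()) trips otherwise */

	/* ... use the per-CPU GHCB; only an NMI may interrupt here,
	 * and it falls back to the backup GHCB ... */

	__sev_put_ghcb(&state);		/* also expects IRQs still off */

	local_irq_restore(flags);

This is the pattern already used in get_jump_table_addr() and now added
to sev_es_ap_hlt_loop() as well.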
Peter Zijlstra
2021-Jun-17 15:00 UTC
[PATCH v6 1/2] x86/sev: Make sure IRQs are disabled while GHCB is active
On Wed, Jun 16, 2021 at 08:49:12PM +0200, Joerg Roedel wrote:

>  static void sev_es_ap_hlt_loop(void)
>  {
>  	struct ghcb_state state;
> +	unsigned long flags;
>  	struct ghcb *ghcb;
>
> -	ghcb = sev_es_get_ghcb(&state);
> +	local_irq_save(flags);
> +
> +	ghcb = __sev_get_ghcb(&state);
>
>  	while (true) {
>  		vc_ghcb_invalidate(ghcb);
> @@ -692,7 +704,9 @@ static void sev_es_ap_hlt_loop(void)
>  		break;
>  	}
>
> -	sev_es_put_ghcb(&state);
> +	__sev_put_ghcb(&state);
> +
> +	local_irq_restore(flags);
>  }

I think this is broken; at this point RCU is quite dead on this CPU,
and local_irq_save()/local_irq_restore() include tracing and all sorts.

Also, shouldn't IRQs already be disabled by the time we get here?
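
A hedged sketch of one direction this comment points to, assuming the
IRQ disabling stays in this function at all: the kernel's
raw_local_irq_save()/raw_local_irq_restore() variants skip the
irqflags-tracing hooks, which makes them the usual choice on paths
where RCU is no longer watching.

	static void sev_es_ap_hlt_loop(void)
	{
		struct ghcb_state state;
		unsigned long flags;
		struct ghcb *ghcb;

		/* raw_* variants: no tracing, usable with RCU not watching */
		raw_local_irq_save(flags);

		ghcb = __sev_get_ghcb(&state);

		/* ... HLT/VMGEXIT loop exactly as in the patch ... */

		__sev_put_ghcb(&state);

		raw_local_irq_restore(flags);
	}

If IRQs are in fact guaranteed to be disabled on entry, as the second
question suggests, the save/restore pair could be dropped entirely in
favor of an assertion.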
Peter Zijlstra
2021-Jun-17 15:38 UTC
[PATCH v6 1/2] x86/sev: Make sure IRQs are disabled while GHCB is active
On Wed, Jun 16, 2021 at 08:49:12PM +0200, Joerg Roedel wrote:

> @@ -514,7 +523,7 @@ void noinstr __sev_es_nmi_complete(void)
>  	struct ghcb_state state;
>  	struct ghcb *ghcb;
>
> -	ghcb = sev_es_get_ghcb(&state);
> +	ghcb = __sev_get_ghcb(&state);
>
>  	vc_ghcb_invalidate(ghcb);
>  	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
> @@ -524,7 +533,7 @@ void noinstr __sev_es_nmi_complete(void)
>  	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
>  	VMGEXIT();
>
> -	sev_es_put_ghcb(&state);
> +	__sev_put_ghcb(&state);
>  }

I'm getting (with all of v6.1 applied):

vmlinux.o: warning: objtool: __sev_es_nmi_complete()+0x1bf: call to panic() leaves .noinstr.text section

$ ./scripts/faddr2line defconfig-build/vmlinux __sev_es_nmi_complete+0x1bf
__sev_es_nmi_complete+0x1bf/0x1d0:
__sev_get_ghcb at arch/x86/kernel/sev.c:223
(inlined by) __sev_es_nmi_complete at arch/x86/kernel/sev.c:519
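
For context on where that panic() call comes from: __sev_es_nmi_complete()
is noinstr, __sev_get_ghcb() is __always_inline, and the error path inside
it for the case that the GHCB and the backup GHCB are both busy ends in
panic() - roughly this shape (paraphrased, not the verbatim source):

	if (unlikely(data->ghcb_active)) {
		if (unlikely(data->backup_ghcb_active)) {
			/* Both GHCBs in use - no way to continue */
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
		}
		/* otherwise: save the active GHCB and hand out the backup */
	}

Since panic() is ordinary instrumentable code, a direct call from a
.noinstr.text function trips objtool's noinstr validation. The kernel's
convention for an unavoidable call like this is to bracket it with
instrumentation_begin()/instrumentation_end(); whether that is the right
fix here is exactly what this report puts on the table.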