Displaying 20 results from an estimated 41 matches for "sev_es_active".
2020 Apr 28
0
[PATCH v3 38/75] x86/sev-es: Add SEV-ES Feature Detection
From: Joerg Roedel <jroedel at suse.de>
Add the sev_es_active function for checking whether SEV-ES is enabled.
Also cache the value of MSR_AMD64_SEV at boot to speed up the feature
checking in the running code.
Signed-off-by: Joerg Roedel <jroedel at suse.de>
---
arch/x86/include/asm/mem_encrypt.h | 3 +++
arch/x86/include/asm/msr-index.h | 2 ++...
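To make the scheme described in that commit message concrete, a minimal sketch (not the patch itself) could look like the following. The accessor body matches the hunk quoted in the reply below; the boot-time caching helper, its name and its CPUID guard are illustrative assumptions only.

/* Cached copy of MSR_AMD64_SEV, read once at boot (sketch). */
static u64 sev_status __ro_after_init;

/* Hypothetical helper name; in the real series the MSR is read during
 * early SME/SEV setup. */
static void __init cache_sev_status(void)
{
        if (boot_cpu_has(X86_FEATURE_SEV))      /* assumed guard */
                rdmsrl(MSR_AMD64_SEV, sev_status);
}

bool sev_es_active(void)
{
        return !!(sev_status & MSR_AMD64_SEV_ES_ENABLED);
}

With the MSR value cached in sev_status, later callers of sev_es_active() only test a bit in memory instead of re-reading the MSR.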
2020 May 20
1
[PATCH v3 38/75] x86/sev-es: Add SEV-ES Feature Detection
On Tue, Apr 28, 2020 at 05:16:48PM +0200, Joerg Roedel wrote:
> +bool sev_es_active(void)
> +{
> + return !!(sev_status & MSR_AMD64_SEV_ES_ENABLED);
> +}
> +EXPORT_SYMBOL_GPL(sev_es_active);
I don't see this being used in modules anywhere in the patchset. Or am I
missing something?
--
Regards/Gruss,
Boris.
https://people.kernel.org/tglx/notes-about-neti...
2020 Jul 24
0
[PATCH v5 38/75] x86/sev-es: Add SEV-ES Feature Detection
From: Joerg Roedel <jroedel at suse.de>
Add the sev_es_active function for checking whether SEV-ES is enabled.
Also cache the value of MSR_AMD64_SEV at boot to speed up the feature
checking in the running code.
Signed-off-by: Joerg Roedel <jroedel at suse.de>
---
arch/x86/include/asm/mem_encrypt.h | 3 +++
arch/x86/include/asm/msr-index.h | 2 ++
ar...
2020 Sep 07
0
[PATCH v7 36/72] x86/sev-es: Add SEV-ES Feature Detection
From: Joerg Roedel <jroedel at suse.de>
Add the sev_es_active function for checking whether SEV-ES is enabled.
Also cache the value of MSR_AMD64_SEV at boot to speed up the feature
checking in the running code.
Signed-off-by: Joerg Roedel <jroedel at suse.de>
Reviewed-by: Kees Cook <keescook at chromium.org>
---
arch/x86/include/asm/mem_encrypt....
2020 Aug 24
0
[PATCH v6 39/76] x86/sev-es: Add SEV-ES Feature Detection
From: Joerg Roedel <jroedel at suse.de>
Add the sev_es_active function for checking whether SEV-ES is enabled.
Also cache the value of MSR_AMD64_SEV at boot to speed up the feature
checking in the running code.
Signed-off-by: Joerg Roedel <jroedel at suse.de>
Reviewed-by: Kees Cook <keescook at chromium.org>
Link: https://lore.kernel.org/r/202007...
2020 Feb 11
0
[PATCH 35/62] x86/sev-es: Setup per-cpu GHCBs for the runtime handler
..._encrypt.h
@@ -48,6 +48,7 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
void __init mem_encrypt_init(void);
void __init mem_encrypt_free_decrypted_mem(void);
+void __init encrypted_state_init_ghcbs(void);
bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);
@@ -71,6 +72,7 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void encrypted_state_init_ghcbs(void) { }
static inline bool sme_...
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...MI->#DB) may back-adjust the IST entry
> + * too early.
Is this comment accurate? I cannot find the patch touching
nmi_enter/exit().
> + */
> +void noinstr sev_es_ist_enter(struct pt_regs *regs)
> +{
> + unsigned long old_ist, new_ist;
> + unsigned long *p;
> +
> + if (!sev_es_active())
> + return;
> +
> + /* Read old IST entry */
> + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
> +
> + /* Make room on the IST stack */
> + if (on_vc_stack(regs->sp))
> + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
> + else
> + n...
2020 Sep 07
0
[PATCH v7 41/72] x86/sev-es: Setup per-cpu GHCBs for the runtime handler
...pt.h
+++ b/arch/x86/include/asm/mem_encrypt.h
@@ -49,6 +49,7 @@ void __init mem_encrypt_free_decrypted_mem(void);
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void);
+void __init sev_es_init_vc_handling(void);
bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);
@@ -72,6 +73,7 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sev_es_init_vc_handling(void) { }
static inline bool sme_act...
2020 Apr 28
0
[PATCH v3 43/75] x86/sev-es: Setup per-cpu GHCBs for the runtime handler
...mem_encrypt.h
@@ -48,6 +48,7 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
void __init mem_encrypt_init(void);
void __init mem_encrypt_free_decrypted_mem(void);
+void __init sev_es_init_vc_handling(void);
bool sme_active(void);
bool sev_active(void);
bool sev_es_active(void);
@@ -71,6 +72,7 @@ static inline void __init sme_early_init(void) { }
static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
static inline void __init sme_enable(struct boot_params *bp) { }
+static inline void sev_es_init_vc_handling(void) { }
static inline bool sme_act...
2020 Jul 15
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...t first, but needed to move it out because the possible DR7
access needs the #VC stack already adjusted.
> > + */
> > +void noinstr sev_es_ist_enter(struct pt_regs *regs)
> > +{
> > + unsigned long old_ist, new_ist;
> > + unsigned long *p;
> > +
> > + if (!sev_es_active())
> > + return;
> > +
> > + /* Read old IST entry */
> > + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
> > +
> > + /* Make room on the IST stack */
> > + if (on_vc_stack(regs->sp))
> > + new_ist = ALIGN_DOWN(regs->sp, 8)...
2020 Feb 11
1
[PATCH 35/62] x86/sev-es: Setup per-cpu GHCBs for the runtime handler
...__init early_set_memory_encrypted(unsigned long vaddr, unsigned long size);
> void __init mem_encrypt_init(void);
> void __init mem_encrypt_free_decrypted_mem(void);
>
> +void __init encrypted_state_init_ghcbs(void);
> bool sme_active(void);
> bool sev_active(void);
> bool sev_es_active(void);
> @@ -71,6 +72,7 @@ static inline void __init sme_early_init(void) { }
> static inline void __init sme_encrypt_kernel(struct boot_params *bp) { }
> static inline void __init sme_enable(struct boot_params *bp) { }
>
> +static inline void encrypted_state_init_ghcbs(void) { }
&...
2020 May 29
1
[PATCH v3 69/75] x86/realmode: Setup AP jump table
...;
> +#include <asm/sev-es.h>
>
> struct real_mode_header *real_mode_header;
> u32 *trampoline_cr4_features;
> @@ -107,6 +108,11 @@ static void __init setup_real_mode(void)
> if (sme_active())
> trampoline_header->flags |= TH_FLAGS_SME_ACTIVE;
>
> + if (sev_es_active()) {
> + if (sev_es_setup_ap_jump_table(real_mode_header))
> + panic("Failed to update SEV-ES AP Jump Table");
> + }
> +
So this function gets slowly sprinkled with
if (sev-something)
bla
Please wrap at least those last two into a
sev_setup_real_mode()
or so.
--...
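For reference, the kind of wrapper being asked for could look roughly like this; later postings in the series (see the v4/v5 hunks further down) do add an sme_sev_setup_real_mode() helper along these lines. The body below is a sketch assembled from the hunks quoted in this thread, not the final patch:

/* Sketch of the suggested wrapper; later versions name it
 * sme_sev_setup_real_mode() and pass the trampoline header in. */
static void __init sev_setup_real_mode(struct trampoline_header *th)
{
        if (sme_active())
                th->flags |= TH_FLAGS_SME_ACTIVE;

        if (sev_es_active()) {
                if (sev_es_setup_ap_jump_table(real_mode_header))
                        panic("Failed to update SEV-ES AP Jump Table");
        }
}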
2020 Jul 14
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...an also be
+ * unconditionally back-adjusted in sev_es_nmi_exit(). Otherwise a
+ * nested nmi_exit() call (#VC->NMI->#DB) may back-adjust the IST entry
+ * too early.
+ */
+void noinstr sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+ unsigned long *p;
+
+ if (!sev_es_active())
+ return;
+
+ /* Read old IST entry */
+ old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /* Make room on the IST stack */
+ if (on_vc_stack(regs->sp))
+ new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+ else
+ new_ist = old_ist - sizeof(old_ist);
+
+ /* Store o...
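The comment above promises a matching, unconditional back-adjustment on the exit path (called sev_es_nmi_exit() in the quoted text), whose body is not shown in this excerpt. A rough sketch, assuming the old IST value is stored at the top of the newly adjusted stack as the truncated "Store o..." line suggests:

/* Sketch only: undo the adjustment made by sev_es_ist_enter(). */
void noinstr sev_es_nmi_exit(void)
{
        unsigned long ist;

        if (!sev_es_active())
                return;

        /* Read the currently adjusted IST entry */
        ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

        /* Restore the old value that sev_es_ist_enter() stored there */
        __this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC],
                         *(unsigned long *)ist);
}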
2020 Jul 14
0
[PATCH v4 70/75] x86/head/64: Don't call verify_cpu() on starting APs
...diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 61a52b925d15..df701f87ddef 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -46,6 +46,12 @@ static void sme_sev_setup_real_mode(struct trampoline_header *th)
th->flags |= TH_FLAGS_SME_ACTIVE;
if (sev_es_active()) {
+ /*
+ * Skip the call to verify_cpu() in secondary_startup_64 as it
+ * will cause #VC exceptions when the AP can't handle them yet.
+ */
+ th->start = (u64) secondary_startup_64_no_verify;
+
if (sev_es_setup_ap_jump_table(real_mode_header))
panic("Failed to update...
2020 Jul 24
0
[PATCH v5 39/75] x86/sev-es: Print SEV-ES info into kernel log
...ryption */
+ if (sme_active()) {
+ /*
+ * SME is mutually exclusive with any of the SEV
+ * features below.
+ */
+ pr_cont(" SME\n");
+ return;
+ }
+
+ /* Secure Encrypted Virtualization */
+ if (sev_active())
+ pr_cont(" SEV");
+
+ /* Encrypted Register State */
+ if (sev_es_active())
+ pr_cont(" SEV-ES");
+
+ pr_cont("\n");
+}
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
@@ -422,8 +447,6 @@ void __init mem_encrypt_init(void)
if (sev_active())
static_branch_enable(&sev_enable_key);
- pr_info("AM...
2020 Jul 24
0
[PATCH v5 70/75] x86/head/64: Don't call verify_cpu() on starting APs
...diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
index 61a52b925d15..df701f87ddef 100644
--- a/arch/x86/realmode/init.c
+++ b/arch/x86/realmode/init.c
@@ -46,6 +46,12 @@ static void sme_sev_setup_real_mode(struct trampoline_header *th)
th->flags |= TH_FLAGS_SME_ACTIVE;
if (sev_es_active()) {
+ /*
+ * Skip the call to verify_cpu() in secondary_startup_64 as it
+ * will cause #VC exceptions when the AP can't handle them yet.
+ */
+ th->start = (u64) secondary_startup_64_no_verify;
+
if (sev_es_setup_ap_jump_table(real_mode_header))
panic("Failed to update...
2020 Apr 28
0
[PATCH v3 74/75] x86/sev-es: Handle NMI State
...h/x86/kernel/nmi.c
@@ -511,6 +511,13 @@ NOKPROBE_SYMBOL(is_debug_stack);
dotraplinkage notrace void
do_nmi(struct pt_regs *regs, long error_code)
{
+ /*
+ * Re-enable NMIs right here when running as an SEV-ES guest. This might
+ * cause nested NMIs, but those can be handled safely.
+ */
+ if (sev_es_active())
+ sev_es_nmi_complete();
+
if (IS_ENABLED(CONFIG_SMP) && cpu_is_offline(smp_processor_id()))
return;
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 00a5d0483730..eef6e2196ef4 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -341,6 +3...
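For context, "re-enable NMIs" for an SEV-ES guest means telling the hypervisor, via the GHCB, that the current NMI has been handled so the next one may be injected. A sketch of what sev_es_nmi_complete() could do, using the GHCB accessors seen elsewhere in the series; the exact body here is an assumption, not the posted patch:

void sev_es_nmi_complete(void)
{
        struct ghcb_state state;
        struct ghcb *ghcb;

        ghcb = sev_es_get_ghcb(&state);         /* per-cpu GHCB, assumed API */

        /* Build an SVM_VMGEXIT_NMI_COMPLETE request and exit to the HV */
        vc_ghcb_invalidate(ghcb);
        ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
        ghcb_set_sw_exit_info_1(ghcb, 0);
        ghcb_set_sw_exit_info_2(ghcb, 0);

        sev_es_wr_ghcb_msr(__pa(ghcb));
        VMGEXIT();

        sev_es_put_ghcb(&state);
}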
2020 Apr 14
3
[PATCH 40/70] x86/sev-es: Setup per-cpu GHCBs for the runtime handler
...00644
>> --- a/arch/x86/kernel/sev-es.c
>> +++ b/arch/x86/kernel/sev-es.c
>> @@ -197,6 +203,26 @@ static bool __init sev_es_setup_ghcb(void)
>> return true;
>> }
>>
>> +void sev_es_init_ghcbs(void)
>> +{
>> + int cpu;
>> +
>> + if (!sev_es_active())
>> + return;
>> +
>> + /* Allocate GHCB pages */
>> + ghcb_page = __alloc_percpu(sizeof(struct ghcb), PAGE_SIZE);
>> +
>> + /* Initialize per-cpu GHCB pages */
>> + for_each_possible_cpu(cpu) {
>> + struct ghcb *ghcb = (struct ghcb *)per_cpu_ptr(...
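The excerpt cuts off inside the per-cpu loop. The point under discussion is that each GHCB page must be shared with (i.e. mapped decrypted for) the hypervisor before it can be used. A sketch of how the loop body plausibly continues, offered as an assumption rather than the quoted patch:

        /* Sketch: make each per-cpu GHCB shared with the hypervisor */
        for_each_possible_cpu(cpu) {
                struct ghcb *ghcb = (struct ghcb *)per_cpu_ptr(ghcb_page, cpu);

                /* The GHCB is one page in size */
                set_memory_decrypted((unsigned long)ghcb,
                                     sizeof(*ghcb) >> PAGE_SHIFT);
                memset(ghcb, 0, sizeof(*ghcb));
        }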