Displaying 20 results from an estimated 25 matches for "cpu_tss_rw".
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...e patch touching
nmi_enter/exit()?
> + */
> +void noinstr sev_es_ist_enter(struct pt_regs *regs)
> +{
> + unsigned long old_ist, new_ist;
> + unsigned long *p;
> +
> + if (!sev_es_active())
> + return;
> +
> + /* Read old IST entry */
> + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
> +
> + /* Make room on the IST stack */
> + if (on_vc_stack(regs->sp))
> + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
> + else
> + new_ist = old_ist - sizeof(old_ist);
> +
> + /* Store old IST entry */
> + p = (unsigned...
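The quoted code places the new IST entry either just below the interrupted frame (when the NMI hit while already running on the #VC stack) or just below the previous entry. The on_vc_stack() check it relies on is not shown in the snippet; a minimal sketch of such a range check, with per-CPU bounds that are assumptions for illustration only, could look like this:

/*
 * Hypothetical sketch only: test whether an address lies on this CPU's
 * #VC IST stack. The real helper derives the bounds from the cpu_entry_area;
 * vc_stack_bottom/vc_stack_top are made-up per-CPU variables assumed to be
 * filled in when the stacks are set up.
 */
static DEFINE_PER_CPU(unsigned long, vc_stack_bottom);
static DEFINE_PER_CPU(unsigned long, vc_stack_top);

static __always_inline bool on_vc_stack(unsigned long sp)
{
	unsigned long bottom = __this_cpu_read(vc_stack_bottom);
	unsigned long top    = __this_cpu_read(vc_stack_top);

	return sp >= bottom && sp < top;
}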
2020 Jul 15
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...> +void noinstr sev_es_ist_enter(struct pt_regs *regs)
> > +{
> > + unsigned long old_ist, new_ist;
> > + unsigned long *p;
> > +
> > + if (!sev_es_active())
> > + return;
> > +
> > + /* Read old IST entry */
> > + old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
> > +
> > + /* Make room on the IST stack */
> > + if (on_vc_stack(regs->sp))
> > + new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
> > + else
> > + new_ist = old_ist - sizeof(old_ist);
> > +
> > + /* Store old...
2020 Jul 14
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...nested nmi_exit() call (#VC->NMI->#DB) may back-adjust the IST entry
+ * too early.
+ */
+void noinstr sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+ unsigned long *p;
+
+ if (!sev_es_active())
+ return;
+
+ /* Read old IST entry */
+ old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /* Make room on the IST stack */
+ if (on_vc_stack(regs->sp))
+ new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+ else
+ new_ist = old_ist - sizeof(old_ist);
+
+ /* Store old IST entry */
+ p = (unsigned long *)new_ist;
+ *p = old_ist;
+
+ /*...
2020 Aug 24
0
[PATCH v6 46/76] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...ly back-adjusted in sev_es_ist_exit(). Otherwise a nested
+ * sev_es_ist_exit() call may back-adjust the IST entry too early.
+ */
+void noinstr __sev_es_ist_enter(struct pt_regs *regs)
+{
+ unsigned long old_ist, new_ist;
+ unsigned long *p;
+
+ /* Read old IST entry */
+ old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
+
+ /* Make room on the IST stack */
+ if (on_vc_stack(regs->sp))
+ new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
+ else
+ new_ist = old_ist - sizeof(old_ist);
+
+ /* Store old IST entry */
+ p = (unsigned long *)new_ist;
+ *p = old_ist;
+
+ /*...
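Both versions of this snippet are cut off by the archive right after the old IST entry is saved at the top of the new stack. Going by the quoted comment ("back-adjusted in sev_es_ist_exit()"), the remaining steps are to publish the new entry and, on exit, to restore the saved one. A hedged sketch of that tail and of a matching exit helper follows; it is not a verbatim copy of the patch, and the exact naming and sanity checks may differ:

	/* Publish the adjusted entry so a nested #VC uses the new stack */
	__this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

/*
 * Sketch of the exit side: the current IST entry points at the slot where
 * the previous value was saved, so read that value back and restore it.
 */
void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	__this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}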
2020 Feb 18
2
[PATCH] x86/ioperm: add new paravirt function update_io_bitmap
...-374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
/**
* tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
*/
-void tss_update_io_bitmap(void)
+void native_tss_update_io_bitmap(void)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct thread_struct *t = &current->thread;
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 1f756ffffe8b..feaf2e68ee5c 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -72,6 +72,9 @@
#include <asm/mwait.h>
#include <asm...
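The rename to native_tss_update_io_bitmap() implies a paravirt indirection so that Xen PV can supply its own implementation, which is what the subject line announces. A hedged sketch of what such an indirection conventionally looks like; the actual hook name and Xen-side handler are not visible in the snippet:

/*
 * Sketch: paravirt wrapper dispatching to native_tss_update_io_bitmap()
 * or a hypervisor-specific handler. cpu.update_io_bitmap is the assumed
 * name of the paravirt hook.
 */
static inline void tss_update_io_bitmap(void)
{
	PVOP_VCALL0(cpu.update_io_bitmap);
}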
2020 Sep 08
1
[PATCH v7 67/72] x86/smpboot: Load TSS and getcpu GDT entry before loading IDT
...tss_struct *tss)
> #endif
> }
>
> +/*
> + * Setup everything needed to handle exceptions from the IDT, including the IST
> + * exceptions which use paranoid_entry()
> + */
> +void cpu_init_exception_handling(void)
> +{
> + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
> + int cpu = raw_smp_processor_id();
> +
> + /* paranoid_entry() gets the CPU number from the GDT */
> + setup_getcpu(cpu);
> +
> + /* IST vectors need TSS to be set up. */
> + tss_setup_ist(tss);
> + tss_setup_io_bitmap(tss);
> + set_tss_desc(cpu, &get_cpu_entry_a...
2020 Aug 24
0
[PATCH v6 70/76] x86/smpboot: Setup TSS for starting AP
...) { }
+static inline void tss_setup_ist(struct tss_struct *tss, struct cpu_entry_area *cea) { }
#endif /* !CONFIG_X86_64 */
@@ -1854,6 +1853,17 @@ static inline void tss_setup_io_bitmap(struct tss_struct *tss)
#endif
}
+void tss_setup(int cpu)
+{
+ struct tss_struct *tss = per_cpu_ptr(&cpu_tss_rw, cpu);
+ struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+
+ /* Initialize the TSS for cpu. */
+ tss_setup_ist(tss, cea);
+ tss_setup_io_bitmap(tss);
+ set_tss_desc(cpu, &cea->tss.x86_tss);
+}
+
/*
* cpu_init() initializes state that is per-CPU. Some data is already
* initialized...
2020 Sep 07
0
[PATCH v7 67/72] x86/smpboot: Load TSS and getcpu GDT entry before loading IDT
...@@ static inline void tss_setup_io_bitmap(struct tss_struct *tss)
#endif
}
+/*
+ * Setup everything needed to handle exceptions from the IDT, including the IST
+ * exceptions which use paranoid_entry()
+ */
+void cpu_init_exception_handling(void)
+{
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ int cpu = raw_smp_processor_id();
+
+ /* paranoid_entry() gets the CPU number from the GDT */
+ setup_getcpu(cpu);
+
+ /* IST vectors need TSS to be set up. */
+ tss_setup_ist(tss);
+ tss_setup_io_bitmap(tss);
+ set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);
+
+ load_TR_desc()...
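The snippet is truncated at load_TR_desc(). The point of the patch is that the task register, and with it the TSS holding the IST pointers, must be loaded before the IDT, so the likely remainder of the function is simply loading the TSS and then the IDT. A sketch of that tail, assuming the standard helpers:

	/* Load the TSS via the task register; only then is the IDT safe to use */
	load_TR_desc();

	load_current_idt();
}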
2020 Apr 28
0
[PATCH v3 70/75] x86/head/64: Setup TSS early for secondary CPUs
...--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -523,6 +523,19 @@ void __head early_idt_setup_early_handler(unsigned long physaddr)
}
}
+void __head early_load_tss(void)
+{
+ struct desc_struct *gdt = (struct desc_struct *)boot_gdt;
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+ tss_desc tss_desc;
+
+ set_tssldt_descriptor(&tss_desc, (unsigned long)tss, DESC_TSS,
+ __KERNEL_TSS_LIMIT);
+ native_write_gdt_entry(gdt, GDT_ENTRY_TSS, &tss_desc, DESC_TSS);
+
+ asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
+}
+
void __head early_idt_set...
2020 Apr 28
0
[PATCH v3 45/75] x86/dumpstack/64: Handle #VC exception stacks
...S; i++) {
+ snprintf(vc_stack_names[i], sizeof(vc_stack_names[i]),
+ "#VC%d", i);
+ }
+}
+
static void __init sev_es_setup_vc_stack(int cpu)
{
struct vmm_exception_stacks *stack;
@@ -272,6 +286,8 @@ static void __init sev_es_setup_vc_stack(int cpu)
tss = per_cpu_ptr(&cpu_tss_rw, cpu);
tss->x86_tss.ist[IST_INDEX_VC] = (unsigned long)first_stack + size;
+
+ per_cpu(cea_vmm_exception_stacks, cpu) = &cea->vc_stacks;
}
void __init sev_es_init_vc_handling(void)
@@ -290,6 +306,16 @@ void __init sev_es_init_vc_handling(void)
sev_es_init_ghcb(cpu);
sev_es_s...
2020 Apr 28
0
[PATCH v3 44/75] x86/sev-es: Allocate and Map IST stacks for #VC handler
...* cpu_init() had a chance to setup the rest of the TSS. So setup the
+ * #VC handlers stack pointer up here for all CPUs
+ */
+ first_stack = cea->vc_stacks.stacks[N_VC_STACKS - 1].stack;
+ size = sizeof(cea->vc_stacks.stacks[N_VC_STACKS - 1].stack);
+ tss = per_cpu_ptr(&cpu_tss_rw, cpu);
+
+ tss->x86_tss.ist[IST_INDEX_VC] = (unsigned long)first_stack + size;
+}
+
void __init sev_es_init_vc_handling(void)
{
int cpu;
BUILD_BUG_ON((offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE) != 0);
+ BUILD_BUG_ON((offsetof(struct sev_es_runtime_data, vc_stacks) % PAG...
2020 Aug 24
0
[PATCH v6 48/76] x86/entry/64: Add entry code for #VC handler
...1,8 @@ SYM_CODE_START(entry_SYSCALL_64)
SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+SYM_INNER_LABEL(entry_SYSCALL_64_safe_stack, SYM_L_GLOBAL)
+
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2) /* pt_regs->sp */
@@ -446,6 +448,82 @@ _ASM_NOKPROBE(\asmsym)
SYM_CODE_END(\asmsym)
.endm
+/**
+ * idtentry_vc - Macro to generate entry stub for #VC
+ * @vector: Vector number
+ * @asmsym: ASM symbol for the entry point
+ * @cfunc: C function to be called
+ *
+ * The macro emi...
2020 Jul 14
92
[PATCH v4 00/75] x86: SEV-ES Guest Support
From: Joerg Roedel <jroedel at suse.de>
Hi,
here is the fourth version of the SEV-ES Guest Support patches. I
addressed the review comments sent to me for the previous version and
rebased the code to v5.8-rc5.
The biggest change in this version is the IST handling code for the
#VC handler. I adapted the entry code for the #VC handler to the big
pile of entry code changes merged into
2020 Aug 24
96
[PATCH v6 00/76] x86: SEV-ES Guest Support
From: Joerg Roedel <jroedel at suse.de>
Hi,
here is the new version of the SEV-ES client enabling patch-set. It is
based on the latest tip/master branch and contains the necessary
changes. In particular those are:
- Enabling CR4.FSGSBASE early on supported processors so that
early #VC exceptions on APs can be handled.
- Add another patch (patch 1) to fix a KVM frame-size build
2020 Jul 24
86
[PATCH v5 00/75] x86: SEV-ES Guest Support
From: Joerg Roedel <jroedel at suse.de>
Hi,
here is a rebased version of the latest SEV-ES patches. They are now
based on latest tip/master instead of upstream Linux and include the
necessary changes.
Changes to v4 are in particular:
- Moved early IDT setup code to idt.c, because the idt_descr
and the idt_table are now static
- This required making stack protector work early (or
2020 Sep 07
84
[PATCH v7 00/72] x86: SEV-ES Guest Support
From: Joerg Roedel <jroedel at suse.de>
Hi,
here is a new version of the SEV-ES Guest Support patches for x86. The
previous versions can be found as a linked list starting here:
https://lore.kernel.org/lkml/20200824085511.7553-1-joro at 8bytes.org/
I updated the patch-set based on the review comments I got and the
discussions around it.
Another important change is that the early IDT