Displaying 20 results from an estimated 58 matches for "this_cpu_writ".
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
On Tue, Jul 14, 2020 at 02:08:47PM +0200, Joerg Roedel wrote:
> @@ -489,6 +490,9 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
> this_cpu_write(nmi_cr2, read_cr2());
> nmi_restart:
>
> + /* Needs to happen before DR7 is accessed */
> + sev_es_ist_enter(regs);
> +
> this_cpu_write(nmi_dr7, local_db_save());
>
> nmi_enter();
> @@ -502,6 +506,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
>
> local_db_restore...
2020 Jul 14
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...mi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/sev-es.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -489,6 +490,9 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
+ /* Needs to happen before DR7 is accessed */
+ sev_es_ist_enter(regs);
+
this_cpu_write(nmi_dr7, local_db_save());
nmi_enter();
@@ -502,6 +506,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
local_db_restore(this_cpu_read(nmi_dr7));
+ sev_es_ist_exit();
+
if...
2020 Aug 24
0
[PATCH v6 46/76] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...mi.c
+++ b/arch/x86/kernel/nmi.c
@@ -33,6 +33,7 @@
#include <asm/reboot.h>
#include <asm/cache.h>
#include <asm/nospec-branch.h>
+#include <asm/sev-es.h>
#define CREATE_TRACE_POINTS
#include <trace/events/nmi.h>
@@ -488,6 +489,9 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
+ /* Needs to happen before DR7 is accessed */
+ sev_es_ist_enter(regs);
+
this_cpu_write(nmi_dr7, local_db_save());
irq_state = idtentry_enter_nmi(regs);
@@ -501,6 +505,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
local_db_restore(this_cpu_read(nmi_dr7));
+...
2015 Mar 18
2
[PATCH 8/9] qspinlock: Generic paravirt support
...d pv_wait_head(struct qspinlock *lock)
> +{
> + struct __qspinlock *l = (void *)lock;
> + int loop;
> +
> + for (;;) {
> + for (loop = SPIN_THRESHOLD; loop; loop--) {
> + if (!READ_ONCE(l->locked))
> + goto done;
> +
> + cpu_relax();
> + }
> +
> + this_cpu_write(__pv_lock_wait, lock);
We may run into the same problem of needing to have 4 queue nodes per
CPU. If an interrupt happens just after the write and before the actual
wait and it goes through the same sequence, it will overwrite the
__pv_lock_wait[] entry. So we may have a lost wakeup. That is why...
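
The overwrite described here can be modelled in a few lines of plain C. The toy below is illustrative only (hypothetical names, single-threaded, not the kernel code): a single per-CPU wait slot is silently clobbered when a second waiter records itself before the first one is woken.

#include <stdio.h>

struct lock { int id; };

/* One wait slot per CPU, like this_cpu_write(__pv_lock_wait, lock). */
static struct lock *pv_lock_wait;

static void record_waiter(struct lock *l)
{
    pv_lock_wait = l;
}

static void wake_waiter(struct lock *l)
{
    /* The unlock path only wakes a CPU whose slot matches this lock. */
    if (pv_lock_wait == l)
        printf("wakeup delivered for lock %d\n", l->id);
    else
        printf("lost wakeup for lock %d (slot now holds lock %d)\n",
               l->id, pv_lock_wait ? pv_lock_wait->id : -1);
}

int main(void)
{
    struct lock a = { 1 }, b = { 2 };

    record_waiter(&a);  /* task-level pv_wait_head() records lock a */
    record_waiter(&b);  /* interrupt re-enters and records lock b   */
    wake_waiter(&b);    /* b's holder releases: wakeup works        */
    wake_waiter(&a);    /* a's holder releases: the record is gone  */
    return 0;
}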
2015 Mar 19
0
[PATCH 8/9] qspinlock: Generic paravirt support
On Wed, Mar 18, 2015 at 04:50:37PM -0400, Waiman Long wrote:
> >+ this_cpu_write(__pv_lock_wait, lock);
>
> We may run into the same problem of needing to have 4 queue nodes per CPU.
> If an interrupt happens just after the write and before the actual wait and
> it goes through the same sequence, it will overwrite the __pv_lock_wait[]
> entry. So we may have lo...
2020 Jul 15
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...IGN_DOWN(regs->sp, 8) - sizeof(old_ist);
> > + else
> > + new_ist = old_ist - sizeof(old_ist);
> > +
> > + /* Store old IST entry */
> > + p = (unsigned long *)new_ist;
> > + *p = old_ist;
> > +
> > + /* Set new IST entry */
> > + this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
> > +}
> > +
> > +void noinstr sev_es_ist_exit(void)
> > +{
> > + unsigned long ist;
> > + unsigned long *p;
> > +
> > + if (!sev_es_active())
> > + return;
> > +
> > + /* Read IST en...
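
The save/restore idea in this hunk — carve a new stack location, park the old IST value there, and let the exit path read it back — can be sketched as a stand-alone toy. This is plain C with made-up names, not the actual sev-es code, and it omits the "regs->sp already inside the IST stack" case handled above:

#include <stdio.h>

static unsigned long vc_stack[512];
/* Models this_cpu_read/write of cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]. */
static unsigned long vc_ist;

static void ist_enter(void)
{
    unsigned long old_ist = vc_ist;
    unsigned long new_ist = old_ist - sizeof(old_ist);

    /* Park the old IST value at the top of the newly carved stack ... */
    *(unsigned long *)new_ist = old_ist;
    /* ... and make the carved stack the current one. */
    vc_ist = new_ist;
}

static void ist_exit(void)
{
    /* Read the parked value back; undoes one level of nesting. */
    vc_ist = *(unsigned long *)vc_ist;
}

int main(void)
{
    vc_ist = (unsigned long)&vc_stack[512];
    printf("ist = %#lx\n", vc_ist);
    ist_enter();
    printf("ist = %#lx (nested entry)\n", vc_ist);
    ist_exit();
    printf("ist = %#lx (restored)\n", vc_ist);
    return 0;
}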
2015 Apr 02
3
[PATCH 8/9] qspinlock: Generic paravirt support
On Thu, Apr 02, 2015 at 12:28:30PM -0400, Waiman Long wrote:
> On 04/01/2015 05:03 PM, Peter Zijlstra wrote:
> >On Wed, Apr 01, 2015 at 03:58:58PM -0400, Waiman Long wrote:
> >>On 04/01/2015 02:48 PM, Peter Zijlstra wrote:
> >>I am sorry that I don't quite get what you mean here. My point is that in
> >>the hashing step, a cpu will need to scan an empty
2015 Apr 02
0
[PATCH 8/9] qspinlock: Generic paravirt support
...static void pv_wait_head(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
+ struct pv_hash_bucket *hb = NULL;
int loop;
+ u8 o;
for (;;) {
for (loop = SPIN_THRESHOLD; loop; loop--) {
@@ -126,29 +207,47 @@ static void pv_wait_head(struct qspinloc
cpu_relax();
}
- this_cpu_write(__pv_lock_wait, lock);
- /*
- * __pv_lock_wait must be set before setting _Q_SLOW_VAL
- *
- * [S] __pv_lock_wait = lock [RmW] l = l->locked = 0
- * MB MB
- * [S] l->locked = _Q_SLOW_VAL [L] __pv_lock_wait
- *
- * Matches the xchg() in pv_q...
2020 Feb 11
0
[PATCH 62/62] x86/sev-es: Add NMI state tracking
...access is set up */
static unsigned long early_dr7 = DR7_RESET_VALUE;
@@ -144,6 +145,28 @@ static phys_addr_t es_slow_virt_to_phys(struct ghcb *ghcb, long vaddr)
/* Include code shared with pre-decompression boot stage */
#include "sev-es-shared.c"
+void sev_es_nmi_enter(void)
+{
+ this_cpu_write(sev_es_in_nmi, true);
+}
+
+void sev_es_nmi_complete(void)
+{
+ struct ghcb *ghcb;
+
+ ghcb = this_cpu_ptr(&ghcb_page);
+
+ ghcb_invalidate(ghcb);
+ ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
+ ghcb_set_sw_exit_info_1(ghcb, 0);
+ ghcb_set_sw_exit_info_2(ghcb, 0);
+
+ write_ghcb_msr...
2015 Mar 16
0
[PATCH 8/9] qspinlock: Generic paravirt support
...spin.
+ * __pv_queue_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock)
+{
+ struct __qspinlock *l = (void *)lock;
+ int loop;
+
+ for (;;) {
+ for (loop = SPIN_THRESHOLD; loop; loop--) {
+ if (!READ_ONCE(l->locked))
+ goto done;
+
+ cpu_relax();
+ }
+
+ this_cpu_write(__pv_lock_wait, lock);
+ /*
+ * __pv_lock_wait must be set before setting _Q_SLOW_VAL
+ *
+ * [S] __pv_lock_wait = lock [RmW] l = l->locked = 0
+ * MB MB
+ * [S] l->locked = _Q_SLOW_VAL [L] __pv_lock_wait
+ *
+ * Matches the xchg() in pv_q...
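
The [S]/[L] pairing in that comment amounts to ordering the store of the wait record before the store of _Q_SLOW_VAL, and letting the unlocker's xchg() order its load of the record after observing _Q_SLOW_VAL. A minimal C11-atomics sketch of the same pairing follows; the names are stand-ins, not the kernel primitives, and the output depends on which thread wins the race (the ordering claim only applies when the unlocker sees _Q_SLOW_VAL):

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define Q_SLOW_VAL 3

static _Atomic(void *) wait_record;     /* stands in for __pv_lock_wait */
static atomic_int locked = 1;           /* stands in for l->locked      */
static int the_lock;                    /* stands in for the qspinlock  */

static void *waiter(void *arg)
{
    (void)arg;
    /* [S] publish the wait record ... */
    atomic_store_explicit(&wait_record, &the_lock, memory_order_relaxed);
    /* ... ordered before [S] l->locked = _Q_SLOW_VAL (release or stronger). */
    atomic_store_explicit(&locked, Q_SLOW_VAL, memory_order_release);
    return NULL;
}

static void *unlocker(void *arg)
{
    (void)arg;
    /* [RmW] xchg() of l->locked; a full barrier in the kernel, seq_cst here. */
    if (atomic_exchange(&locked, 0) == Q_SLOW_VAL) {
        /* [L] seeing _Q_SLOW_VAL guarantees the record is visible here. */
        void *rec = atomic_load_explicit(&wait_record, memory_order_relaxed);
        printf("slow-path unlock, waiter recorded %p\n", rec);
    } else {
        printf("fast-path unlock, no record needed\n");
    }
    return NULL;
}

int main(void)
{
    pthread_t a, b;

    pthread_create(&a, NULL, waiter, NULL);
    pthread_create(&b, NULL, unlocker, NULL);
    pthread_join(a, NULL);
    pthread_join(b, NULL);
    return 0;
}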
2017 Nov 13
0
[PATCH RFC v3 6/6] KVM guest: introduce smart idle poll algorithm
...l_duration = shrink_poll_ns(poll_duration,
+ paravirt_poll_shrink);
+ else if (poll_duration < paravirt_poll_threshold_ns &&
+ idle < paravirt_poll_threshold_ns)
+ poll_duration = grow_poll_ns(poll_duration, paravirt_poll_grow,
+ paravirt_poll_threshold_ns);
+
+ this_cpu_write(poll_duration_ns, poll_duration);
+}
+
static void kvm_idle_poll(void)
{
unsigned long poll_duration = this_cpu_read(poll_duration_ns);
+ ktime_t idle = tick_nohz_get_last_idle_length();
ktime_t start, cur, stop;
+ kvm_idle_update_poll_duration(idle);
+
start = cur = ktime_get();
stop...
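
The hunk above relies on grow_poll_ns()/shrink_poll_ns() helpers whose bodies are not shown in this excerpt. The sketch below shows what such adaptive-polling helpers typically look like; the multiplicative grow, divide-or-reset shrink, and every constant are assumptions standing in for the patch's paravirt_poll_grow/paravirt_poll_shrink/paravirt_poll_threshold_ns, not its actual code:

#include <stdio.h>

#define POLL_GROW        2          /* hypothetical tunables */
#define POLL_SHRINK      2
#define POLL_BASE_NS     10000UL
#define POLL_MAX_NS      500000UL

static unsigned long grow_poll_ns(unsigned long old, unsigned long grow,
                                  unsigned long max)
{
    /* Start from a small base, then scale up multiplicatively, capped. */
    unsigned long val = old ? old * grow : POLL_BASE_NS;

    return val > max ? max : val;
}

static unsigned long shrink_poll_ns(unsigned long old, unsigned long shrink)
{
    /* A shrink factor of 0 means "drop straight back to no polling". */
    return shrink ? old / shrink : 0;
}

int main(void)
{
    unsigned long poll = 0;

    poll = grow_poll_ns(poll, POLL_GROW, POLL_MAX_NS);
    printf("grown:  %lu ns\n", poll);
    poll = grow_poll_ns(poll, POLL_GROW, POLL_MAX_NS);
    printf("grown:  %lu ns\n", poll);
    poll = shrink_poll_ns(poll, POLL_SHRINK);
    printf("shrunk: %lu ns\n", poll);
    return 0;
}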
2012 Jun 01
0
[PATCH 06/27] xen, smpboot: Use generic SMP booting infrastructure
...pu_init();
touch_softlockup_watchdog();
- preempt_disable();
xen_enable_sysenter();
xen_enable_syscall();
@@ -75,25 +74,11 @@ static void __cpuinit cpu_bringup(void)
set_cpu_sibling_map(cpu);
xen_setup_cpu_clockevents();
-
- notify_cpu_starting(cpu);
-
- set_cpu_online(cpu, true);
-
- this_cpu_write(cpu_state, CPU_ONLINE);
-
- wmb();
-
- /* We can take interrupts now: we're officially "up". */
- local_irq_enable();
-
- wmb(); /* make sure everything is out */
}
static void __cpuinit cpu_bringup_and_idle(void)
{
- cpu_bringup();
- cpu_idle();
+ smpboot_start_secondary(NULL...
2015 Mar 19
4
[PATCH 8/9] qspinlock: Generic paravirt support
...-116,6 +232,7 @@ static DEFINE_PER_CPU(struct qspinlock *
static void pv_wait_head(struct qspinlock *lock)
{
struct __qspinlock *l = (void *)lock;
+ struct qspinlock **lp = NULL;
int loop;
for (;;) {
@@ -126,13 +243,13 @@ static void pv_wait_head(struct qspinloc
cpu_relax();
}
- this_cpu_write(__pv_lock_wait, lock);
+ lp = pv_hash(lock);
/*
- * __pv_lock_wait must be set before setting _Q_SLOW_VAL
+ * lp must be set before setting _Q_SLOW_VAL
*
- * [S] __pv_lock_wait = lock [RmW] l = l->locked = 0
+ * [S] lp = lock [RmW] l = l->locked = 0
*...
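
Replacing the single per-CPU __pv_lock_wait slot with pv_hash(lock) is what closes the lost-wakeup window discussed in the 2015 Mar 18 entry above: each waiter gets its own hash bucket keyed by the lock, so a nested waiter no longer overwrites an earlier record. The toy below illustrates only the idea (linear probing, made-up names, no atomics), not the kernel's pv_hash()/pv_unhash():

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define HB_SIZE 16                      /* hypothetical bucket count */

struct pv_bucket {
    void *lock;                         /* which lock the waiter sleeps on */
    int   cpu;                          /* which CPU to kick on unlock     */
};

static struct pv_bucket hash_tbl[HB_SIZE];

/* Record (lock, cpu); linear probing stands in for the real collision handling. */
static struct pv_bucket *pv_hash_toy(void *lock, int cpu)
{
    size_t i = ((uintptr_t)lock >> 4) % HB_SIZE;

    while (hash_tbl[i].lock)
        i = (i + 1) % HB_SIZE;

    hash_tbl[i].lock = lock;
    hash_tbl[i].cpu  = cpu;
    return &hash_tbl[i];
}

/* Look the lock up again on unlock and return the CPU to wake. */
static int pv_unhash_toy(void *lock)
{
    for (size_t i = 0; i < HB_SIZE; i++) {
        if (hash_tbl[i].lock == lock) {
            hash_tbl[i].lock = NULL;
            return hash_tbl[i].cpu;
        }
    }
    return -1;
}

int main(void)
{
    int lock_a, lock_b;

    pv_hash_toy(&lock_a, 0);            /* task-level waiter on CPU 0          */
    pv_hash_toy(&lock_b, 0);            /* nested waiter, same CPU, own bucket */
    printf("wake CPU %d for lock_b\n", pv_unhash_toy(&lock_b));
    printf("wake CPU %d for lock_a\n", pv_unhash_toy(&lock_a));
    return 0;
}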