Displaying results from an estimated 68 matches for "this_cpu_read".
2020 Jul 14
0
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...+490,9 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
+ /* Needs to happen before DR7 is accessed */
+ sev_es_ist_enter(regs);
+
this_cpu_write(nmi_dr7, local_db_save());
nmi_enter();
@@ -502,6 +506,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
local_db_restore(this_cpu_read(nmi_dr7));
+ sev_es_ist_exit();
+
if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index d415368f16ec..2a7cc72db1d5 100644
--- a/arch/x86/kernel/sev-es...
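For context, a stand-alone model of what the sev_es_ist_enter()/sev_es_ist_exit() pair above roughly does: shift the #VC IST entry in the CPU's TSS on NMI entry, so that a #VC raised inside the NMI handler (for example by the DR7 access) does not reuse an interrupted #VC handler's stack, and shift it back on exit. The index, offset and starting address below are invented placeholders, not the kernel's values.

/*
 * Sketch only: a user-space stand-in for the IST adjustment.  The real
 * functions operate on tss->x86_tss.ist[IST_INDEX_VC]; all constants
 * here are made up for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define IST_INDEX_VC    4
#define VC_STACK_OFFSET 1024

struct fake_tss { uint64_t ist[8]; };

static struct fake_tss tss = { .ist = { [IST_INDEX_VC] = 0xffff800000010000ULL } };

static void sev_es_ist_enter(void) { tss.ist[IST_INDEX_VC] -= VC_STACK_OFFSET; }
static void sev_es_ist_exit(void)  { tss.ist[IST_INDEX_VC] += VC_STACK_OFFSET; }

int main(void)
{
        printf("IST[VC] before NMI: %#llx\n", (unsigned long long)tss.ist[IST_INDEX_VC]);
        sev_es_ist_enter();     /* "needs to happen before DR7 is accessed" */
        printf("IST[VC] inside NMI: %#llx\n", (unsigned long long)tss.ist[IST_INDEX_VC]);
        sev_es_ist_exit();
        printf("IST[VC] after NMI:  %#llx\n", (unsigned long long)tss.ist[IST_INDEX_VC]);
        return 0;
}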
2020 Aug 24
0
[PATCH v6 46/76] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
..._RAW(exc_nmi)
this_cpu_write(nmi_cr2, read_cr2());
nmi_restart:
+ /* Needs to happen before DR7 is accessed */
+ sev_es_ist_enter(regs);
+
this_cpu_write(nmi_dr7, local_db_save());
irq_state = idtentry_enter_nmi(regs);
@@ -501,6 +505,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
local_db_restore(this_cpu_read(nmi_dr7));
+ sev_es_ist_exit();
+
if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
write_cr2(this_cpu_read(nmi_cr2));
if (this_cpu_dec_return(nmi_state))
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index 64002d86a237..95831d103418 100644
--- a/arch/x86/kernel/sev-es...
2020 Jul 15
2
[PATCH v4 45/75] x86/sev-es: Adjust #VC IST Stack on entering NMI handler
...(nmi_cr2, read_cr2());
> nmi_restart:
>
> + /* Needs to happen before DR7 is accessed */
> + sev_es_ist_enter(regs);
> +
> this_cpu_write(nmi_dr7, local_db_save());
>
> nmi_enter();
> @@ -502,6 +506,8 @@ DEFINE_IDTENTRY_RAW(exc_nmi)
>
> local_db_restore(this_cpu_read(nmi_dr7));
>
> + sev_es_ist_exit();
> +
> if (unlikely(this_cpu_read(nmi_cr2) != read_cr2()))
> write_cr2(this_cpu_read(nmi_cr2));
> if (this_cpu_dec_return(nmi_state))
I really hate all this #VC stuff :-(
So the above will make the NMI do 4 unconditional extra CALL+RET...
2020 Aug 11
3
[PATCH] x86/paravirt: Add missing noinstr to arch_local*() helpers
On Tue, Aug 11, 2020 at 11:20:54AM +0200, peterz at infradead.org wrote:
> On Tue, Aug 11, 2020 at 10:38:50AM +0200, Jürgen Groß wrote:
> > In case you don't want to do it I can send the patch for the Xen
> > variants.
>
> I might've opened a whole new can of worms here. I'm not sure we
> can/want to fix the entire fallout this release :/
>
> Let me
2017 Nov 13
0
[PATCH RFC v3 6/6] KVM guest: introduce smart idle poll algorithm
...old * grow;
+ if (val > max)
+ val = max;
+
+ return val;
+}
+
+static unsigned int shrink_poll_ns(unsigned int old, unsigned int shrink)
+{
+ if (shrink == 0)
+ return 0;
+
+ return old / shrink;
+}
+
+static void kvm_idle_update_poll_duration(ktime_t idle)
+{
+ unsigned long poll_duration = this_cpu_read(poll_duration_ns);
+
+ /* so far poll duration is based on nohz */
+ if (idle == -1ULL)
+ return;
+
+ if (poll_duration && idle > paravirt_poll_threshold_ns)
+ poll_duration = shrink_poll_ns(poll_duration,
+ paravirt_poll_shrink);
+ else if (poll_duration < paravirt_poll_...
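For illustration, a stand-alone sketch of the grow/shrink heuristic quoted above. The threshold, grow/shrink factors, cap and initial poll budget are invented stand-ins for the paravirt_poll_* parameters, and the grow condition is completed with an assumption because the snippet is cut off.

#include <stdio.h>

static unsigned int poll_threshold_ns = 200000; /* assumed */
static unsigned int poll_grow         = 2;      /* assumed */
static unsigned int poll_shrink       = 2;      /* assumed */
static unsigned int poll_max_ns       = 500000; /* assumed */

static unsigned int grow_poll_ns(unsigned int old, unsigned int grow, unsigned int max)
{
        unsigned int val = old * grow;

        if (val > max)
                val = max;
        return val;
}

static unsigned int shrink_poll_ns(unsigned int old, unsigned int shrink)
{
        if (shrink == 0)
                return 0;
        return old / shrink;
}

int main(void)
{
        unsigned int poll_ns = 10000;   /* assumed initial poll budget */
        unsigned long long idle[] = { 50000, 60000, 80000, 400000, 450000, 70000 };

        for (unsigned int i = 0; i < sizeof(idle) / sizeof(idle[0]); i++) {
                if (poll_ns && idle[i] > poll_threshold_ns)
                        poll_ns = shrink_poll_ns(poll_ns, poll_shrink);           /* slept long: poll less */
                else if (poll_ns < poll_max_ns)
                        poll_ns = grow_poll_ns(poll_ns, poll_grow, poll_max_ns);  /* short sleeps: poll more */
                printf("idle=%llu ns -> poll for %u ns next time\n", idle[i], poll_ns);
        }
        return 0;
}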
2020 Apr 28
0
[PATCH v3 47/75] x86/sev-es: Add Runtime #VC Exception Handler
...it(). This is needed when an NMI hits in the #VC handlers
@@ -70,6 +87,53 @@ void sev_es_nmi_exit(void)
tss->x86_tss.ist[IST_INDEX_VC] += VC_STACK_OFFSET;
}
+static struct ghcb *sev_es_get_ghcb(struct ghcb_state *state)
+{
+ struct sev_es_runtime_data *data;
+ struct ghcb *ghcb;
+
+ data = this_cpu_read(runtime_data);
+ ghcb = &data->ghcb_page;
+
+ if (unlikely(data->ghcb_active)) {
+ /* GHCB is already in use - save its contents */
+
+ if (unlikely(data->backup_ghcb_active))
+ return NULL;
+
+ /* Mark backup_ghcb active before writing to it */
+ data->backup_ghcb_active = tr...
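A minimal stand-alone model of the nesting scheme in sev_es_get_ghcb() above: one GHCB page per CPU plus a single backup slot, so a #VC exception that nests inside another #VC (for instance from an NMI) can still get a GHCB, and anything deeper fails. The structures are simplified stand-ins, and a single global replaces the per-CPU runtime_data.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct ghcb { char page[64]; };           /* stand-in for the real GHCB page */

struct ghcb_state { struct ghcb *ghcb; }; /* non-NULL means "restore from backup" */

struct runtime_data {
        struct ghcb ghcb_page;
        struct ghcb backup_ghcb;
        bool ghcb_active;
        bool backup_ghcb_active;
};

static struct runtime_data data;          /* would be per-CPU in the kernel */

static struct ghcb *get_ghcb(struct ghcb_state *state)
{
        struct ghcb *ghcb = &data.ghcb_page;

        if (data.ghcb_active) {
                /* GHCB already in use - save its contents to the backup slot */
                if (data.backup_ghcb_active)
                        return NULL;      /* more than one level of nesting: give up */
                data.backup_ghcb_active = true;
                memcpy(&data.backup_ghcb, ghcb, sizeof(*ghcb));
                state->ghcb = &data.backup_ghcb;
        } else {
                state->ghcb = NULL;
                data.ghcb_active = true;
        }
        return ghcb;
}

static void put_ghcb(struct ghcb_state *state)
{
        if (state->ghcb) {
                /* nested use: restore the interrupted handler's contents */
                memcpy(&data.ghcb_page, state->ghcb, sizeof(data.ghcb_page));
                data.backup_ghcb_active = false;
        } else {
                data.ghcb_active = false;
        }
}

int main(void)
{
        struct ghcb_state outer = { 0 }, nested = { 0 };

        struct ghcb *g1 = get_ghcb(&outer);
        struct ghcb *g2 = get_ghcb(&nested);   /* simulated nested #VC */

        printf("outer=%p nested=%p backup in use: %d\n",
               (void *)g1, (void *)g2, data.backup_ghcb_active);

        put_ghcb(&nested);
        put_ghcb(&outer);
        return 0;
}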
2020 Apr 28
0
[PATCH v3 54/75] x86/sev-es: Handle DR7 read/write events
...cb, struct es_em_ctxt *ctxt,
unsigned int bytes, bool read)
{
@@ -720,6 +744,61 @@ static enum es_result vc_handle_mmio(struct ghcb *ghcb,
return ret;
}
+static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
+ struct es_em_ctxt *ctxt)
+{
+ struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
+ long val, *reg = vc_insn_get_rm(ctxt);
+ enum es_result ret;
+
+ if (!reg)
+ return ES_DECODE_FAILED;
+
+ val = *reg;
+
+ /* Upper 32 bits must be written as zeroes */
+ if (val >> 32) {
+ ctxt->fi.vector = X86_TRAP_GP;
+ ctxt->fi.error_code = 0;
+ return ES_EXCEPTI...
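A tiny stand-alone illustration of the check above: a 64-bit write to DR7 with any of the upper 32 bits set is refused by injecting #GP(0). The "cache the value" printout is an assumption about what the rest of the truncated handler does.

#include <stdint.h>
#include <stdio.h>

static int emulate_dr7_write(uint64_t val)
{
        if (val >> 32) {        /* upper 32 bits must be written as zeroes */
                printf("inject #GP(0) for DR7 value %#llx\n", (unsigned long long)val);
                return -1;
        }
        printf("accept DR7 value %#llx (cached for later reads)\n", (unsigned long long)val);
        return 0;
}

int main(void)
{
        emulate_dr7_write(0x0000000000000400ULL);   /* legal: upper half clear */
        emulate_dr7_write(0x0000000100000400ULL);   /* illegal: bit 32 set */
        return 0;
}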
2017 Aug 09
1
[PATCH v13 1/5] Introduce xbitmap
...DA_BITMAP_BITS))
> +#define XB_MAX_PATH (DIV_ROUND_UP(XB_INDEX_BITS, \
> + RADIX_TREE_MAP_SHIFT))
> +#define XB_PRELOAD_SIZE (XB_MAX_PATH * 2 - 1)
> +
>
> ...
>
> +void xb_preload(gfp_t gfp)
> +{
> + __radix_tree_preload(gfp, XB_PRELOAD_SIZE);
> + if (!this_cpu_read(ida_bitmap)) {
> + struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
> +
> + if (!bitmap)
> + return;
> + bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
> + kfree(bitmap);
> + }
> +}
> +EXPORT_SYMBOL(xb_preload);
Please document the exported API. It...
2019 Jul 22
2
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...+ enum tlb_flush_reason reason = TLB_REMOTE_SHOOTDOWN;
+ bool local = false;
+
+ if (f->cpu == smp_processor_id()) {
+ local = true;
+ reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
+ } else {
+ inc_irq_stat(irq_tlb_count);
+
+ if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
+ return;
+
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ }
+
+ flush_tlb_func_common(f, local, reason);
+}
+
static bool tlb_is_not_lazy(int cpu)
{
return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
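A stand-alone model of the dispatch logic above: the same flush function now runs on both the initiating CPU and remote CPUs, picks the flush reason based on where it runs, and drops remote requests that target an mm the CPU is no longer running. The types and CPU/mm values are simplified stand-ins for the kernel ones.

#include <stdbool.h>
#include <stdio.h>

enum tlb_flush_reason {
        TLB_REMOTE_SHOOTDOWN,
        TLB_LOCAL_SHOOTDOWN,
        TLB_LOCAL_MM_SHOOTDOWN,
};

struct flush_tlb_info { int cpu; void *mm; };

static int this_cpu = 1;                    /* pretend we are CPU 1 */
static void *loaded_mm = (void *)0x1000;    /* mm currently loaded on this CPU */

static void flush_tlb_func(const struct flush_tlb_info *f)
{
        enum tlb_flush_reason reason = TLB_REMOTE_SHOOTDOWN;
        bool local = false;

        if (f->cpu == this_cpu) {
                local = true;
                reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
        } else {
                /* remote request: skip it if it targets an mm we no longer run */
                if (f->mm && f->mm != loaded_mm) {
                        printf("cpu %d: skip flush for foreign mm %p\n", this_cpu, f->mm);
                        return;
                }
        }

        printf("cpu %d: flush (local=%d, reason=%d)\n", this_cpu, local, reason);
}

int main(void)
{
        struct flush_tlb_info local_req  = { .cpu = 1, .mm = (void *)0x1000 };
        struct flush_tlb_info remote_req = { .cpu = 2, .mm = (void *)0x1000 };
        struct flush_tlb_info stale_req  = { .cpu = 2, .mm = (void *)0x2000 };

        flush_tlb_func(&local_req);
        flush_tlb_func(&remote_req);
        flush_tlb_func(&stale_req);
        return 0;
}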
2017 Oct 09
4
[PATCH v16 3/5] virtio-balloon: VIRTIO_BALLOON_F_SG
On Sat, Sep 30, 2017 at 12:05:52PM +0800, Wei Wang wrote:
> +static inline void xb_set_page(struct virtio_balloon *vb,
> + struct page *page,
> + unsigned long *pfn_min,
> + unsigned long *pfn_max)
> +{
> + unsigned long pfn = page_to_pfn(page);
> +
> + *pfn_min = min(pfn, *pfn_min);
> + *pfn_max = max(pfn, *pfn_max);
> +
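The min()/max() updates above keep a running [pfn_min, pfn_max] window across a batch of ballooned pages, presumably so only that slice of the xbitmap has to be walked later; a trivial stand-alone version of the window tracking:

#include <limits.h>
#include <stdio.h>

static void note_pfn(unsigned long pfn, unsigned long *pfn_min, unsigned long *pfn_max)
{
        if (pfn < *pfn_min)
                *pfn_min = pfn;
        if (pfn > *pfn_max)
                *pfn_max = pfn;
}

int main(void)
{
        unsigned long pfn_min = ULONG_MAX, pfn_max = 0;
        unsigned long pfns[] = { 4200, 4100, 4300, 4150 };

        for (unsigned int i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++)
                note_pfn(pfns[i], &pfn_min, &pfn_max);

        printf("search window: [%lu, %lu]\n", pfn_min, pfn_max);
        return 0;
}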
2017 Oct 11
0
[PATCH v16 3/5] virtio-balloon: VIRTIO_BALLOON_F_SG
...> Then, kmalloc() is called with preemption disabled, isn't it?
> But xb_set_page() calls xb_preload(GFP_KERNEL) which might sleep with
> preemption disabled.
Yes, I think that should not be expected, thanks.
I plan to change it like this:
bool xb_preload(gfp_t gfp)
{
if (!this_cpu_read(ida_bitmap)) {
struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
if (!bitmap)
return false;
bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
kfree(bitmap);
}
if (__radix_tre...
2017 Oct 11
0
[PATCH v16 3/5] virtio-balloon: VIRTIO_BALLOON_F_SG
...>>> But xb_set_page() calls xb_preload(GFP_KERNEL) which might sleep with
>>> preemption disabled.
>> Yes, I think that should not be expected, thanks.
>>
>> I plan to change it like this:
>>
>> bool xb_preload(gfp_t gfp)
>> {
>> if (!this_cpu_read(ida_bitmap)) {
>> struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
>>
>> if (!bitmap)
>> return false;
>> bitmap = this_cpu_cmpxchg(ida_bitmap, NULL, bitmap);
>> kfr...
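A stand-alone sketch of the allocation pattern behind the reworked xb_preload() above: allocate the per-CPU scratch bitmap while sleeping is still allowed, publish it with a compare-and-exchange, and free our copy if another path already installed one. A plain global pointer and GCC atomics stand in for the per-CPU variable and this_cpu_cmpxchg(); the struct layout is a placeholder.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ida_bitmap { unsigned long bits[2]; };   /* placeholder layout */

static struct ida_bitmap *ida_bitmap;           /* per-CPU in the kernel */

static bool xb_preload_sketch(void)
{
        if (!__atomic_load_n(&ida_bitmap, __ATOMIC_RELAXED)) {
                struct ida_bitmap *bitmap = calloc(1, sizeof(*bitmap));
                struct ida_bitmap *expected = NULL;

                if (!bitmap)
                        return false;

                /* Publish unless another path already installed a bitmap. */
                if (!__atomic_compare_exchange_n(&ida_bitmap, &expected, bitmap,
                                                 false, __ATOMIC_RELEASE, __ATOMIC_RELAXED))
                        free(bitmap);           /* lost the race: drop our copy */
        }

        /* The real function would go on to __radix_tree_preload() here. */
        return true;
}

int main(void)
{
        if (!xb_preload_sketch())
                return 1;
        printf("preloaded bitmap at %p\n", (void *)ida_bitmap);
        free(ida_bitmap);
        return 0;
}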
2020 May 25
1
[PATCH v3 54/75] x86/sev-es: Handle DR7 read/write events
On Tue, Apr 28, 2020 at 05:17:04PM +0200, Joerg Roedel wrote:
> +static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
> + struct es_em_ctxt *ctxt)
> +{
> + struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
> + long val, *reg = vc_insn_get_rm(ctxt);
> + enum es_result ret;
> +
> + if (!reg)
> + return ES_DECODE_FAILED;
> +
> + val = *reg;
> +
> + /* Upper 32 bits must be written as zeroes */
> + if (val >> 32) {
> + ctxt->fi.vector = X86_TRAP_...
2019 Jul 22
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...HOOTDOWN;
> + bool local = false;
> +
> + if (f->cpu == smp_processor_id()) {
> + local = true;
> + reason = (f->mm == NULL) ? TLB_LOCAL_SHOOTDOWN : TLB_LOCAL_MM_SHOOTDOWN;
> + } else {
> + inc_irq_stat(irq_tlb_count);
> +
> + if (f->mm && f->mm != this_cpu_read(cpu_tlbstate.loaded_mm))
> + return;
> +
> + count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
> + }
> +
> + flush_tlb_func_common(f, local, reason);
> +}
> +
> static bool tlb_is_not_lazy(int cpu)
> {
> return !per_cpu(cpu_tlbstate_shared.is_lazy, cpu);
Nice!...