search for: cpumask_clear

Displaying 20 results from an estimated 27 matches for "cpumask_clear".

2013 May 06
2
[PATCH v2] xen/gic: EOI irqs on the right pcpu
...r p = irq_to_pending(v, virq); if ( p->desc != NULL ) { p->desc->status &= ~IRQ_INPROGRESS; - GICC[GICC_DIR] = virq; + /* Assume only one pcpu needs to EOI the irq */ + cpu = cpumask_first(&p->eoimask); + cpumask_clear(&p->eoimask); + eoi = 1; } list_del_init(&p->inflight); spin_unlock_irq(&v->arch.vgic.lock); + if ( eoi ) { + /* this is not racy because we can't receive another irq of the + * same type until we EO...
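For readability, the hunk above amounts to the following pattern (a hedged sketch, not the actual Xen code; the eoi_irq_on() helper is illustrative): remember which single pcpu has to EOI the interrupt, clear the pending irq's eoimask while the vgic lock is held, and issue the EOI only after dropping the lock.

    struct pending_irq *p = irq_to_pending(v, virq);
    int cpu = -1, eoi = 0;

    spin_lock_irq(&v->arch.vgic.lock);
    if ( p->desc != NULL )
    {
        p->desc->status &= ~IRQ_INPROGRESS;
        /* Assume only one pcpu needs to EOI the irq. */
        cpu = cpumask_first(&p->eoimask);
        cpumask_clear(&p->eoimask);
        eoi = 1;
    }
    list_del_init(&p->inflight);
    spin_unlock_irq(&v->arch.vgic.lock);

    if ( eoi )
    {
        /* Not racy: another irq of the same type cannot arrive until this
         * one has been EOIed, so the eoimask cannot change under us. */
        eoi_irq_on(cpu, virq);   /* illustrative stand-in for the cross-pcpu EOI */
    }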
2015 Jun 04
1
[PATCH] virtio_pci: Clear stale cpumask when setting irq affinity
...rivers/virtio/virtio_pci_common.c index e894eb278d83..eba1b7ac7294 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) if (cpu == -1) irq_set_affinity_hint(irq, NULL); else { + cpumask_clear(mask); cpumask_set_cpu(cpu, mask); irq_set_affinity_hint(irq, mask); } -- 1.7.10.4
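The bug being fixed: the per-virtqueue cpumask is allocated once and reused across calls to vp_set_vq_affinity(), so without a cpumask_clear() the bit set on a previous call survives and the affinity hint ends up covering more than the requested CPU. A minimal sketch of the corrected pattern, with the virtio_pci bookkeeping (vq-to-irq lookup, msix checks) elided:

    #include <linux/cpumask.h>
    #include <linux/interrupt.h>

    /* "mask" stands in for the per-virtqueue cpumask_var_t the driver keeps
     * around; it is reused across calls, hence the explicit clear. */
    static void set_vq_affinity_hint(unsigned int irq, struct cpumask *mask, int cpu)
    {
        if (cpu == -1) {
            irq_set_affinity_hint(irq, NULL);
        } else {
            cpumask_clear(mask);       /* drop the stale bit from the previous call */
            cpumask_set_cpu(cpu, mask);
            irq_set_affinity_hint(irq, mask);
        }
    }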
2013 Sep 27
4
[PATCH net-next] virtio-net: switch to use XPS to choose txq
...t_affinity(struct virtnet_info *vi) return; } + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return; + i = 0; for_each_online_cpu(cpu) { virtqueue_set_affinity(vi->rq[i].vq, cpu); virtqueue_set_affinity(vi->sq[i].vq, cpu); - *per_cpu_ptr(vi->vq_index, cpu) = i; + cpumask_clear(cpumask); + cpumask_set_cpu(cpu, cpumask); + netif_set_xps_queue(vi->dev, cpumask, i); i++; } vi->affinity_hint_set = true; + free_cpumask_var(cpumask); } static int virtnet_cpu_callback(struct notifier_block *nfb, @@ -1217,28 +1210,6 @@ static int virtnet_change_mtu(struct net...
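Reflowed, the new affinity loop builds a one-CPU mask per queue pair and registers it with XPS, so transmit queue selection follows the CPU that services the pair. A hedged sketch of just that loop; the virtqueue_set_affinity() calls and the rest of virtnet_set_affinity() are omitted:

    #include <linux/cpumask.h>
    #include <linux/gfp.h>
    #include <linux/netdevice.h>

    /* Sketch only: assumes one tx queue per online CPU, as in the hunk above. */
    static void set_xps_per_cpu(struct net_device *dev)
    {
        cpumask_var_t cpumask;
        int cpu, i = 0;

        if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
            return;

        for_each_online_cpu(cpu) {
            cpumask_clear(cpumask);
            cpumask_set_cpu(cpu, cpumask);
            netif_set_xps_queue(dev, cpumask, i);   /* steer txq i to this cpu */
            i++;
        }

        free_cpumask_var(cpumask);
    }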
2013 Sep 29
0
[PATCH net-next] virtio-net: switch to use XPS to choose txq
...> > + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) > + return; > + > i = 0; > for_each_online_cpu(cpu) { > virtqueue_set_affinity(vi->rq[i].vq, cpu); > virtqueue_set_affinity(vi->sq[i].vq, cpu); > - *per_cpu_ptr(vi->vq_index, cpu) = i; > + cpumask_clear(cpumask); > + cpumask_set_cpu(cpu, cpumask); > + netif_set_xps_queue(vi->dev, cpumask, i); > i++; > } > > vi->affinity_hint_set = true; > + free_cpumask_var(cpumask); > } Um, isn't this just cpumask_of(cpu)? Cheers, Rusty.
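Rusty's point: the temporary mask only ever contains a single CPU, so the allocate/clear/set dance can be replaced by cpumask_of(cpu), which returns a pointer to a constant single-CPU mask. Roughly (sketch of the suggested simplification):

    for_each_online_cpu(cpu) {
        virtqueue_set_affinity(vi->rq[i].vq, cpu);
        virtqueue_set_affinity(vi->sq[i].vq, cpu);
        netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
        i++;
    }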
2012 Mar 09
10
[PATCH 0 of 9] (v2) arm: SMP boot
This patch series implements SMP boot for arch/arm, as far as getting all CPUs up and running the idle loop. Changes from v1: - moved barriers out of loop in udelay() - dropped broken GIC change in favour of explanatory comment - made the increment of ready_cpus atomic (I couldn't move the increment to before signalling the next CPU because the PT switch has to happen between
2013 Sep 27
0
[PATCH net-next] virtio-net: switch to use XPS to choose txq
...> > + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) > + return; > + > i = 0; > for_each_online_cpu(cpu) { > virtqueue_set_affinity(vi->rq[i].vq, cpu); > virtqueue_set_affinity(vi->sq[i].vq, cpu); > - *per_cpu_ptr(vi->vq_index, cpu) = i; > + cpumask_clear(cpumask); > + cpumask_set_cpu(cpu, cpumask); > + netif_set_xps_queue(vi->dev, cpumask, i); > i++; > } > > vi->affinity_hint_set = true; > + free_cpumask_var(cpumask); > } > > static int virtnet_cpu_callback(struct notifier_block *nfb, > @@ -1217...
2013 Mar 21
27
[PATCH 0/4] xen/arm: guest SMP support
Hi all, this small patch series implements guest SMP support for ARM, using the ARM PSCI interface for secondary cpu bringup. Stefano Stabellini (4): xen/arm: basic PSCI support, implement cpu_on xen/arm: support for guest SGI xen/arm: support vcpu_op hypercalls xen: move VCPUOP_register_vcpu_info to common code xen/arch/arm/domain.c | 66 ++++++++++++++++++++++++
2019 Jun 13
4
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...the condition + * instead of allocating a new one. + * + * This works under the assumption that there are no nested TLB + * flushes, an assumption that is already made in + * flush_tlb_mm_range(). + */ + struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); + int cpu; + + cpumask_clear(cond_cpumask); + + for_each_cpu(cpu, cpumask) { + if (tlb_is_not_lazy(cpu)) + __cpumask_set_cpu(cpu, cond_cpumask); + } + __smp_call_function_many(cond_cpumask, flush_tlb_func_remote, + flush_tlb_func_local, (void *)info, 1); + } +} + +void native_flush_tlb_others(const struct cpumask...
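The idea in this hunk: rather than allocating a cpumask (awkward here, since this path runs with preemption disabled), reuse a per-CPU scratch mask, clear it, and fill it with only the CPUs whose TLBs are not lazy, so the IPI goes to the smallest possible set. A hedged sketch of just that step; tlb_is_not_lazy() and the __smp_call_function_many() variant that consumes the result are introduced elsewhere in the same series:

    #include <linux/cpumask.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);

    bool tlb_is_not_lazy(int cpu);  /* helper added by the series */

    /* Build the "CPUs that really need an IPI" mask in per-CPU scratch space.
     * Safe because preemption is off and TLB flushes do not nest, so the
     * scratch mask cannot be clobbered by another flush on this CPU. */
    static struct cpumask *build_cond_cpumask(const struct cpumask *cpumask)
    {
        struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask);
        int cpu;

        cpumask_clear(cond_cpumask);
        for_each_cpu(cpu, cpumask) {
            if (tlb_is_not_lazy(cpu))
                __cpumask_set_cpu(cpu, cond_cpumask);
        }

        return cond_cpumask;
    }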
2019 Jun 25
0
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...ruct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); This is logically a stack-local variable, right? But, since we've got preempt off and cpumasks can be huge, we don't want to allocate it on the stack. That might be worth a comment somewhere. > + int cpu; > + > + cpumask_clear(cond_cpumask); > + > + for_each_cpu(cpu, cpumask) { > + if (tlb_is_not_lazy(cpu)) > + __cpumask_set_cpu(cpu, cond_cpumask); > + } FWIW, it's probably worth calling out in the changelog that this loop exists in on_each_cpu_cond_mask() too. It looks bad here, but it's...
2019 Jun 26
2
[PATCH 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...t; > This is logically a stack-local variable, right? But, since we've got > preempt off and cpumasks can be huge, we don't want to allocate it on > the stack. That might be worth a comment somewhere. I will add a comment here. > >> + int cpu; >> + >> + cpumask_clear(cond_cpumask); >> + >> + for_each_cpu(cpu, cpumask) { >> + if (tlb_is_not_lazy(cpu)) >> + __cpumask_set_cpu(cpu, cond_cpumask); >> + } > > FWIW, it's probably worth calling out in the changelog that this loop > exists in on_each_cpu_cond_mask() too...
2019 Jul 19
0
[PATCH v3 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...pumask)) { lockdep_assert_irqs_enabled(); local_irq_disable(); flush_tlb_func_local((void *)&full_flush_tlb_info); local_irq_enable(); } - if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) - flush_tlb_others(&batch->cpumask, &full_flush_tlb_info); - cpumask_clear(&batch->cpumask); put_cpu(); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 26e8b326966d..48f7c7eb4dbc 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1345,8 +1345,8 @@ static void xen_flush_tlb_one_user(unsigned long addr) preempt_enable(); } -s...
2019 May 31
2
[RFC PATCH v2 04/12] x86/mm/tlb: Flush remote and local TLBs concurrently
...the condition + * instead of allocating a new one. + * + * This works under the assumption that there are no nested TLB + * flushes, an assumption that is already made in + * flush_tlb_mm_range(). + */ + struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); + int cpu; + + cpumask_clear(cond_cpumask); + + for_each_cpu(cpu, cpumask) { + if (tlb_is_not_lazy(cpu)) + __cpumask_set_cpu(cpu, cond_cpumask); + } + __smp_call_function_many(cond_cpumask, flush_tlb_func_remote, + flush_tlb_func_local, (void *)info, 1); + } +} + +void native_flush_tlb_others(const struct cpumask...
2019 Jul 02
0
[PATCH v2 4/9] x86/mm/tlb: Flush remote and local TLBs concurrently
...al_irq_disable(); - flush_tlb_func_local((void *)&full_flush_tlb_info); + flush_tlb_func_local(&full_flush_tlb_info); local_irq_enable(); } - if (cpumask_any_but(&batch->cpumask, cpu) < nr_cpu_ids) - flush_tlb_others(&batch->cpumask, &full_flush_tlb_info); - cpumask_clear(&batch->cpumask); put_cpu(); diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index beb44e22afdf..19e481e6e904 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -1355,8 +1355,8 @@ static void xen_flush_tlb_one_user(unsigned long addr) preempt_enable(); } -s...
2019 May 25
3
[RFC PATCH 5/6] x86/mm/tlb: Flush remote and local TLBs concurrently
...the condition + * instead of allocating a new one. + * + * This works under the assumption that there are no nested TLB + * flushes, an assumption that is already made in + * flush_tlb_mm_range(). + */ + struct cpumask *cond_cpumask = this_cpu_ptr(&flush_tlb_mask); + int cpu; + + cpumask_clear(cond_cpumask); + + for_each_cpu(cpu, cpumask) { + if (tlb_is_not_lazy(cpu)) + __cpumask_set_cpu(cpu, cond_cpumask); + } + __smp_call_function_many(cond_cpumask, flush_tlb_func_remote, + flush_tlb_func_local, (void *)info, 1); + } +} + +void native_flush_tlb_others(const struct cpumask...