Results matching "cpumask_set_cpu" (from an estimated 97 matches in the archive)
2015 Jun 04
1
[PATCH] virtio_pci: Clear stale cpumask when setting irq affinity
...common.c
index e894eb278d83..eba1b7ac7294 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
if (cpu == -1)
irq_set_affinity_hint(irq, NULL);
else {
+ cpumask_clear(mask);
cpumask_set_cpu(cpu, mask);
irq_set_affinity_hint(irq, mask);
}
--
1.7.10.4
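The bug this patch addresses is that vp_set_vq_affinity() reuses the same cpumask storage on every call, so setting a new CPU without first clearing the mask leaves the previously requested CPU's bit behind and the affinity hint ends up naming several CPUs. A minimal userspace sketch of that accumulation, using a plain 64-bit word as a stand-in for the kernel cpumask (the function names and 64-CPU limit are illustrative, not the driver's code):

/* Userspace model of the stale-mask bug and the clear-then-set fix.
 * The mask persists across calls, like the per-vq mask in the driver. */
#include <stdint.h>
#include <stdio.h>

static uint64_t affinity_mask;

static void set_vq_affinity_buggy(int cpu)
{
    affinity_mask |= 1ULL << cpu;   /* cpumask_set_cpu() without cpumask_clear() */
}

static void set_vq_affinity_fixed(int cpu)
{
    affinity_mask = 0;              /* cpumask_clear() */
    affinity_mask |= 1ULL << cpu;   /* cpumask_set_cpu() */
}

int main(void)
{
    set_vq_affinity_buggy(1);
    set_vq_affinity_buggy(3);       /* stale bit for CPU 1 is still set */
    printf("buggy mask: %#llx\n", (unsigned long long)affinity_mask);

    set_vq_affinity_fixed(1);
    set_vq_affinity_fixed(3);       /* only CPU 3 remains */
    printf("fixed mask: %#llx\n", (unsigned long long)affinity_mask);
    return 0;
}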
2013 Sep 27
4
[PATCH net-next] virtio-net: switch to use XPS to choose txq
...nfo *vi)
return;
}
+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+ return;
+
i = 0;
for_each_online_cpu(cpu) {
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
- *per_cpu_ptr(vi->vq_index, cpu) = i;
+ cpumask_clear(cpumask);
+ cpumask_set_cpu(cpu, cpumask);
+ netif_set_xps_queue(vi->dev, cpumask, i);
i++;
}
vi->affinity_hint_set = true;
+ free_cpumask_var(cpumask);
}
static int virtnet_cpu_callback(struct notifier_block *nfb,
@@ -1217,28 +1210,6 @@ static int virtnet_change_mtu(struct net_device *dev, int new_mtu)...
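For context, XPS (transmit packet steering) lets a driver tell the networking core which CPUs should use which TX queue; the patch builds a one-CPU mask per queue and registers it with netif_set_xps_queue(). A rough userspace model of the resulting lookup, with made-up names and a fixed queue count, just to show the per-queue-mask idea:

/* Userspace model of the XPS idea: each TX queue advertises a CPU mask,
 * and the transmit path picks a queue whose mask contains the sending CPU.
 * Names and sizes are illustrative, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NUM_TXQ 4

static uint64_t xps_cpus[NUM_TXQ];      /* one mask per TX queue */

static int pick_txq(int cpu)
{
    for (int q = 0; q < NUM_TXQ; q++)
        if (xps_cpus[q] & (1ULL << cpu))
            return q;
    return 0;                           /* fall back to queue 0 if nothing matches */
}

int main(void)
{
    /* The patch maps CPU i to queue i: a single-bit mask per queue. */
    for (int q = 0; q < NUM_TXQ; q++)
        xps_cpus[q] = 1ULL << q;

    for (int cpu = 0; cpu < NUM_TXQ; cpu++)
        printf("cpu %d -> txq %d\n", cpu, pick_txq(cpu));
    return 0;
}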
2013 May 06
2
[PATCH v2] xen/gic: EOI irqs on the right pcpu
...ect_irq(struct vcpu *v, unsigned int irq, int virtual)
n->irq = irq;
n->priority = priority;
if (!virtual)
+ {
n->desc = irq_to_desc(irq);
- else
+ cpumask_clear(&n->eoimask);
+ /* Assume we received the IRQ on the current pcpu */
+ cpumask_set_cpu(smp_processor_id(), &n->eoimask);
+ } else {
n->desc = NULL;
+ }
/* the irq is enabled */
if ( rank->ienable & (1 << (irq % 32)) )
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index cca7416..5561531 100644
--- a/xen/inclu...
2013 Aug 28
0
[PATCH] percpu ida: Switch to cpumask_t, add some comments
...*pool,
struct percpu_ida_cpu *tags)
{
@@ -1317,8 +1327,8 @@ int percpu_ida_alloc(struct percpu_ida *pool, gfp_t gfp)
if (tags->nr_free) {
tag = tags->freelist[--tags->nr_free];
if (tags->nr_free)
- set_bit(smp_processor_id(),
- pool->cpus_have_tags);
+ cpumask_set_cpu(smp_processor_id(),
+ &pool->cpus_have_tags);
}
spin_unlock(&pool->lock);
@@ -1363,8 +1373,8 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag)
spin_unlock(&tags->lock);
if (nr_free == 1) {
- set_bit(smp_processor_id(),
- pool->cpus_have_tag...
2012 Mar 09
10
[PATCH 0 of 9] (v2) arm: SMP boot
This patch series implements SMP boot for arch/arm, as far as getting
all CPUs up and running the idle loop.
Changes from v1:
- moved barriers out of loop in udelay()
- dropped broken GIC change in favour of explanatory comment
- made the increment of ready_cpus atomic (I couldn't move the
increment to before signalling the next CPU because the PT
switch has to happen between
2011 Oct 20
0
[PATCH 07/12] cpufreq: allocate CPU masks dynamically
...= per_cpu(cpufreq_cpu_policy, firstcpu);
per_cpu(cpufreq_cpu_policy, cpu) = policy;
@@ -201,15 +213,15 @@ int cpufreq_add_cpu(unsigned int cpu)
printk("adding CPU %u\n", cpu);
}
- cpu_set(cpu, policy->cpus);
- cpu_set(cpu, cpufreq_dom->map);
+ cpumask_set_cpu(cpu, policy->cpus);
+ cpumask_set_cpu(cpu, cpufreq_dom->map);
ret = cpufreq_statistic_init(cpu);
if (ret)
goto err1;
- if (hw_all ||
- (cpus_weight(cpufreq_dom->map) == perf->domain_info.num_processors)) {
+ if (hw_all || (cpumask_weight(cpufreq_do...
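The change converts the cpufreq structures from embedded cpumask_t fields manipulated with cpu_set()/cpus_weight() to dynamically allocated masks driven through cpumask_set_cpu()/cpumask_weight(), so the structures no longer carry NR_CPUS-sized bitmaps inline. A small userspace model of that allocate-then-operate pattern (the helper names only mirror the kernel API, they are not it):

/* Userspace model: the CPU mask is heap-allocated rather than embedded
 * at its maximum possible size. Names are illustrative only. */
#include <stdio.h>
#include <stdlib.h>

struct cpu_mask {
    unsigned long bits[1];              /* enough for 64 CPUs in this model */
};

static struct cpu_mask *mask_alloc(void)
{
    return calloc(1, sizeof(struct cpu_mask));   /* like zalloc_cpumask_var() */
}

static void mask_set_cpu(unsigned int cpu, struct cpu_mask *m)
{
    m->bits[0] |= 1UL << cpu;                    /* like cpumask_set_cpu() */
}

static unsigned int mask_weight(const struct cpu_mask *m)
{
    return __builtin_popcountl(m->bits[0]);      /* like cpumask_weight() */
}

int main(void)
{
    struct cpu_mask *map = mask_alloc();
    if (!map)
        return 1;
    mask_set_cpu(0, map);
    mask_set_cpu(3, map);
    printf("weight = %u\n", mask_weight(map));   /* prints 2 */
    free(map);
    return 0;
}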
2019 May 08
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...ev_active())
can't you just use is_prot_virt_guest here?
> + return;
> +
> + /* make sure bounce buffers are shared */
> + swiotlb_init(1);
> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
2017 Sep 06
4
[PATCH v2 0/2] guard virt_spin_lock() with a static key
With virt_spin_lock() guarded by a static key, the bare-metal case
can be optimized by patching the call away completely. A kernel
running as a guest can decide whether to use paravirtualized
spinlocks, the current fallback to the unfair test-and-set scheme, or
to mimic the bare-metal behavior.
V2:
- use static key instead of making virt_spin_lock() a pvops function
Juergen Gross
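The "unfair test-and-set scheme" mentioned as the guest fallback is a lock acquired by repeatedly attempting an atomic exchange, with no queueing, so waiters can overtake one another. A standalone C11 model of that scheme, purely illustrative and not the kernel's virt_spin_lock() code:

/* Userspace model of an unfair test-and-set spinlock. */
#include <stdatomic.h>
#include <stdbool.h>

struct tas_lock {
    atomic_bool locked;
};

static void tas_lock_acquire(struct tas_lock *l)
{
    /* Spin until we are the one to flip the flag from false to true.
     * There is no queue, so waiters can overtake each other (unfair). */
    while (atomic_exchange_explicit(&l->locked, true, memory_order_acquire))
        ;
}

static void tas_lock_release(struct tas_lock *l)
{
    atomic_store_explicit(&l->locked, false, memory_order_release);
}

int main(void)
{
    struct tas_lock l = { .locked = false };
    tas_lock_acquire(&l);
    /* critical section */
    tas_lock_release(&l);
    return 0;
}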
2013 Sep 26
8
[PATCH v5 0/7] Dissociate logical and gic/hardware CPU ID
Hi,
This is the fifth version of this patch series.
With the Versatile Express TC2, it's possible to boot with only A7 or only A15 cores. If
the user chooses to boot with only A7, the CPU ID will start at 0x100. As Xen
relies on it to set the logical ID and the GIC, it won't be possible to use
Xen in this configuration.
This patch series is divided into 3 parts:
- Patch 1: prepare Xen
2017 Sep 05
1
[PATCH 3/4] paravirt: add virt_spin_lock pvops function
...ndif /* CONFIG_PARAVIRT_SPINLOCKS */
> #endif /* CONFIG_PARAVIRT */
Because I think the above only ever uses native_virt_spin_lock() when
PARAVIRT.
> @@ -1381,6 +1382,7 @@ void __init native_smp_prepare_boot_cpu(void)
> /* already set me in cpu_online_mask in boot_cpu_init() */
> cpumask_set_cpu(me, cpu_callout_mask);
> cpu_set_state_online(me);
> + native_pv_lock_init();
> }
Aah, this is where that goes.. OK that works too.
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...; +static void pv_init(void)
> +{
> + if (!sev_active())
> + return;
> +
> + /* make sure bounce buffers are shared */
> + swiotlb_init(1);
> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +176,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
2013 Sep 29
0
[PATCH net-next] virtio-net: switch to use XPS to choose txq
...k_var(&cpumask, GFP_KERNEL))
> + return;
> +
> i = 0;
> for_each_online_cpu(cpu) {
> virtqueue_set_affinity(vi->rq[i].vq, cpu);
> virtqueue_set_affinity(vi->sq[i].vq, cpu);
> - *per_cpu_ptr(vi->vq_index, cpu) = i;
> + cpumask_clear(cpumask);
> + cpumask_set_cpu(cpu, cpumask);
> + netif_set_xps_queue(vi->dev, cpumask, i);
> i++;
> }
>
> vi->affinity_hint_set = true;
> + free_cpumask_var(cpumask);
> }
Um, isn't this just cpumask_of(cpu)?
Cheers,
Rusty.
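Rusty's point is that a mask containing exactly one CPU already exists as cpumask_of(cpu), so the allocate/clear/set/free sequence around netif_set_xps_queue() is unnecessary. A kernel-style sketch of how the loop might look with that suggestion applied (whether the follow-up revision did exactly this is not shown here):

/* Kernel-style sketch only, not a standalone program: the loop from the
 * patch rewritten with Rusty's cpumask_of() suggestion. */
i = 0;
for_each_online_cpu(cpu) {
    virtqueue_set_affinity(vi->rq[i].vq, cpu);
    virtqueue_set_affinity(vi->sq[i].vq, cpu);
    netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
    i++;
}
vi->affinity_hint_set = true;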
2017 Sep 06
0
[PATCH v2 1/2] paravirt/locks: use new static key for controlling call of virt_spin_lock()
...;
#include <asm/realmode.h>
#include <asm/misc.h>
+#include <asm/qspinlock.h>
/* Number of siblings per CPU package */
int smp_num_siblings = 1;
@@ -1381,6 +1382,7 @@ void __init native_smp_prepare_boot_cpu(void)
/* already set me in cpu_online_mask in boot_cpu_init() */
cpumask_set_cpu(me, cpu_callout_mask);
cpu_set_state_online(me);
+ native_pv_lock_init();
}
void __init native_smp_cpus_done(unsigned int max_cpus)
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 294294c71ba4..838d235b87ef 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/lockin...
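The hunk only shows native_pv_lock_init() being called from boot-CPU setup; the series presumably uses it to switch the new static key off on bare metal. A sketch of what that could look like, where the X86_FEATURE_HYPERVISOR check is an assumption rather than something quoted in this excerpt:

/* Assumed implementation sketch, not quoted from the patch: disable the
 * virt_spin_lock static key when not running under a hypervisor. */
void __init native_pv_lock_init(void)
{
    if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
        static_branch_disable(&virt_spin_lock_key);
}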
2019 Apr 26
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...+EXPORT_SYMBOL_GPL(sev_active);
+
+/* protected virtualization */
+static void pv_init(void)
+{
+ if (!sev_active())
+ return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +182,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.16.4
2019 Jun 06
0
[PATCH v4 1/8] s390/mm: force swiotlb for protected virtualization
...is_prot_virt_guest();
+}
+
+/* protected virtualization */
+static void pv_init(void)
+{
+ if (!is_prot_virt_guest())
+ return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.17.1