Displaying 20 results from an estimated 67 matches for "cpumask_of".
2013 Sep 30
4
[net-next PATCH V2] virtio-net: switch to use XPS to choose txq
...XPS support,
there's no need to keep the per-cpu vq_index and .ndo_select_queue(),
so they are removed as well.
Cc: Rusty Russell <rusty at rustcorp.com.au>
Cc: Michael S. Tsirkin <mst at redhat.com>
Signed-off-by: Jason Wang <jasowang at redhat.com>
---
Changes from V1:
- use cpumask_of() instead of allocating dynamically
drivers/net/virtio_net.c | 48 +--------------------------------------------
1 files changed, 2 insertions(+), 46 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index defec2b..4eca652 100644
--- a/drivers/net/virtio_net.c
+++ b/d...
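The V1 -> V2 change called out in the changelog is that a single-CPU
affinity mask no longer has to be allocated: cpumask_of(cpu) yields a
const, statically backed one-CPU mask. A minimal sketch of the two styles,
assuming hypothetical helpers around netif_set_xps_queue():

#include <linux/cpumask.h>
#include <linux/netdevice.h>

/* V1 style: allocate a cpumask just to name one CPU (helper is hypothetical). */
static int set_txq_affinity_alloc(struct net_device *dev, int cpu, u16 txq)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_set_cpu(cpu, mask);
	err = netif_set_xps_queue(dev, mask, txq);
	free_cpumask_var(mask);
	return err;
}

/* V2 style: cpumask_of(cpu) is a const single-CPU mask, so there is no
 * allocation and no failure path. */
static int set_txq_affinity(struct net_device *dev, int cpu, u16 txq)
{
	return netif_set_xps_queue(dev, cpumask_of(cpu), txq);
}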
2013 Jun 20
3
[PATCH V2 1/2] cpufreq, xenpm: fix cpufreq and xenpm mismatch
Currently cpufreq and xenpm are out of sync. Fix cpufreq reporting of
whether turbo mode is enabled, and fix xenpm to decode the value as a
boolean rather than a tristate.
Signed-off-by: Jacob Shin <jacob.shin@amd.com>
---
tools/misc/xenpm.c | 14 +++-----------
xen/drivers/cpufreq/utility.c | 2 +-
2 files changed, 4 insertions(+), 12 deletions(-)
diff --git a/tools/misc/xenpm.c
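The mismatch being fixed is a decode problem: the hypervisor reports turbo
as a plain on/off value, while xenpm was still decoding a tristate. A
hedged sketch of the corrected xenpm-side decode (the turbo_enabled field
name is an assumption, not taken from the patch):

#include <stdio.h>

/* Decode turbo state as a boolean, not a tristate: only enabled/disabled
 * remain. The field name is assumed for illustration. */
static void print_turbo(int turbo_enabled)
{
	printf("turbo mode           : %s\n",
	       turbo_enabled ? "enabled" : "disabled");
}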
2012 Aug 16
5
[PATCH] AMD, powernow: Update P-state directly when _PSD's CoordType is DOMAIN_COORD_TYPE_HW_ALL
...able[next_state].index;
if (perf->state == next_perf_state) {
@@ -137,26 +122,28 @@ static int powernow_cpufreq_target(struc
return 0;
}
- if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
- cmd.mask = &online_policy_cpus;
- else
- cmd.mask = cpumask_of(policy->cpu);
+ if (policy->shared_type == CPUFREQ_SHARED_TYPE_HW &&
+ likely(policy->cpu == smp_processor_id())) {
+ transition_pstate(&next_perf_state);
+ cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state);
+ } else {
+...
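The visible half of the hunk is the new fast path: with hardware-coordinated
P-states (CPUFREQ_SHARED_TYPE_HW), a frequency request only has to reach one
core of the domain, so when the caller already runs on policy->cpu the
P-state can be updated directly instead of bouncing through an IPI over a
cpumask. Restated as a sketch (names come from the hunk itself; the else
branch is cut off in the excerpt, so its body here is only a plausible
reading, not the patch's code):

if ( policy->shared_type == CPUFREQ_SHARED_TYPE_HW &&
     likely(policy->cpu == smp_processor_id()) )
{
    /* Hardware coordinates the domain: update this core directly. */
    transition_pstate(&next_perf_state);
    cpufreq_statistic_update(policy->cpu, perf->state, next_perf_state);
}
else
{
    /* Presumably: route the request to the policy CPU, e.g. via
     * on_selected_cpus(cpumask_of(policy->cpu), ...). */
}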
2013 Dec 15
1
[PATCH v3 [resend] 14/18] smp, x86, xen: kill SMP single function call interrupt
...).name = callfunc_name;
-
/*
* The IRQ worker on PVHVM goes through the native path and uses the
* IPI mechanism.
@@ -569,8 +548,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- __xen_send_IPI_mask(cpumask_of(cpu),
- XEN_CALL_FUNCTION_SINGLE_VECTOR);
+ __xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_VECTOR);
}
static inline int xen_map_vector(int vector)
@@ -582,10 +560,8 @@ static inline int xen_map_vector(int vector)
xen_vector = XEN_RESCHEDULE_VECTOR;
break;
case CALL_FUNCTION_...
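The whole series rests on one observation, repeated per architecture: with
cpumask_of(), a single-CPU cross-call is just the many-CPU cross-call over a
one-bit mask, so the dedicated "single" vector is redundant. The surviving
function, straight from the hunk:

static void xen_smp_send_call_function_single_ipi(int cpu)
{
	/* One-CPU mask via cpumask_of(); same vector as the multi-CPU path. */
	__xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_VECTOR);
}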
2012 Sep 18
6
[PATCH 2/5] Xen/MCE: vMCE injection
...d->vcpu[0]->cpu_affinity_tmp,
- d->vcpu[0]->cpu_affinity);
- mce_printk(MCE_VERBOSE, "MCE: CPU%d set affinity, old %d\n",
- cpu, d->vcpu[0]->processor);
- vcpu_set_affinity(d->vcpu[0], cpumask_of(cpu));
- vcpu_kick(d->vcpu[0]);
- }
- else
- {
- mce_printk(MCE_VERBOSE,
- "MCE: Kill PV guest with No MCE handler\n");
- domain_crash(d);
- }
+ mce_printk(MCE_...
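For context, the deleted lines implemented the old PV injection scheme,
reconstructed here from the hunk: pin the guest's vCPU0 to the physical CPU
that observed the #MC so its handler runs where the error was signalled,
then kick it; a PV guest without an MCE handler was crashed instead.

/* Old behaviour removed by this patch (per the hunk above): */
vcpu_set_affinity(d->vcpu[0], cpumask_of(cpu)); /* follow the erring pcpu */
vcpu_kick(d->vcpu[0]);                          /* deliver the vMCE */
/* ... and domain_crash(d) when the guest has no #MC handler. */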
2013 Sep 26
8
[PATCH v5 0/7] Dissociate logical and gic/hardware CPU ID
Hi,
This is the fifth version of this patch series.
With the Versatile Express TC2, it's possible to boot with only the A7 or
only the A15 cluster. If the user chooses to boot with only the A7, the CPU
ID will start at 0x100. As Xen relies on it to set the logical ID and the
GIC, it won't be possible to use Xen in this case.
This patch series is divided into 3 parts:
- Patch 1: prepare Xen
2013 May 06
2
[PATCH v2] xen/gic: EOI irqs on the right pcpu
...;
+ if ( eoi ) {
+ /* this is not racy because we can't receive another irq of the
+ * same type until we EOI it. */
+ if ( cpu == smp_processor_id() )
+ gic_irq_eoi((void*)virq);
+ else
+ on_selected_cpus(cpumask_of(cpu), gic_irq_eoi, (void*)virq, 0);
+ }
+
i++;
}
}
diff --git a/xen/arch/arm/vgic.c b/xen/arch/arm/vgic.c
index f9c1a6b..c5370d5 100644
--- a/xen/arch/arm/vgic.c
+++ b/xen/arch/arm/vgic.c
@@ -676,9 +676,14 @@ void vgic_vcpu_inject_irq(struct vcpu *v, unsigned int irq, int virt...
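The pattern above is Xen's standard way of running a function on one
specific physical CPU: build a one-CPU mask with cpumask_of() and hand it to
on_selected_cpus(). A minimal sketch (the payload and its argument encoding
are illustrative, not the patch's exact code):

static void eoi_on_this_cpu(void *info)
{
    unsigned int virq = (unsigned int)(uintptr_t)info;
    /* ... EOI virq on the pcpu this call was routed to ... */
}

/* Final argument 0: don't wait for the remote CPU to finish. */
on_selected_cpus(cpumask_of(cpu), eoi_on_this_cpu, (void *)(uintptr_t)virq, 0);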
2013 Sep 11
0
[RFC PATCH v2 21/25] smp, tile: kill SMP single function call interrupt
...).name = callfunc_name;
-
/*
* The IRQ worker on PVHVM goes through the native path and uses the
* IPI mechanism.
@@ -551,8 +530,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- __xen_send_IPI_mask(cpumask_of(cpu),
- XEN_CALL_FUNCTION_SINGLE_VECTOR);
+ __xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_VECTOR);
}
static inline int xen_map_vector(int vector)
@@ -564,10 +542,8 @@ static inline int xen_map_vector(int vector)
xen_vector = XEN_RESCHEDULE_VECTOR;
break;
case CALL_FUNCTION_...
2015 Jan 23
0
[Resend Patch v4 11/16] smp, x86, xen: Kill SMP single function call interrupt
...).name = callfunc_name;
-
/*
* The IRQ worker on PVHVM goes through the native path and uses the
* IPI mechanism.
@@ -599,8 +578,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- __xen_send_IPI_mask(cpumask_of(cpu),
- XEN_CALL_FUNCTION_SINGLE_VECTOR);
+ __xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_VECTOR);
}
static inline int xen_map_vector(int vector)
@@ -612,10 +590,8 @@ static inline int xen_map_vector(int vector)
xen_vector = XEN_RESCHEDULE_VECTOR;
break;
case CALL_FUNCTION_...
2013 Sep 30
0
[net-next PATCH V2] virtio-net: switch to use XPS to choose txq
...hey are removed as well.
>
> Cc: Rusty Russell <rusty at rustcorp.com.au>
> Cc: Michael S. Tsirkin <mst at redhat.com>
> Signed-off-by: Jason Wang <jasowang at redhat.com>
Acked-by: Michael S. Tsirkin <mst at redhat.com>
> ---
> Changes from V1:
> - use cpumask_of() instead of allocating dynamically
>
> drivers/net/virtio_net.c | 48 +--------------------------------------------
> 1 files changed, 2 insertions(+), 46 deletions(-)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index defec2b..4eca652 100644
> ---...
2013 Dec 04
0
[RFC PATCH v3 18/19] smp, tile: kill SMP single function call interrupt
...).name = callfunc_name;
-
/*
* The IRQ worker on PVHVM goes through the native path and uses the
* IPI mechanism.
@@ -569,8 +548,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
static void xen_smp_send_call_function_single_ipi(int cpu)
{
- __xen_send_IPI_mask(cpumask_of(cpu),
- XEN_CALL_FUNCTION_SINGLE_VECTOR);
+ __xen_send_IPI_mask(cpumask_of(cpu), XEN_CALL_FUNCTION_VECTOR);
}
static inline int xen_map_vector(int vector)
@@ -582,10 +560,8 @@ static inline int xen_map_vector(int vector)
xen_vector = XEN_RESCHEDULE_VECTOR;
break;
case CALL_FUNCTION_...
2018 Aug 07
1
[PATCH net-next] net: allow to call netif_reset_xps_queues() under cpu_read_lock
...net.c b/drivers/net/virtio_net.c
index 62311dde6e71..a4abcfcf26b2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1903,9 +1903,11 @@ static void virtnet_set_affinity(struct virtnet_info *vi)
i = 0;
for_each_online_cpu(cpu) {
+ const unsigned long *mask = cpumask_bits(cpumask_of(cpu));
+
virtqueue_set_affinity(vi->rq[i].vq, cpu);
virtqueue_set_affinity(vi->sq[i].vq, cpu);
- netif_set_xps_queue(vi->dev, cpumask_of(cpu), i);
+ __netif_set_xps_queue(vi->dev, mask, i, false, true);
i++;
}
diff --git a/include/linux/netdevice.h b/include/linux/netdevi...
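The point of this change: virtnet_set_affinity() can run with cpus_read_lock
already held, as the patch title indicates, and the high-level
netif_set_xps_queue() takes that lock itself, so the driver switches to a
lower-level variant that trusts its caller and consumes the raw bitmap.
cpumask_bits() is simply the accessor for the unsigned long array backing a
struct cpumask:

/* cpumask_of(cpu) is a const one-CPU mask; cpumask_bits() exposes the raw
 * bitmap behind it, which the __netif_set_xps_queue() call in the hunk
 * consumes (its extra bool arguments follow the excerpt above). */
const unsigned long *mask = cpumask_bits(cpumask_of(cpu));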