search for: cpu_dead

Displaying results from an estimated 56 matches for "cpu_dead".

2012 Dec 26
5
[RFC PATCH] virtio-net: reset virtqueue affinity when doing cpu hotplug
...044,26 @@ static void virtnet_set_affinity(struct virtnet_info *vi, bool set)
 	vi->affinity_hint_set = false;
 }

+static int virtnet_cpu_callback(struct notifier_block *nfb,
+				unsigned long action, void *hcpu)
+{
+	switch(action) {
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
+		cpu_hotplug = true;
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block virtnet_cpu_notifier = {
+	.notifier_call = virtnet_cpu_callback,
+};
+
 static void virtnet_get_ringparam(struct net_device *dev, struct ethtool_ringparam...
2013 Apr 19
0
[PATCH] x86/HVM: move per-vendor function tables into .init.data
...intercept(unsigne
 	svm_asid_g_invlpg(curr, vaddr);
 }

-static struct hvm_function_table __read_mostly svm_function_table = {
+static struct hvm_function_table __initdata svm_function_table = {
     .name = "SVM",
     .cpu_up_prepare = svm_cpu_up_prepare,
     .cpu_dead = svm_cpu_dead,
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1504,7 +1504,7 @@ static void vmx_sync_pir_to_irr(struct v
         vlapic_set_vector(i, &vlapic->regs->data[APIC_IRR]);
 }

-static struct hvm_function_table __read_mostly vmx_function_tabl...
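For context, the __initdata change is safe because each vendor table is read exactly once at boot: Xen copies it by value into the single runtime hvm_funcs and never touches the original again. A minimal sketch of that pattern, simplified from xen/arch/x86/hvm/hvm.c rather than quoted from it:

/* Sketch: why the per-vendor table can be __initdata. hvm_enable() runs
 * once at boot, picks the vendor table, and copies it by value into the
 * runtime hvm_funcs; the __initdata original is then discarded with the
 * rest of .init.data. Simplified, illustrative, not verbatim Xen source. */
struct hvm_function_table hvm_funcs __read_mostly;

static int __init hvm_enable(void)
{
    const struct hvm_function_table *fns = NULL;

    switch ( boot_cpu_data.x86_vendor )
    {
    case X86_VENDOR_INTEL:
        fns = start_vmx();
        break;
    case X86_VENDOR_AMD:
        fns = start_svm();
        break;
    }

    if ( fns == NULL )
        return 0;       /* HVM simply stays disabled */

    hvm_funcs = *fns;   /* struct copy into the runtime table */
    return 0;
}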
2013 Jan 07
1
[PATCH V2 1/2] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As M.S.T mentioned, set affinity will not work well when CPU IDs are not consecutive, which can happen after CPU hot unplug. Fix this bug by traversing the online CPUs. Cc: Rusty Russell <rusty at rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst at redhat.com> Cc: Jason Wang <jasowang at redhat.com> Cc: Eric Dumazet <erdnetdev at gmail.com> Cc: virtualization at
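The core of the fix, as a minimal sketch (simplified from the posted patch; locking and the affinity_hint bookkeeping are omitted): walk the online CPU mask so that gaps in the CPU ID space left by hot unplug are simply skipped.

/* Sketch: bind queue pair i to the i-th *online* CPU instead of to CPU
 * number i. virtqueue_set_affinity() is the real virtio API; the
 * surrounding checks are abbreviated from the patch. */
static void virtnet_set_affinity(struct virtnet_info *vi)
{
	int cpu, i = 0;

	/* Only pin queues when there is one queue pair per online CPU. */
	if (vi->max_queue_pairs != num_online_cpus())
		return;

	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vi->rq[i].vq, cpu);
		virtqueue_set_affinity(vi->sq[i].vq, cpu);
		i++;
	}
}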
2016 Aug 12
0
[PATCH 6/6] net: virtio-net: Convert to hotplug state machine
...-	struct virtnet_info *vi = container_of(nfb, struct virtnet_info, nb);
+	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+						   node);
+	virtnet_set_affinity(vi);
+	return 0;
+}

-	switch(action & ~CPU_TASKS_FROZEN) {
-	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
-	case CPU_DEAD:
-		virtnet_set_affinity(vi);
-		break;
-	case CPU_DOWN_PREPARE:
-		virtnet_clean_affinity(vi, (long)hcpu);
-		break;
-	default:
-		break;
-	}
+static int virtnet_cpu_dead(unsigned int cpu, struct hlist_node *node)
+{
+	struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
+...
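Under the hotplug state machine the notifier and its action switch are replaced by one callback per event, registered once for the driver and once per device instance. A rough sketch of the registration side (state and function names approximate, not quoted from the patch):

/* Sketch: replace the CPU notifier with cpuhp multi-instance states.
 * cpuhp_setup_state_multi() and cpuhp_state_add_instance_nocalls() are
 * the real API; the names below are illustrative. */
static int virtnet_cpu_online(unsigned int cpu, struct hlist_node *node);
static int virtnet_cpu_down_prep(unsigned int cpu, struct hlist_node *node);

static enum cpuhp_state virtionet_online;

static int __init virtio_net_hotplug_init(void)
{
	int ret;

	/* Dynamic AP-online state: callbacks for CPU up / down-prepare. */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "virtio/net:online",
				      virtnet_cpu_online, virtnet_cpu_down_prep);
	if (ret < 0)
		return ret;
	virtionet_online = ret;
	return 0;
}

/* Per device: hook this virtnet_info's hlist_node into the state. */
static int virtnet_register_hotplug(struct virtnet_info *vi)
{
	return cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
}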
2006 Feb 08
2
[PATCH] make x86_64 vcpu hotplug work like i386
...face/vcpu.h>
 #include <asm/desc.h>
 #include <asm/proto.h>
 #include <asm/hardirq.h>
@@ -143,22 +144,7 @@
 /* We halt the CPU with physical CPU hotplug */
 static inline void play_dead(void)
 {
-	idle_task_exit();
-	wbinvd();
-	mb();
-	/* Ack it */
-	__get_cpu_var(cpu_state) = CPU_DEAD;
-
-	/* We shouldn't have to disable interrupts while dead, but
-	 * some interrupts just don't seem to go away, and this makes
-	 * it "work" for testing purposes. */
-	/* Death loop */
-	while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
-		HYPERVISOR_sched_op(SCHEDOP_...
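The i386 behaviour being adopted here replaces that polling death loop with an explicit request to offline the vCPU. A hedged sketch of the resulting shape; VCPUOP_down is the real operation from xen/interface/vcpu.h, the surrounding details are simplified:

/* Sketch only: ask Xen to park this vCPU instead of spinning until
 * cpu_state flips back to CPU_UP_PREPARE. If the vCPU is later brought
 * back up, the hypercall returns and the CPU re-enters the boot path. */
static inline void play_dead(void)
{
	idle_task_exit();
	local_irq_disable();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();	/* illustrative: resume as if freshly onlined */
}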
2012 Sep 11
0
[PATCH 1/3] x86/hvm: don't use indirect calls without need
...t vmcb_struct *vmcb = v->arch.hvm_svm.vmcb;
     uint64_t value;
@@ -1128,11 +1125,6 @@ static int svm_event_pending(struct vcpu
     return vmcb->eventinj.fields.v;
 }

-static int svm_do_pmu_interrupt(struct cpu_user_regs *regs)
-{
-    return vpmu_do_interrupt(regs);
-}
-
 static void svm_cpu_dead(unsigned int cpu)
 {
     free_xenheap_page(per_cpu(hsa, cpu));
@@ -1997,7 +1989,6 @@ static struct hvm_function_table __read_
     .get_segment_register = svm_get_segment_register,
     .set_segment_register = svm_set_segment_register,
     .get_shadow_gs_base = svm_get_shadow_gs_base,
-    .upd...
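The cleanup pattern, sketched at a hypothetical call site: both vendors' hooks were identical one-line wrappers around the same vendor-independent helper, so the indirect call bought nothing.

/* Sketch, not Xen source: the function below is hypothetical and only
 * illustrates the devirtualisation this patch performs. */
static void pmu_interrupt_example(struct cpu_user_regs *regs)
{
    /* Before: bounce through the per-vendor function table. */
    /* hvm_funcs.do_pmu_interrupt(regs); */

    /* After: call the vendor-independent helper directly -- same
     * behaviour, one less indirect branch, one less table entry. */
    vpmu_do_interrupt(regs);
}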
2007 Feb 14
9
DomU crash during migration when suspending source domain
Just ran into an odd DomU crash doing live migration of a 4-VCPU domain (with 3.0.4, but the code looks the same in 2.6.18/unstable to me) - the actual panic is attached at the end of this, but the bottom line is that the code in cache_remove_shared_cpu_map (in arch/i386/kernel/cpu/intel_cacheinfo.c) is attempting to clean up the cache info for a processor that does not yet have this info set up -
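The kind of guard that avoids the panic, as a hedged sketch (names follow intel_cacheinfo.c of that vintage; treat the details as approximate, not as the actual fix that was merged):

/* Sketch: bail out when this CPU's cacheinfo was never allocated,
 * instead of walking shared_cpu_map through a NULL pointer while the
 * source domain is suspending. cpuid4_info and CPUID4_INFO_IDX() are
 * the file's real per-CPU bookkeeping; the guard is the illustration. */
static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
	struct _cpuid4_info *this_leaf;
	int sibling;

	if (cpuid4_info[cpu] == NULL)	/* info not set up yet: nothing to clean */
		return;

	this_leaf = CPUID4_INFO_IDX(cpu, index);
	for_each_cpu_mask(sibling, this_leaf->shared_cpu_map)
		cpu_clear(cpu, CPUID4_INFO_IDX(sibling, index)->shared_cpu_map);
}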
2013 Jan 08
3
[PATCH V3 1/2] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As Michael mentioned, set affinity and select queue will not work well when CPU IDs are not consecutive, which can happen after CPU hot unplug. Fix this bug by traversing the online CPUs, and create a per-CPU variable that maps each CPU to its preferred virtual queue. Cc: Rusty Russell <rusty at rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst at redhat.com> Cc: Jason
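The per-CPU variable half of the fix, as a sketch (simplified from the series; the traversal half is sketched under the V2 posting above). Each online CPU records which queue pair it was assigned, and the transmit path looks that up instead of assuming queue == CPU ID:

/* Sketch: a per-CPU CPU->queue map. virtnet_set_affinity() fills
 * vq_index while walking the online CPUs; the select-queue hook reads
 * it back. -1 marks a CPU that came up after the map was built. */
struct virtnet_info {
	/* ...existing fields elided... */
	int __percpu *vq_index;		/* preferred queue pair for this CPU */
};

static u16 virtnet_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	struct virtnet_info *vi = netdev_priv(dev);
	int txq = *this_cpu_ptr(vi->vq_index);

	if (txq == -1)			/* no mapping: fall back to queue 0 */
		txq = 0;

	return txq;
}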
2013 Jan 11
2
[PATCH V4 1/2] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As Michael mentioned, set affinity and select queue will not work well when CPU IDs are not consecutive, which can happen after CPU hot unplug. Fix this bug by traversing the online CPUs, and create a per-CPU variable that maps each CPU to its preferred virtual queue. Cc: Rusty Russell <rusty at rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst at redhat.com> Cc: Jason
2013 Mar 20
7
[PATCH V6 0/5] virtio-scsi multiqueue
This series implements virtio-scsi queue steering, which gives performance improvements of up to 50% (measured with both QEMU and tcm_vhost backends). This version is rebased on Rusty's virtio ring rework patches, which have already gone into virtio-next today. We hope this can go into virtio-next together with the virtio ring rework patches. V6: rework "redo allocation of target data"
2013 Jan 25
5
[PATCH V8 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As Michael mentioned, set affinity and select queue will not work well when CPU IDs are not consecutive, which can happen after CPU hot unplug. Fix this bug by traversing the online CPUs, and create a per-CPU variable that maps each CPU to its preferred virtual queue. Cc: Rusty Russell <rusty at rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst at redhat.com> Cc: Jason
2012 Mar 09
10
[PATCH 0 of 9] (v2) arm: SMP boot
This patch series implements SMP boot for arch/arm, as far as getting all CPUs up and running the idle loop. Changes from v1: - moved barriers out of loop in udelay() - dropped broken GIC change in favour of explanatory comment - made the increment of ready_cpus atomic (I couldn't move the increment to before signalling the next CPU because the PT switch has to happen between
2013 Jan 21
6
[PATCH V6 1/3] virtio-net: fix the set affinity bug when CPU IDs are not consecutive
As Michael mentioned, set affinity and select queue will not work well when CPU IDs are not consecutive, which can happen after CPU hot unplug. Fix this bug by traversing the online CPUs, and create a per-CPU variable that maps each CPU to its preferred virtual queue. Cc: Rusty Russell <rusty at rustcorp.com.au> Cc: "Michael S. Tsirkin" <mst at redhat.com> Cc: Jason