Keir Fraser
2012-Feb-08 09:12 UTC
Re: [PATCH 3 of 4] VIOAPIC: Emulate a version 0x20 IOAPIC
On 08/02/2012 17:05, "Tim Deegan" <tim@xen.org> wrote:

> At 16:45 +0000 on 08 Feb (1328719538), Andrew Cooper wrote:
>> Currently, hvm emulates a version 0x11 IOAPIC. However, depending on
>> the HVM guest's {IO,L}APIC setup, it may take fewer traps into Xen by
>> directly using the VIOAPIC EOI register present in version 0x20,
>> rather than resorting to the legacy method of flipping the trigger
>> mode for the relevant RTE.
>>
>> Currently, all required functionality is already present in the code,
>> except that it is covered by VIOAPIC_IS_IOSAPIC which is never defined.
>>
>> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
>
> This probably ought to introduce an HVM save record to say which kind of
> IOAPIC the VM was booted with, so that after a live migration the OS
> doesn't get confused. :(
>
> It seems unlikely that any OSes rely on the IOAPIC version (and the
> behaviour of that register) being static after boot, but better safe
> than sorry - there might be some confusion in resume from S3 or similar.

Yes, so unless there is actually a demonstrable win from bumping the
version, we should just not bother, and cull the IS_IOSAPIC code sections.

 -- Keir
Andrew Cooper
2012-Feb-08 16:45 UTC
[PATCH 0 of 4] Prune outdated/impossible preprocessor symbols, and update VIOAPIC emulation
Patch 1 removes CONFIG_SMP.

Patch 2 removes the separate smp_{,r,w}mb()s, as a result of patch 1.

Patch 4 removes the __ia64__ defines from the x86 arch tree.

Patch 3 is related to patch 4, and changes the VIOAPIC to emulate version 0x20 as a performance gain. It precedes patch 4 so as to be clearer about the functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
CONFIG_SMP is always enabled and !CONFIG_SMP is not supported. So simplify the code a little by removing all the #ifdefs.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/apic.c
--- a/xen/arch/x86/apic.c
+++ b/xen/arch/x86/apic.c
@@ -145,9 +145,8 @@ void ack_bad_irq(unsigned int irq)
 
 void __init apic_intr_init(void)
 {
-#ifdef CONFIG_SMP
     smp_intr_init();
-#endif
+
     /* self generated IPI for local APIC timer */
     set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/cpu/amd.c
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -370,7 +370,6 @@ static void __devinit init_amd(struct cp
 {
     u32 l, h;
-#ifdef CONFIG_SMP
     unsigned long long value;
 
     /* Disable TLB flush filter by setting HWCR.FFDIS on K8
@@ -384,7 +383,6 @@ static void __devinit init_amd(struct cp
         value |= 1 << 6;
         wrmsrl(MSR_K7_HWCR, value);
     }
-#endif
 
     /*
      * FIXME: We should handle the K5 here. Set up the write
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/cpu/mtrr/cyrix.c
--- a/xen/arch/x86/cpu/mtrr/cyrix.c
+++ b/xen/arch/x86/cpu/mtrr/cyrix.c
@@ -279,9 +279,7 @@ cyrix_arr_init(void)
     struct set_mtrr_context ctxt;
     unsigned char ccr[7];
     int ccrc[7] = { 0, 0, 0, 0, 0, 0, 0 };
-#ifdef CONFIG_SMP
     int i;
-#endif
 
     /* flush cache and enable MAPEN */
     set_mtrr_prepare_save(&ctxt);
@@ -334,14 +332,13 @@ cyrix_arr_init(void)
         ccrc[5] = 1;
         setCx86(CX86_CCR5, ccr[5]);
     }
-#ifdef CONFIG_SMP
+
     for (i = 0; i < 7; i++)
         ccr_state[i] = ccr[i];
     for (i = 0; i < 8; i++)
         cyrix_get_arr(i,
                       &arr_state[i].base, &arr_state[i].size,
                       &arr_state[i].type);
-#endif
 
     set_mtrr_done(&ctxt); /* flush cache and disable MAPEN */
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -142,8 +142,6 @@ struct set_mtrr_data {
  */
 int hold_mtrr_updates_on_aps;
 
-#ifdef CONFIG_SMP
-
 static void ipi_handler(void *info)
 /*  [SUMMARY] Synchronisation handler. Executed by "other" CPUs.
     [RETURNS] Nothing.
@@ -175,8 +173,6 @@ static void ipi_handler(void *info)
     local_irq_restore(flags);
 }
 
-#endif
-
 static inline int types_compatible(mtrr_type type1, mtrr_type type2)
 {
     return type1 == MTRR_TYPE_UNCACHABLE || type2 == MTRR_TYPE_UNCACHABLE ||
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/io_apic.c
--- a/xen/arch/x86/io_apic.c
+++ b/xen/arch/x86/io_apic.c
@@ -513,7 +513,6 @@ static void clear_IO_APIC (void)
     }
 }
 
-#ifdef CONFIG_SMP
 static void
 set_ioapic_affinity_irq(struct irq_desc *desc, const cpumask_t *mask)
 {
@@ -550,7 +549,6 @@ set_ioapic_affinity_irq(struct irq_desc
 
     spin_unlock_irqrestore(&ioapic_lock, flags);
 }
-#endif /* CONFIG_SMP */
 
 /*
  * Find the IRQ entry number of a certain pin.
@@ -630,7 +628,6 @@ static int pin_2_irq(int idx, int apic,
  * we need to reprogram the ioredtbls to cater for the cpus which have come online
  * so mask in all cases should simply be TARGET_CPUS
  */
-#ifdef CONFIG_SMP
 void /*__init*/ setup_ioapic_dest(void)
 {
     int pin, ioapic, irq, irq_entry;
@@ -653,7 +650,6 @@ void /*__init*/ setup_ioapic_dest(void)
         }
     }
 }
-#endif
 
 /*
  * EISA Edge/Level control register, ELCR
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/oprofile/nmi_int.c
--- a/xen/arch/x86/oprofile/nmi_int.c
+++ b/xen/arch/x86/oprofile/nmi_int.c
@@ -304,11 +304,6 @@ static int __init p4_init(char ** cpu_ty
         return 0;
     }
 
-#ifndef CONFIG_SMP
-    *cpu_type = "i386/p4", XENOPROF_CPU_TYPE_SIZE);
-    model = &op_p4_spec;
-    return 1;
-#else
     switch (current_cpu_data.x86_num_siblings) {
     case 1:
         *cpu_type = "i386/p4";
@@ -320,7 +315,7 @@ static int __init p4_init(char ** cpu_ty
         model = &op_p4_ht2_spec;
         return 1;
     }
-#endif
+
     printk("Xenoprof ERROR: P4 HyperThreading detected with > 2 threads\n");
     return 0;
diff -r eae25241d571 -r 101b0d7ebb00 xen/arch/x86/oprofile/op_model_p4.c
--- a/xen/arch/x86/oprofile/op_model_p4.c
+++ b/xen/arch/x86/oprofile/op_model_p4.c
@@ -40,19 +40,13 @@ static unsigned int num_counters = NUM_C
    kernel boot-time. */
 static inline void setup_num_counters(void)
 {
-#ifdef CONFIG_SMP
     if (boot_cpu_data.x86_num_siblings == 2)    /* XXX */
         num_counters = NUM_COUNTERS_HT2;
-#endif
 }
 
 static int inline addr_increment(void)
 {
-#ifdef CONFIG_SMP
     return boot_cpu_data.x86_num_siblings == 2 ? 2 : 1;
-#else
-    return 1;
-#endif
 }
@@ -383,11 +377,8 @@ static const struct p4_event_binding p4_
    or "odd" part of all the divided resources. */
 static unsigned int get_stagger(void)
 {
-#ifdef CONFIG_SMP
     int cpu = smp_processor_id();
     return (cpu != cpumask_first(per_cpu(cpu_sibling_mask, cpu)));
-#endif
-    return 0;
 }
@@ -709,7 +700,6 @@ static void p4_stop(struct op_msrs const
 }
 
-#ifdef CONFIG_SMP
 struct op_x86_model_spec const op_p4_ht2_spec = {
     .num_counters = NUM_COUNTERS_HT2,
     .num_controls = NUM_CONTROLS_HT2,
@@ -719,7 +709,7 @@ struct op_x86_model_spec const op_p4_ht2
     .start = &p4_start,
     .stop = &p4_stop
 };
-#endif
+
 
 struct op_x86_model_spec const op_p4_spec = {
     .num_counters = NUM_COUNTERS_NON_HT,
diff -r eae25241d571 -r 101b0d7ebb00 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -83,9 +83,7 @@ struct rcu_data {
     long            blimit;         /* Upper limit on a processed batch */
     int cpu;
     struct rcu_head barrier;
-#ifdef CONFIG_SMP
     long            last_rs_qlen;   /* qlen during the last resched */
-#endif
 };
 
 static DEFINE_PER_CPU(struct rcu_data, rcu_data);
diff -r eae25241d571 -r 101b0d7ebb00 xen/include/asm-x86/config.h
--- a/xen/include/asm-x86/config.h
+++ b/xen/include/asm-x86/config.h
@@ -21,7 +21,6 @@
 #define CONFIG_X86 1
 #define CONFIG_X86_HT 1
 #define CONFIG_PAGING_ASSISTANCE 1
-#define CONFIG_SMP 1
 #define CONFIG_X86_LOCAL_APIC 1
 #define CONFIG_X86_GOOD_APIC 1
 #define CONFIG_X86_IO_APIC 1
diff -r eae25241d571 -r 101b0d7ebb00 xen/include/asm-x86/processor.h
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -189,13 +189,8 @@ struct cpuinfo_x86 {
 
 extern struct cpuinfo_x86 boot_cpu_data;
 
-#ifdef CONFIG_SMP
 extern struct cpuinfo_x86 cpu_data[];
 #define current_cpu_data cpu_data[smp_processor_id()]
-#else
-#define cpu_data (&boot_cpu_data)
-#define current_cpu_data boot_cpu_data
-#endif
 
 extern void set_cpuid_faulting(bool_t enable);
diff -r eae25241d571 -r 101b0d7ebb00 xen/include/asm-x86/smp.h
--- a/xen/include/asm-x86/smp.h
+++ b/xen/include/asm-x86/smp.h
@@ -17,7 +17,6 @@
 #endif
 
 #define BAD_APICID -1U
-#ifdef CONFIG_SMP
 #ifndef __ASSEMBLY__
 
 /*
@@ -65,11 +64,4 @@ void __stop_this_cpu(void);
 
 #endif /* !__ASSEMBLY__ */
 
-#else /* CONFIG_SMP */
-
-#define cpu_physical_id(cpu) boot_cpu_physical_apicid
-
-#define NO_PROC_ID 0xFF /* No processor magic marker */
-
 #endif
-#endif
diff -r eae25241d571 -r 101b0d7ebb00 xen/include/asm-x86/system.h
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -154,15 +154,9 @@ static always_inline unsigned long __cmp
 #define rmb() barrier()
 #define wmb() barrier()
 
-#ifdef CONFIG_SMP
 #define smp_mb() mb()
 #define smp_rmb() rmb()
 #define smp_wmb() wmb()
-#else
-#define smp_mb() barrier()
-#define smp_rmb() barrier()
-#define smp_wmb() barrier()
-#endif
 
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
Now that CONFIG_SMP has been removed, there is no need to define smp_{,r,w}mb()s, which used to be conditionally compiled to different operations (even though those conditionally different operations still ended up as simple barrier()s).

Therefore, remove smp_{,r,w}mb()s and just use regular {,r,w}mb()s.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/acpi/cpu_idle.c
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -260,7 +260,7 @@ static void mwait_idle_with_hints(unsign
     s_time_t expires = per_cpu(timer_deadline, cpu);
 
     __monitor((void *)&mwait_wakeup(cpu), 0, 0);
-    smp_mb();
+    mb();
 
     /*
      * Timer deadline passing is the event on which we will be woken via
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/cpu/mtrr/main.c
--- a/xen/arch/x86/cpu/mtrr/main.c
+++ b/xen/arch/x86/cpu/mtrr/main.c
@@ -248,7 +248,7 @@ static void set_mtrr(unsigned int reg, u
 
     /* ok, reset count and toggle gate */
     atomic_set(&data.count, nr_cpus);
-    smp_wmb();
+    wmb();
     atomic_set(&data.gate,1);
 
     /* do our MTRR business */
@@ -271,7 +271,7 @@ static void set_mtrr(unsigned int reg, u
         cpu_relax();
 
     atomic_set(&data.count, nr_cpus);
-    smp_wmb();
+    wmb();
     atomic_set(&data.gate,0);
 
     /*
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/arch/x86/irq.c
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -207,7 +207,7 @@ static void dynamic_irq_cleanup(unsigned
     spin_unlock_irqrestore(&desc->lock, flags);
 
     /* Wait to make sure it's not being used on another CPU */
-    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
+    do { mb(); } while ( desc->status & IRQ_INPROGRESS );
 
     if (action)
         xfree(action);
@@ -931,7 +931,7 @@ void __init release_irq(unsigned int irq
     spin_unlock_irqrestore(&desc->lock,flags);
 
     /* Wait to make sure it's not being used on another CPU */
-    do { smp_mb(); } while ( desc->status & IRQ_INPROGRESS );
+    do { mb(); } while ( desc->status & IRQ_INPROGRESS );
 
     if (action && action->free_on_release)
         xfree(action);
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/domain.c
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -544,7 +544,7 @@ void domain_shutdown(struct domain *d, u
 
     d->is_shutting_down = 1;
 
-    smp_mb(); /* set shutdown status /then/ check for per-cpu deferrals */
+    mb(); /* set shutdown status /then/ check for per-cpu deferrals */
 
     for_each_vcpu ( d, v )
     {
@@ -594,7 +594,7 @@ int vcpu_start_shutdown_deferral(struct
         return 1;
 
     v->defer_shutdown = 1;
-    smp_mb(); /* set deferral status /then/ check for shutdown */
+    mb(); /* set deferral status /then/ check for shutdown */
     if ( unlikely(v->domain->is_shutting_down) )
         vcpu_check_shutdown(v);
 
@@ -604,7 +604,7 @@ int vcpu_start_shutdown_deferral(struct
 void vcpu_end_shutdown_deferral(struct vcpu *v)
 {
     v->defer_shutdown = 0;
-    smp_mb(); /* clear deferral status /then/ check for shutdown */
+    mb(); /* clear deferral status /then/ check for shutdown */
     if ( unlikely(v->domain->is_shutting_down) )
         vcpu_check_shutdown(v);
 }
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/rcupdate.c
--- a/xen/common/rcupdate.c
+++ b/xen/common/rcupdate.c
@@ -252,7 +252,7 @@ static void rcu_start_batch(struct rcu_c
          * next_pending == 0 must be visible in
          * __rcu_process_callbacks() before it can see new value of cur.
          */
-        smp_wmb();
+        wmb();
         rcp->cur++;
 
         cpumask_copy(&rcp->cpumask, &cpu_online_map);
@@ -340,7 +340,7 @@ static void __rcu_process_callbacks(stru
         /* see the comment and corresponding wmb() in
          * the rcu_start_batch()
          */
-        smp_rmb();
+        rmb();
 
         if (!rcp->next_pending) {
             /* and start it/schedule start if it's a new batch */
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/schedule.c
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -657,7 +657,7 @@ static long do_poll(struct sched_poll *s
 
 #ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
     /* Check for events /after/ setting flags: avoids wakeup waiting race. */
-    smp_mb();
+    mb();
 
     /*
      * Someone may have seen we are blocked but not that we are polling, or
@@ -1173,12 +1173,12 @@ static void schedule(void)
 void context_saved(struct vcpu *prev)
 {
     /* Clear running flag /after/ writing context to memory. */
-    smp_wmb();
+    wmb();
 
     prev->is_running = 0;
 
     /* Check for migration request /after/ clearing running flag. */
-    smp_mb();
+    mb();
 
     SCHED_OP(VCPU2OP(prev), context_saved, prev);
 
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/common/stop_machine.c
--- a/xen/common/stop_machine.c
+++ b/xen/common/stop_machine.c
@@ -59,7 +59,7 @@ static DEFINE_SPINLOCK(stopmachine_lock)
 static void stopmachine_set_state(enum stopmachine_state state)
 {
     atomic_set(&stopmachine_data.done, 0);
-    smp_wmb();
+    wmb();
     stopmachine_data.state = state;
 }
 
@@ -99,7 +99,7 @@ int stop_machine_run(int (*fn)(void *),
     atomic_set(&stopmachine_data.done, 0);
     stopmachine_data.state = STOPMACHINE_START;
 
-    smp_wmb();
+    wmb();
 
     for_each_cpu ( i, &allbutself )
         tasklet_schedule_on_cpu(&per_cpu(stopmachine_tasklet, i), i);
@@ -134,7 +134,7 @@ static void stopmachine_action(unsigned
 
     BUG_ON(cpu != smp_processor_id());
 
-    smp_mb();
+    mb();
 
     while ( state != STOPMACHINE_EXIT )
     {
@@ -157,7 +157,7 @@ static void stopmachine_action(unsigned
             break;
         }
 
-        smp_mb();
+        mb();
         atomic_inc(&stopmachine_data.done);
     }
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/asm-x86/system.h
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -154,10 +154,6 @@ static always_inline unsigned long __cmp
 #define rmb() barrier()
 #define wmb() barrier()
 
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
-
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/xen/list.h
--- a/xen/include/xen/list.h
+++ b/xen/include/xen/list.h
@@ -102,7 +102,7 @@ static inline void __list_add_rcu(struct
 {
     new->next = next;
     new->prev = prev;
-    smp_wmb();
+    wmb();
     next->prev = new;
     prev->next = new;
 }
@@ -244,7 +244,7 @@ static inline void list_replace_rcu(stru
 {
     new->next = old->next;
     new->prev = old->prev;
-    smp_wmb();
+    wmb();
     new->next->prev = new;
     new->prev->next = new;
     old->prev = LIST_POISON2;
@@ -712,7 +712,7 @@ static inline void hlist_replace_rcu(str
 
     new->next = next;
     new->pprev = old->pprev;
-    smp_wmb();
+    wmb();
     if (next)
         new->next->pprev = &new->next;
     *new->pprev = new;
@@ -754,7 +754,7 @@ static inline void hlist_add_head_rcu(st
     struct hlist_node *first = h->first;
     n->next = first;
     n->pprev = &h->first;
-    smp_wmb();
+    wmb();
     if (first)
         first->pprev = &n->next;
     h->first = n;
@@ -804,7 +804,7 @@ static inline void hlist_add_before_rcu(
 {
     n->pprev = next->pprev;
     n->next = next;
-    smp_wmb();
+    wmb();
     next->pprev = &n->next;
     *(n->pprev) = n;
 }
@@ -832,7 +832,7 @@ static inline void hlist_add_after_rcu(s
 {
     n->next = prev->next;
     n->pprev = &prev->next;
-    smp_wmb();
+    wmb();
     prev->next = n;
     if (n->next)
         n->next->pprev = &n->next;
diff -r 101b0d7ebb00 -r 957b5ac44e32 xen/include/xen/rcupdate.h
--- a/xen/include/xen/rcupdate.h
+++ b/xen/include/xen/rcupdate.h
@@ -136,7 +136,7 @@ typedef struct _rcu_read_lock rcu_read_l
 * call documents which pointers will be dereferenced by RCU read-side
 * code.
 */
-#define rcu_assign_pointer(p, v) ({ smp_wmb(); (p) = (v); })
+#define rcu_assign_pointer(p, v) ({ wmb(); (p) = (v); })
 
 void rcu_init(void);
 void rcu_check_callbacks(int cpu);
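For a sense of what these barriers protect, here is a minimal publish/subscribe sketch around rcu_assign_pointer() (illustrative code written for this summary, not from the patch; struct foo, global_foo and publish() are invented names):

    struct foo { int a; };
    static struct foo *global_foo;

    static void publish(struct foo *f)
    {
        f->a = 42;                          /* initialise the object first... */
        rcu_assign_pointer(global_foo, f);  /* ...then publish it; the barrier
                                               inside orders the stores so a
                                               concurrent reader never sees a
                                               half-initialised object */
    }

The patch only renames the barrier; the publish-before-use ordering the macro provides is unchanged.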
Currently, hvm emulates a version 0x11 IOAPIC. However, depending on the HVM guest's {IO,L}APIC setup, it may take fewer traps into Xen by directly using the VIOAPIC EOI register present in version 0x20, rather than resorting to the legacy method of flipping the trigger mode for the relevant RTE.

Currently, all required functionality is already present in the code, except that it is covered by VIOAPIC_IS_IOSAPIC, which is never defined.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

diff -r 957b5ac44e32 -r 79bc45a90933 xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -227,11 +227,9 @@ static int vioapic_write(
         vioapic_write_indirect(vioapic, length, val);
         break;
 
-#if VIOAPIC_IS_IOSAPIC
     case VIOAPIC_REG_EOI:
         vioapic_update_EOI(v->domain, val);
         break;
-#endif
 
     default:
         break;
diff -r 957b5ac44e32 -r 79bc45a90933 xen/include/asm-x86/hvm/vioapic.h
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -31,7 +31,7 @@
 #include <public/hvm/save.h>
 
 #if !VIOAPIC_IS_IOSAPIC
-#define VIOAPIC_VERSION_ID 0x11 /* IOAPIC version */
+#define VIOAPIC_VERSION_ID 0x20 /* IOAPIC version */
 #else
 #define VIOAPIC_VERSION_ID 0x21 /* IOSAPIC version */
 #endif
@@ -45,7 +45,7 @@
 /* Direct registers. */
 #define VIOAPIC_REG_SELECT  0x00
 #define VIOAPIC_REG_WINDOW  0x10
-#define VIOAPIC_REG_EOI     0x40 /* IA64 IOSAPIC only */
+#define VIOAPIC_REG_EOI     0x40
 
 /* Indirect registers. */
 #define VIOAPIC_REG_APIC_ID 0x00 /* x86 IOAPIC only */
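To illustrate the trap saving being claimed, here is a rough sketch of the two EOI strategies as a guest might implement them (illustrative code, not from the patch; ioapic_base and the register arithmetic follow the constants above but are otherwise assumptions):

    #include <stdint.h>

    #define VIOAPIC_REG_SELECT 0x00
    #define VIOAPIC_REG_WINDOW 0x10
    #define VIOAPIC_REG_EOI    0x40

    static volatile uint32_t *ioapic_base;   /* assumed already mapped */

    /* Version 0x20+: a single MMIO write, i.e. one trap into Xen. */
    static void ioapic_direct_eoi(uint8_t vector)
    {
        ioapic_base[VIOAPIC_REG_EOI / 4] = vector;
    }

    /* Version 0x11: read-modify-write of the RTE, flipping the trigger
     * mode (bit 15 of the low dword) to edge and back - several traps. */
    static void ioapic_legacy_eoi(unsigned int rte)
    {
        uint32_t lo;

        ioapic_base[VIOAPIC_REG_SELECT / 4] = 0x10 + 2 * rte;
        lo = ioapic_base[VIOAPIC_REG_WINDOW / 4];
        ioapic_base[VIOAPIC_REG_WINDOW / 4] = lo & ~(1u << 15);
        ioapic_base[VIOAPIC_REG_WINDOW / 4] = lo;
    }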
Andrew Cooper
2012-Feb-08 16:45 UTC
[PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
__ia64__ really, really should not be defined in the x86 arch subtree, so remove it from xen/include/public/arch-x86/hvm/save.h.

This in turn allows the removal of VIOAPIC_IS_IOSAPIC, as x86 does not use streamlined {IO,L}APICs, allowing for the removal of more code from the x86 tree.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

diff -r 79bc45a90933 -r d59767433c7f xen/arch/x86/hvm/vioapic.c
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -59,12 +59,10 @@ static unsigned long vioapic_read_indire
                   | (VIOAPIC_VERSION_ID & 0xff));
         break;
 
-#if !VIOAPIC_IS_IOSAPIC
     case VIOAPIC_REG_APIC_ID:
     case VIOAPIC_REG_ARB_ID:
         result = ((vioapic->id & 0xf) << 24);
         break;
-#endif
 
     default:
     {
@@ -179,14 +177,12 @@ static void vioapic_write_indirect(
         /* Writes are ignored. */
         break;
 
-#if !VIOAPIC_IS_IOSAPIC
     case VIOAPIC_REG_APIC_ID:
         vioapic->id = (val >> 24) & 0xf;
         break;
 
     case VIOAPIC_REG_ARB_ID:
         break;
-#endif
 
     default:
     {
diff -r 79bc45a90933 -r d59767433c7f xen/include/asm-x86/hvm/vioapic.h
--- a/xen/include/asm-x86/hvm/vioapic.h
+++ b/xen/include/asm-x86/hvm/vioapic.h
@@ -30,11 +30,7 @@
 #include <xen/smp.h>
 #include <public/hvm/save.h>
 
-#if !VIOAPIC_IS_IOSAPIC
 #define VIOAPIC_VERSION_ID 0x20 /* IOAPIC version */
-#else
-#define VIOAPIC_VERSION_ID 0x21 /* IOSAPIC version */
-#endif
 
 #define VIOAPIC_EDGE_TRIG  0
 #define VIOAPIC_LEVEL_TRIG 1
diff -r 79bc45a90933 -r d59767433c7f xen/include/public/arch-x86/hvm/save.h
--- a/xen/include/public/arch-x86/hvm/save.h
+++ b/xen/include/public/arch-x86/hvm/save.h
@@ -344,12 +344,7 @@ DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm
  * IO-APIC
  */
 
-#ifdef __ia64__
-#define VIOAPIC_IS_IOSAPIC 1
-#define VIOAPIC_NUM_PINS  24
-#else
 #define VIOAPIC_NUM_PINS  48 /* 16 ISA IRQs, 32 non-legacy PCI IRQS. */
-#endif
 
 struct hvm_hw_vioapic {
     uint64_t base_address;
@@ -368,13 +363,8 @@ struct hvm_hw_vioapic {
             uint8_t trig_mode:1;
             uint8_t mask:1;
             uint8_t reserve:7;
-#if !VIOAPIC_IS_IOSAPIC
             uint8_t reserved[4];
             uint8_t dest_id;
-#else
-            uint8_t reserved[3];
-            uint16_t dest_id;
-#endif
         } fields;
     } redirtbl[VIOAPIC_NUM_PINS];
 };
Tim Deegan
2012-Feb-08 17:05 UTC
Re: [PATCH 3 of 4] VIOAPIC: Emulate a version 0x20 IOAPIC
At 16:45 +0000 on 08 Feb (1328719538), Andrew Cooper wrote:

> Currently, hvm emulates a version 0x11 IOAPIC. However, depending on
> the HVM guest's {IO,L}APIC setup, it may take fewer traps into Xen by
> directly using the VIOAPIC EOI register present in version 0x20,
> rather than resorting to the legacy method of flipping the trigger
> mode for the relevant RTE.
>
> Currently, all required functionality is already present in the code,
> except that it is covered by VIOAPIC_IS_IOSAPIC which is never defined.
>
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

This probably ought to introduce an HVM save record to say which kind of IOAPIC the VM was booted with, so that after a live migration the OS doesn't get confused. :(

It seems unlikely that any OSes rely on the IOAPIC version (and the behaviour of that register) being static after boot, but better safe than sorry - there might be some confusion in resume from S3 or similar.

Tim.
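No such record exists at this point; as a sketch of what Tim is suggesting, a record along these lines might do (the ordinal 17 and all names are hypothetical, invented for illustration):

    struct hvm_hw_vioapic_version {
        uint8_t version;    /* VIOAPIC_VERSION_ID the guest booted with */
    };
    DECLARE_HVM_SAVE_TYPE(IOAPIC_VERSION, 17, struct hvm_hw_vioapic_version);

On restore, vioapic_read_indirect() would then report the saved value rather than the compile-time constant, so a migrated guest keeps seeing the version it booted with.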
Andrew Cooper
2012-Feb-08 17:13 UTC
Re: [PATCH 3 of 4] VIOAPIC: Emulate a version 0x20 IOAPIC
On 08/02/12 17:05, Tim Deegan wrote:

> This probably ought to introduce an HVM save record to say which kind of
> IOAPIC the VM was booted with, so that after a live migration the OS
> doesn't get confused. :(
>
> It seems unlikely that any OSes rely on the IOAPIC version (and the
> behaviour of that register) being static after boot, but better safe
> than sorry - there might be some confusion in resume from S3 or similar.
>
> Tim.

Hmm - that is a very good point, and I had not considered the possibility. I withdraw this patch pending more thought.

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
Version 2 attached - it fixes a spelling mistake in the comment.

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
Andrew Cooper
2012-Feb-08 17:24 UTC
Re: [PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
Presented v2, refreshing the patch without emulating a version 0x20 IOAPIC.

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
Jan Beulich
2012-Feb-09 10:49 UTC
Re: [PATCH 2 of 4] CONFIG: remove smp barrier definitions
>>> On 08.02.12 at 17:45, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> Now that CONFIG_SMP has been removed, there is no need to define
> smp_{,r,w}mb()s, which used to be conditionally compiled to different
> operations (even though those conditionally different operations still
> ended up as simple barrier()s).
>
> Therefore, remove smp_{,r,w}mb()s and just use regular {,r,w}mb()s.

Did you read the Linux side description and usage guidelines before doing this? I don't think doing the adjustment here is a good idea, even if the smp_ ones are aliases of the plain ones (which doesn't necessarily have to be the case on any future architectures Xen might get ported to).

Jan
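The Linux guideline referred to here distinguishes mandatory barriers, which also order accesses that devices observe, from smp_* barriers, which only order CPU-to-CPU accesses to shared memory and may collapse to a compiler barrier on a uniprocessor build. A minimal sketch of how the two families could legitimately diverge on a hypothetical future port (the arch_* names are invented for illustration):

    #define mb()      arch_full_fence()   /* orders everything, incl. MMIO */

    #ifdef CONFIG_SMP
    #define smp_mb()  arch_full_fence()   /* CPU-to-CPU ordering needed */
    #else
    #define smp_mb()  barrier()           /* compiler barrier suffices on UP */
    #endif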
>>> On 08.02.12 at 18:22, Andrew Cooper <andrew.cooper3@citrix.com> wrote:

Doing this in x86 code is perhaps fine; you shouldn't do this in common code though (ia64, for example, can [at least theoretically] be built non-SMP, even though particularly on that architecture this seems to make very little sense).

Jan
Jan Beulich
2012-Feb-09 11:03 UTC
Re: [PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
>>> On 08.02.12 at 18:24, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> @@ -227,12 +223,6 @@ static int vioapic_write(
>          vioapic_write_indirect(vioapic, length, val);
>          break;
>
> -#if VIOAPIC_IS_IOSAPIC
> -    case VIOAPIC_REG_EOI:
> -        vioapic_update_EOI(v->domain, val);
> -        break;
> -#endif
> -

Would you mind keeping that code, putting the call inside a conditional checking VIOAPIC_VERSION_ID >= 0x20?

Jan

>     default:
>         break;
>     }
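Roughly what is being asked for - a sketch of the suggested shape, not the code as eventually committed:

    case VIOAPIC_REG_EOI:
        /* Only honour the direct EOI register on 0x20+ IOAPICs. */
        if ( VIOAPIC_VERSION_ID >= 0x20 )
            vioapic_update_EOI(v->domain, val);
        break;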
On 09/02/12 10:54, Jan Beulich wrote:

>>>> On 08.02.12 at 18:22, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> Doing this in x86 code is perhaps fine; you shouldn't do this in common
> code though (ia64, for example, can [at least theoretically] be built
> non-SMP, even though particularly on that architecture this seems to
> make very little sense).
>
> Jan

In some ways, that is the same as x86 !CONFIG_SMP support: theoretically sensible, but we never use it.

Is !SMP 'supported' for IA64 in any way?

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
Andrew Cooper
2012-Feb-09 11:52 UTC
Re: [PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
On 09/02/12 11:03, Jan Beulich wrote:

>>>> On 08.02.12 at 18:24, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
>> @@ -227,12 +223,6 @@ static int vioapic_write(
>>          vioapic_write_indirect(vioapic, length, val);
>>          break;
>>
>> -#if VIOAPIC_IS_IOSAPIC
>> -    case VIOAPIC_REG_EOI:
>> -        vioapic_update_EOI(v->domain, val);
>> -        break;
>> -#endif
>> -
>
> Would you mind keeping that code, putting the call inside a conditional
> checking VIOAPIC_VERSION_ID >= 0x20?
>
> Jan

Yes - in actual fact, I considered the same just after I emailed this patch. I shall respin.

~Andrew

>>     default:
>>         break;
>>     }

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
Keir Fraser
2012-Feb-09 12:42 UTC
Re: [PATCH 2 of 4] CONFIG: remove smp barrier definitions
On 09/02/2012 02:49, "Jan Beulich" <JBeulich@suse.com> wrote:

>>>> On 08.02.12 at 17:45, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
>> Now that CONFIG_SMP has been removed, there is no need to define
>> smp_{,r,w}mb()s, which used to be conditionally compiled to different
>> operations (even though those conditionally different operations still
>> ended up as simple barrier()s).
>>
>> Therefore, remove smp_{,r,w}mb()s and just use regular {,r,w}mb()s.
>
> Did you read the Linux side description and usage guidelines before
> doing this? I don't think doing the adjustment here is a good idea,
> even if the smp_ ones are aliases of the plain ones (which doesn't
> necessarily have to be the case on any future architectures Xen might
> get ported to).

In that they can document barriers used on shared memory versus I/O memory, it is perhaps worth keeping the smp_* variants.

 -- Keir
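To make the shared-memory-versus-I/O distinction concrete (an illustrative sketch written for this summary; shared, desc and nic_doorbell are invented names):

    /* CPU-to-CPU: publishing data to another CPU - smp_wmb() documents
     * that only inter-processor ordering is required. */
    shared->payload = value;
    smp_wmb();
    shared->ready = 1;

    /* CPU-to-device: a DMA descriptor must reach memory before the
     * doorbell write - this needs the mandatory wmb() even on one CPU. */
    desc->addr = dma_addr;
    wmb();
    writel(1, nic_doorbell);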
Andrew Cooper
2012-Feb-09 13:08 UTC
Re: [PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
Version 3 attached.

It keeps the EOI register function, protected by a check on the IOAPIC version.

 -- 
Andrew Cooper - Dom0 Kernel Engineer, Citrix XenServer
T: +44 (0)1223 225 900, http://www.citrix.com
>>> On 09.02.12 at 12:51, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> On 09/02/12 10:54, Jan Beulich wrote:
>>>>> On 08.02.12 at 18:22, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
>> Doing this in x86 code is perhaps fine; you shouldn't do this in common
>> code though (ia64, for example, can [at least theoretically] be built
>> non-SMP, even though particularly on that architecture this seems to
>> make very little sense).
>>
>> Jan
>
> In some ways, that is the same as x86 !CONFIG_SMP support: theoretically
> sensible, but we never use it.
>
> Is !SMP 'supported' for IA64 in any way?

Probably not, but then again the whole ia64 port apparently isn't supported in any way these days. All I was hinting at is that their config.h does have provisions for !SMP.

Jan
Jan Beulich
2012-Feb-09 14:58 UTC
Re: [PATCH 4 of 4] CONFIG: remove #ifdef __ia64__ from the x86 arch tree
>>> On 09.02.12 at 14:08, Andrew Cooper <andrew.cooper3@citrix.com> wrote:
> Version 3 attached.
>
> It keeps the EOI register function, protected by a check on the
> IOAPIC version.

Acked-by: Jan Beulich <jbeulich@suse.com>