search for: __volatile__

Displaying 13 results from an estimated 334 matches for "__volatile__".

2007 Apr 18
0
[RFC, PATCH 13/24] i386 Vmi system header
...0000 -0800 @@ -9,6 +9,8 @@ #ifdef __KERNEL__ +#include <mach_system.h> + struct task_struct; /* one of the stranger aspects of C forward declarations.. */ extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next)); @@ -83,69 +85,8 @@ __asm__ __volatile__ ("movw %%dx,%1\n\t" #define savesegment(seg, value) \ asm volatile("mov %%" #seg ",%0":"=rm" (value)) -/* - * Clear and set 'TS' bit respectively - */ -#define clts() __asm__ __volatile__ ("clts") -#define read_cr0() ({ \ - unsigned int...
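
The savesegment() macro kept by this hunk is easy to misread; a minimal usage sketch (variable names here are illustrative, not from the patch):

        /* Read a segment selector into a C variable; "=rm" lets the
         * compiler place the output in a register or a memory slot. */
        unsigned int fs_sel;
        savesegment(fs, fs_sel);  /* -> asm volatile("mov %%fs,%0" : "=rm" (fs_sel)) */
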
2007 Apr 18
3
[PATCH 1/4] x86 paravirt_ops: create no_paravirt.h for native ops
...=== --- a/include/asm-i386/irqflags.h +++ b/include/asm-i386/irqflags.h @@ -9,62 +9,12 @@ */ #ifndef _ASM_IRQFLAGS_H #define _ASM_IRQFLAGS_H +#include <asm/paravirt.h> #ifndef __ASSEMBLY__ -static inline unsigned long __raw_local_save_flags(void) -{ - unsigned long flags; - - __asm__ __volatile__( - "pushfl ; popl %0" - : "=g" (flags) - : /* no input */ - ); - - return flags; -} - #define raw_local_save_flags(flags) \ do { (flags) = __raw_local_save_flags(); } while (0) - -static inline void raw_local_irq_restore(unsigned long flags) -{ - __asm__ __volatile__( -...
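
For reference, the native implementation this patch deletes (and replaces with <asm/paravirt.h>) reads EFLAGS like so, reformatted from the hunk:

        /* Push EFLAGS onto the stack, then pop it into an operand the
         * compiler chooses ("=g": any register or memory location). */
        static inline unsigned long __raw_local_save_flags(void)
        {
                unsigned long flags;

                __asm__ __volatile__(
                        "pushfl ; popl %0"
                        : "=g" (flags)
                        : /* no input */
                );

                return flags;
        }
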
2007 Apr 18
2
[RFC, PATCH 17/24] i386 Vmi msr patch
...H +#include <mach_msr.h> + /* * Access to machine-specific registers (available on 586 and better only) * Note: the rd* operations modify the parameters directly (without using * pointer indirection), this allows gcc to optimize better */ -#define rdmsr(msr,val1,val2) \ - __asm__ __volatile__("rdmsr" \ - : "=a" (val1), "=d" (val2) \ - : "c" (msr)) - -#define wrmsr(msr,val1,val2) \ - __asm__ __volatile__("wrmsr" \ - : /* no outputs */ \ - : "c" (msr), "a" (val1), "d" (val2)) - #define rdmsrl(...
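
The native definitions being moved behind <mach_msr.h> are visible in the hunk; reformatted for readability:

        /* rdmsr: ECX selects the MSR; the 64-bit value comes back split
         * across EDX:EAX, hence the two output parameters. */
        #define rdmsr(msr, val1, val2)                  \
                __asm__ __volatile__("rdmsr"            \
                        : "=a" (val1), "=d" (val2)      \
                        : "c" (msr))

        /* wrmsr: the same register convention, in the other direction. */
        #define wrmsr(msr, val1, val2)                  \
                __asm__ __volatile__("wrmsr"            \
                        : /* no outputs */              \
                        : "c" (msr), "a" (val1), "d" (val2))
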
2007 Apr 18
5
[PATCH] paravirt.h
...=== --- a/include/asm-i386/desc.h +++ b/include/asm-i386/desc.h @@ -64,6 +64,9 @@ static inline void pack_gate(__u32 *a, _ #define DESCTYPE_DPL3 0x60 /* DPL-3 */ #define DESCTYPE_S 0x10 /* !system */ +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else #define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8)) #define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8)) @@ -98,6 +101,7 @@ static inline void write_dt_entry(void * #define write_ldt_entry(dt, entry, a, b) write_dt_entry(dt, entry, a, b) #define...
2007 Apr 18
2
[PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
Hi all, I've been looking at finding common ground between the VMI, Xen and other paravirtualization approaches, and after some discussion, we're getting somewhere. These first two patches are the fundamentals, stolen mainly from the VMI patches: removing assumptions about the kernel running in ring 0, and macro-izing all the obvious para-virtualize-needing insns. The third patch is
2015 Dec 30
0
[PATCH 07/34] sparc: reuse asm-generic/barrier.h
...<asm-generic/barrier.h> #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 14a9286..26c3f72 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,25 +37,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define dma_rmb() rmb() -#define dma_wmb() wmb() - -#define smp_store_mb(__var, __value) \ - do { WRITE_ONCE(__var, __va...
2015 Dec 31
0
[PATCH v2 07/32] sparc: reuse asm-generic/barrier.h
...<asm-generic/barrier.h> #endif /* !(__SPARC_BARRIER_H) */ diff --git a/arch/sparc/include/asm/barrier_64.h b/arch/sparc/include/asm/barrier_64.h index 14a9286..26c3f72 100644 --- a/arch/sparc/include/asm/barrier_64.h +++ b/arch/sparc/include/asm/barrier_64.h @@ -37,25 +37,6 @@ do { __asm__ __volatile__("ba,pt %%xcc, 1f\n\t" \ #define rmb() __asm__ __volatile__("":::"memory") #define wmb() __asm__ __volatile__("":::"memory") -#define dma_rmb() rmb() -#define dma_wmb() wmb() - -#define smp_store_mb(__var, __value) \ - do { WRITE_ONCE(__var, __va...
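
Both sparc patches rest on the same idiom: an empty asm body with a "memory" clobber. A minimal sketch of why that is sufficient:

        /* Emits no instructions; the "memory" clobber alone forbids the
         * compiler from caching, eliminating, or reordering loads and
         * stores across this point. sparc64 runs TSO, so a compiler-only
         * barrier is all rmb()/wmb() need. */
        #define barrier()       __asm__ __volatile__("" : : : "memory")
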
2020 Jul 02
2
[PATCH 04/18] alpha: Override READ_ONCE() with barriered implementation
...sm/barrier.h > index 92ec486a4f9e..2ecd068d91d1 100644 > --- a/arch/alpha/include/asm/barrier.h > +++ b/arch/alpha/include/asm/barrier.h > @@ -2,64 +2,17 @@ > #ifndef __BARRIER_H > #define __BARRIER_H > > -#include <asm/compiler.h> > - > #define mb() __asm__ __volatile__("mb": : :"memory") > #define rmb() __asm__ __volatile__("mb": : :"memory") > #define wmb() __asm__ __volatile__("wmb": : :"memory") > -#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory") ...
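
The motivation, inferred from the subject line rather than shown in the hunk: Alpha does not order dependent loads, so READ_ONCE() needs a real barrier after the load. A hypothetical sketch of the shape such an override takes (the actual patch differs in detail):

        #define __READ_ONCE(x)                                          \
        ({                                                              \
                __typeof__(x) __v = (*(volatile __typeof__(x) *)&(x)); \
                /* full barrier: order accesses that depend on __v */   \
                __asm__ __volatile__("mb" : : : "memory");              \
                __v;                                                    \
        })
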
2005 Apr 19
0
[PATCH][1/5] x86-64-eax.patch
...ned-Off-By: Arun Sharma <arun.sharma@intel.com> diff -Nru a/xen/arch/x86/vmx_vmcs.c b/xen/arch/x86/vmx_vmcs.c --- a/xen/arch/x86/vmx_vmcs.c 2005-04-18 16:49:37 -07:00 +++ b/xen/arch/x86/vmx_vmcs.c 2005-04-18 16:49:37 -07:00 @@ -187,7 +187,7 @@ vmx_setup_platform(ed, ec); - __asm__ __volatile__ ("sgdt (%%eax) \n" :: "a"(&desc) : "memory"); + __asm__ __volatile__ ("sgdt (%0) \n" :: "a"(&desc) : "memory"); host_env.gdtr_limit = desc.size; host_env.gdtr_base = desc.address; @@ -197,7 +197,7 @@ error |=...
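
The fix is subtle: the old template hard-coded %%eax while the constraint only requested the "a" register class. Referring to the operand by number keeps template and constraint in sync:

        /* "%0" names whatever register the "a" constraint allocated
         * (eax on i386, rax on x86-64), so the same asm stays correct
         * in a 64-bit build. */
        __asm__ __volatile__ ("sgdt (%0)" : : "a"(&desc) : "memory");
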
2007 Apr 18
3
[PATCH 1/2] paravirt.h header
...clude/asm-i386/msr.h @@ -1,5 +1,9 @@ #ifndef __ASM_MSR_H #define __ASM_MSR_H + +#ifdef CONFIG_PARAVIRT +#include <asm/paravirt.h> +#else /* * Access to machine-specific registers (available on 586 and better only) @@ -77,6 +81,7 @@ static inline void wrmsrl (unsigned long __asm__ __volatile__("rdpmc" \ : "=a" (low), "=d" (high) \ : "c" (counter)) +#endif /* !CONFIG_PARAVIRT */ /* symbolic names for some interesting MSRs */ /* Intel defined MSRs. */ =================================================================== --- a/include/as...
2015 Dec 01
11
[PATCH 1/6] x86: Add VMWare Host Communication Macros
...OUT] e.g. channel id + * @si: [INOUT] set to 0 if not used + * @di: [INOUT] set to 0 if not used + * @bp: [INOUT] set to 0 if not used + */ +#define VMW_PORT(in1, in2, port_num, magic, eax, ebx, ecx, edx, si, di) \ +({ \ + __asm__ __volatile__ ("inl %%dx" : \ + "=a"(eax), \ + "=b"(ebx), \ + "=c"(ecx), \ + "=d"(edx),...
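
The truncated macro is built around the x86 inl instruction; a self-contained sketch of the core access, in two-operand AT&T form with illustrative names:

        /* __volatile__ matters here: the port read *is* the hypercall,
         * a side effect the compiler must not elide or reorder even if
         * the returned value looks unused. */
        static inline unsigned int vmw_port_inl(unsigned short port)
        {
                unsigned int val;

                __asm__ __volatile__("inl %%dx, %%eax"
                                     : "=a" (val)
                                     : "d" (port));
                return val;
        }
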
2020 Jul 06
0
[PATCH v3 3/6] powerpc: move spinlock implementation to simple_spinlock
...turn !arch_spin_value_unlocked(*lock); +} + +/* + * This returns the old value in the lock, so we succeeded + * in getting the lock if the return value is 0. + */ +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) +{ + unsigned long tmp, token; + + token = LOCK_TOKEN; + __asm__ __volatile__( +"1: " PPC_LWARX(%0,0,%2,1) "\n\ + cmpwi 0,%0,0\n\ + bne- 2f\n\ + stwcx. %1,0,%2\n\ + bne- 1b\n" + PPC_ACQUIRE_BARRIER +"2:" + : "=&r" (tmp) + : "r" (token), "r" (&lock->slock) + : "cr0", "memory"); + +...
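
The lwarx/stwcx. pair is PowerPC's load-reserve/store-conditional. For readers more used to C11 atomics, a rough portable analogue of the same trylock (note the kernel version returns the old lock value; this sketch returns success):

        #include <stdatomic.h>

        /* Succeed only if the lock word was 0; install our token with
         * acquire ordering, mirroring PPC_ACQUIRE_BARRIER on the
         * success path. */
        static inline int trylock(atomic_uint *lock, unsigned int token)
        {
                unsigned int expected = 0;

                return atomic_compare_exchange_strong_explicit(
                        lock, &expected, token,
                        memory_order_acquire, memory_order_relaxed);
        }
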
2016 Jan 05
2
[PATCH v2 15/32] powerpc: define __smp_xxx
...--git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h > index 980ad0c..c0deafc 100644 > --- a/arch/powerpc/include/asm/barrier.h > +++ b/arch/powerpc/include/asm/barrier.h > @@ -44,19 +44,11 @@ > #define dma_rmb() __lwsync() > #define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") > > -#ifdef CONFIG_SMP > -#define smp_lwsync() __lwsync() > +#define __smp_lwsync() __lwsync() > so __smp_lwsync() is always mapped to lwsync, right? > -#define smp_mb() mb() > -#define smp_rmb() __lwsync() > -#define sm...