search for: asm_volatile_goto

Displaying 11 results from an estimated 11 matches for "asm_volatile_goto".

2019 Sep 26
2
An error of asm goto occurred while compiling Linux kernel 5.3
...while compiling Linux kernel 5.3 to IR. My LLVM version is the 9.0.0 release. The error says "invalid operand for inline asm constraint 'i'" in arch/x86/include/asm/jump_label.h. The source code is:

static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
	asm_volatile_goto("1:"
		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
		".pushsection __jump_table, \"aw\" \n\t"
		_ASM_ALIGN "\n\t"
		".long 1b - ., %l[l_yes] - . \n\t"
		_ASM_PTR "%c0 + %c1 - .\n\t"
		".popsection \n\t"
		: : "i...
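For context on that error: the "i" constraint demands a compile-time (link-time) constant, and in the kernel the key pointer is a function argument, so it only becomes constant after the compiler inlines the call and folds the address; that is the step the 9.0.0 release tripped over. Below is a minimal, hedged user-space sketch of the same asm-goto pattern, not the kernel's code: it sidesteps the problem by taking the address of a file-scope static directly, and the NOP bytes, alignment, and section directives are stand-ins for STATIC_KEY_INIT_NOP, _ASM_ALIGN, and _ASM_PTR (assumptions: x86-64, GCC or a Clang with asm-goto support, built with e.g. "gcc -O2 sketch.c").

/* Hedged sketch of the x86 static-branch asm-goto pattern; not the
 * kernel's implementation. */
#include <stdbool.h>
#include <stdio.h>

struct static_key { int enabled; };

static struct static_key key;  /* static, so &key is a link-time constant */

static inline bool arch_static_branch_sketch(void)
{
	asm goto("1: .byte 0x0f,0x1f,0x44,0x00,0x00\n\t" /* 5-byte NOP */
		".pushsection __jump_table, \"aw\"\n\t"
		".balign 8\n\t"
		".long 1b - ., %l[l_yes] - .\n\t" /* code and target, PC-relative */
		".quad %c0 - .\n\t"   /* "i": the key address must be constant */
		".popsection"
		: : "i" (&key) : : l_yes);
	return false;                          /* NOP falls through */
l_yes:
	return true;                           /* reached only if patched */
}

int main(void)
{
	/* Prints 0: nothing ever rewrites the NOP in this sketch. In the
	 * kernel, jump-label patching replaces it with a jmp to l_yes. */
	printf("branch taken: %d\n", arch_static_branch_sketch());
	return 0;
}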
2020 Jul 21
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...&val, _Q_LOCKED_VAL)))
+	atomic_t *a = &lock->val;
+	u32 val;
+
+again:
+	asm volatile(
+"1:\t" PPC_LWARX(%0,0,%1,1) "	# queued_spin_lock		\n"
+	: "=&r" (val)
+	: "r" (&a->counter)
+	: "memory");
+
+	if (likely(val == 0)) {
+		asm_volatile_goto(
+	"	stwcx.	%0,0,%1					\n"
+	"	bne-	%l[again]				\n"
+	"\t"	PPC_ACQUIRE_BARRIER "				\n"
+	:
+	: "r"(_Q_LOCKED_VAL), "r" (&a->counter)
+	: "cr0", "memory"
+	: again );
		return;
-
-	queued_spin_lock_s...
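The point of asm_volatile_goto here (the kernel's wrapper around asm goto, kept as a workaround for an old GCC issue) is that the stwcx. failure path can branch straight back to the C-level "again" label instead of looping inside the asm. A hedged sketch of that fast path's shape follows; the names and details are simplified stand-ins, not the kernel's definitions (assumptions: powerpc64 target, a compiler with asm-goto; plain lwarx without the EH hint that PPC_LWARX emits, and isync standing in for PPC_ACQUIRE_BARRIER; the extern slow path is a placeholder for the code elided by "..." above, so this compiles but does not link on its own).

/* Hedged sketch of the ll/sc fast path; powerpc64 only. */
typedef struct { int counter; } atomic_t;
#define _Q_LOCKED_VAL 1u

extern void queued_spin_lock_slowpath(atomic_t *a); /* placeholder */

static inline void queued_spin_lock_sketch(atomic_t *a)
{
	unsigned int val;
again:
	/* Load the lock word and take a reservation on it. */
	asm volatile("lwarx %0,0,%1	# queued_spin_lock"
		: "=&r" (val)
		: "r" (&a->counter)
		: "memory");
	if (val == 0) {
		/* Store-conditional; if the reservation was lost, bne-
		 * jumps back to the C label "again" -- the part that
		 * needs asm goto. stwcx. sets CR0, hence the clobber. */
		asm goto("stwcx. %0,0,%1\n\t"
			"bne- %l[again]\n\t"
			"isync"	/* simplified acquire barrier */
			:
			: "r" (_Q_LOCKED_VAL), "r" (&a->counter)
			: "cr0", "memory"
			: again);
		return;
	}
	queued_spin_lock_slowpath(a);
}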
2020 Jul 21
0
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...->val;
> +	u32 val;
> +
> +again:
> +	asm volatile(
> +"1:\t" PPC_LWARX(%0,0,%1,1) "	# queued_spin_lock	\n"
> +	: "=&r" (val)
> +	: "r" (&a->counter)
> +	: "memory");
> +
> +	if (likely(val == 0)) {
> +		asm_volatile_goto(
> +	"	stwcx.	%0,0,%1				\n"
> +	"	bne-	%l[again]			\n"
> +	"\t"	PPC_ACQUIRE_BARRIER "			\n"
> +	:
> +	: "r"(_Q_LOCKED_VAL), "r" (&a->counter)
> +	: "cr0", "memory"
> +	: again )...
2018 Dec 16
1
[PATCH v2] x86, kbuild: revert macrofying inline assembly code
...cessor.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
 #include <asm/asm.h>
 #include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-	asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
-		"cap_byte=\"%[cap_byte]\" "
-		"feature=%P[feature] t_yes=%l[t_yes] "
-		"t_no=%l[t_no] always=%P[always]"
+	asm_volatile_goto("1: jmp 6f\n"
+		"2:\n"
+		".skip -(((5f-4f) - (...
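The diff reverts the "macrofied" asm (the STATIC_CPU_HAS assembler macro, added to work around assembler limitations) back to a plain asm_volatile_goto body. The mechanism underneath _static_cpu_has is an asm goto with two C labels, where boot-time alternatives patching retargets an initial jmp. A hedged minimal sketch of just that two-label control-flow shape, with the patching machinery omitted and the jump hard-wired (so this sketch always returns false):

/* Hedged sketch of the two-label asm-goto shape used by _static_cpu_has;
 * in the kernel, alternatives patching rewrites the jmp at boot. */
#include <stdbool.h>

static inline bool static_cpu_has_sketch(void)
{
	asm goto("jmp %l[t_no]"	/* boot code would retarget this jump */
		: : : : t_yes, t_no);
t_yes:
	return true;	/* feature present (after patching) */
t_no:
	return false;	/* feature absent */
}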
2018 Dec 13
2
[PATCH] kbuild, x86: revert macros in extended asm workarounds
...cessor.h>
+
+#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
+
 #include <asm/asm.h>
 #include <linux/bitops.h>
@@ -161,10 +161,37 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);
  */
 static __always_inline __pure bool _static_cpu_has(u16 bit)
 {
-	asm_volatile_goto("STATIC_CPU_HAS bitnum=%[bitnum] "
-		"cap_byte=\"%[cap_byte]\" "
-		"feature=%P[feature] t_yes=%l[t_yes] "
-		"t_no=%l[t_no] always=%P[always]"
+	asm_volatile_goto("1: jmp 6f\n"
+		"2:\n"
+		".skip -(((5f-4f) - (...
2020 Jul 23
2
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
...
>> +again:
>> +	asm volatile(
>> +"1:\t" PPC_LWARX(%0,0,%1,1) "	# queued_spin_lock	\n"
>> +	: "=&r" (val)
>> +	: "r" (&a->counter)
>> +	: "memory");
>> +
>> +	if (likely(val == 0)) {
>> +		asm_volatile_goto(
>> +	"	stwcx.	%0,0,%1				\n"
>> +	"	bne-	%l[again]			\n"
>> +	"\t"	PPC_ACQUIRE_BARRIER "			\n"
>> +	:
>> +	: "r"(_Q_LOCKED_VAL), "r" (&a->counter)
>> +	: "cr0", "memory...
2020 Jul 07
6
[PATCH v3 0/6] powerpc: queued spinlocks and rwlocks
Excerpts from Waiman Long's message of July 7, 2020 4:39 am:
> On 7/6/20 12:35 AM, Nicholas Piggin wrote:
>> v3 is updated to use __pv_queued_spin_unlock, noticed by Waiman (thank you).
>>
>> Thanks,
>> Nick
>>
>> Nicholas Piggin (6):
>>   powerpc/powernv: must include hvcall.h to get PAPR defines
>>   powerpc/pseries: move some PAPR