Displaying 20 results from an estimated 25 matches for "_asm_align".
2019 Sep 26
2
An asm goto error occurred while compiling Linux kernel 5.3
...x86/include/asm/jump_label.h.
The source code is
static __always_inline bool arch_static_branch(struct static_key *key,
					       bool branch)
{
	asm_volatile_goto("1:"
		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
		".pushsection __jump_table, \"aw\" \n\t"
		_ASM_ALIGN "\n\t"
		".long 1b - ., %l[l_yes] - . \n\t"
		_ASM_PTR "%c0 + %c1 - .\n\t"
		".popsection \n\t"
		: : "i" (key), "i" (branch) : : l_yes);
	return false;
l_yes:
	return true;
}
The error points at the asm_volatile_goto("1:" line.
I'm not sure...
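This kind of error typically means the compiler does not support "asm goto" (clang only gained it in version 9). A standalone sketch of the same construct, stripped of the kernel macros, can be used to check compiler support; the section name and the NOP encoding below are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Minimal sketch of the asm-goto pattern from arch_static_branch:
 * emit a 5-byte NOP at the branch site and record the site, plus
 * the l_yes target, in a side section. A compiler without asm-goto
 * support fails on this statement, much like the build error above. */
static bool static_branch_sketch(void)
{
	asm goto("1:\n\t"
		 ".byte 0x0f, 0x1f, 0x44, 0x00, 0x00\n\t"	/* 5-byte NOP */
		 ".pushsection .sketch_jump_table, \"aw\"\n\t"
		 ".long 1b - ., %l[l_yes] - .\n\t"
		 ".popsection\n\t"
		 : : : : l_yes);
	return false;
l_yes:
	return true;
}

int main(void)
{
	printf("branch taken: %d\n", static_branch_sketch());
	return 0;
}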
2018 Dec 17
3
[PATCH v3 00/12] x86, kbuild: revert macrofying inline assembly code
This series reverts the in-kernel workarounds for inlining issues.
The commit description of 77b0bf55bc67 mentioned
"We also hope that GCC will eventually get fixed,..."
Now, GCC provides a solution.
https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
explains the new "asm inline" syntax.
The performance issue will eventually be solved.
[About Code cleanups]
I know Nadav
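For reference, the GCC 9 syntax looks like the sketch below; it tells the inliner to treat the asm statement as minimum size no matter how long the assembler text is (the function name here is made up):

/* Sketch of "asm inline": without the "inline" qualifier, GCC
 * estimates an asm statement's size by counting its lines, which
 * made functions containing large asm blobs look too expensive to
 * inline. "asm inline" marks the statement as minimum size instead. */
static inline void five_byte_nops(void)
{
	asm inline volatile ("nop; nop; nop; nop; nop" ::: "memory");
}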
2018 Dec 17
0
[PATCH v3 04/12] Revert "x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops"
...enerate some code, and mark it as patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber)	\
+	"771:\n\t" insn_string "\n" "772:\n"		\
+	".pushsection .parainstructions,\"a\"\n"	\
+	_ASM_ALIGN "\n"					\
+	_ASM_PTR " 771b\n"				\
+	" .byte " type "\n"				\
+	" .byte 772b-771b\n"				\
+	" .short " clobber "\n"				\
+	".popsection\n"
+
/* Generate patchable code, with the default asm parameters. */
-#define...
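For context, each record this macro emits into .parainstructions has a C-side view along the lines of the following sketch (field names match the kernel's struct paravirt_patch_site of that era; treat the exact layout as an assumption):

/* Rough C view of one .parainstructions record from the macro:
 *   _ASM_PTR 771b    -> instr:     start of the patchable region
 *   .byte type       -> instrtype: which pv_op this site invokes
 *   .byte 772b-771b  -> len:       size of the region in bytes
 *   .short clobber   -> clobbers:  register-clobber mask
 */
struct paravirt_patch_site {
	u8 *instr;
	u8  instrtype;
	u8  len;
	u16 clobbers;
};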
2017 Oct 04
0
[PATCH 08/13] x86/paravirt: Clean up paravirt_types.h
...enerate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber)	\
-	"771:\n\t" insn_string "\n" "772:\n"		\
-	".pushsection .parainstructions,\"a\"\n"	\
-	_ASM_ALIGN "\n"					\
-	_ASM_PTR " 771b\n"				\
-	" .byte " type "\n"				\
-	" .byte 772b-771b\n"				\
-	" .short " clobber "\n"				\
-	".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define...
2018 Dec 13
2
[PATCH] kbuild, x86: revert macros in extended asm workarounds
...c0\" "
- "branch=\"%c1\""
- : : "i" (key), "i" (branch) : : l_yes);
+	asm_volatile_goto("1:"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table, \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
+		: : "i" (key), "i" (branch) : : l_yes);
+
return false;
l_yes:
return true;
@@ -30,8 +49,14 @@ static __always_inline bool arch_sta...
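The three fields the asm emits per __jump_table entry correspond to the kernel's relative-offset struct jump_entry, roughly as sketched below; the "%c0 + %c1" trick folds the branch flag into the low bit of the key offset:

/* Rough C view of one __jump_table record from the asm above:
 *   .long 1b - .            -> code:   offset to the NOP site
 *   .long %l[l_yes] - .     -> target: offset to the l_yes label
 *   _ASM_PTR %c0 + %c1 - .  -> key:    offset to the static_key,
 *                              with the branch flag in bit 0
 */
struct jump_entry {
	s32  code;
	s32  target;
	long key;
};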
2018 Dec 19
0
[PATCH v3 00/12] x86, kbuild: revert macrofying inline assembly code
...:
+.macro STATIC_JUMP_IF_TRUE target, key, def
+.Lstatic_jump_\@:
+	.if \def
+	/* Equivalent to "jmp.d32 \target" */
+	.byte 0xe9
+	.long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+	.else
+	.byte STATIC_KEY_INIT_NOP
+	.endif
	.pushsection __jump_table, "aw"
	_ASM_ALIGN
-	.long .Lstatic_branch_nop_\@ - ., \l_yes - .
-	_ASM_PTR \key + \branch - .
+	.long .Lstatic_jump_\@ - ., \target - .
+	_ASM_PTR \key - .
	.popsection
.endm
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
-	.byte 0xe9
-	.long \l_yes - .Lstatic_branch_jm...
2018 Dec 16
1
[PATCH v2] x86, kbuild: revert macrofying inline assembly code
...c0\" "
- "branch=\"%c1\""
- : : "i" (key), "i" (branch) : : l_yes);
+	asm_volatile_goto("1:"
+		".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
+		".pushsection __jump_table, \"aw\" \n\t"
+		_ASM_ALIGN "\n\t"
+		".long 1b - ., %l[l_yes] - . \n\t"
+		_ASM_PTR "%c0 + %c1 - .\n\t"
+		".popsection \n\t"
+		: : "i" (key), "i" (branch) : : l_yes);
+
return false;
l_yes:
return true;
@@ -30,8 +36,14 @@ static __always_inline bool arch_sta...
2018 Jun 11
0
[PATCH v3 6/9] x86: prevent inline distortion by paravirt ops
...o PARAVIRT_ALT type:req clobber:req pv_opptr:req
Unlike the macro maze you replaced, this has the CALL hardcoded in. So
maybe name this PARAVIRT_CALL instead of PARAVIRT_ALT?
> +771:	ANNOTATE_RETPOLINE_SAFE
> +	call *\pv_opptr
> +772:	.pushsection .parainstructions,"a"
> +	_ASM_ALIGN
> +	_ASM_PTR 771b
> +	.byte \type
> +	.byte 772b-771b
> +	.short \clobber
> +	.popsection
> +.endm
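For comparison, the C side hardcodes the call the same way, roughly as sketched here (the name matches the reviewer's PARAVIRT_CALL suggestion; treat the exact spelling as assumed):

/* Sketch of the C-side counterpart: the CALL through the pv_ops
 * pointer is spelled out here too, which is why reusing the
 * PARAVIRT_CALL name for the asm macro would be consistent. */
#define PARAVIRT_CALL					\
	ANNOTATE_RETPOLINE_SAFE				\
	"call *%c[paravirt_opptr];"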
2017 Oct 04
1
[PATCH 11/13] x86/paravirt: Add paravirt alternatives infrastructure
...ude/asm/paravirt-asm.h
@@ -21,6 +21,16 @@
.short clobbers; \
.popsection
+#define PV_ALT_SITE(oldinstr, newinstr, ops, off, clobbers)	\
+	__ALTERNATIVE(.pv_altinstructions, oldinstr, newinstr,	\
+		      X86_FEATURE_PV_OPS);			\
+	.pushsection .parainstructions, "a";			\
+	_ASM_ALIGN;						\
+	_ASM_PTR 140b;						\
+	.byte PV_TYPE(ops, off);				\
+	.byte 142b-140b;					\
+	.short clobbers;					\
+	.popsection
#define COND_PUSH(set, mask, reg) \
.if ((~(set)) & mask); push %reg; .endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/para...
2023 Jun 08
3
[RFC PATCH 0/3] x86/paravirt: Get rid of paravirt patching
This is a small series getting rid of paravirt patching by switching
completely to alternative patching for the same functionality.
The basic idea is to add the capability to switch from indirect to
direct calls via a special alternative patching option.
This removes _some_ of the paravirt macro maze, but most of it needs
to stay due to the need to hide the call instructions from the
compiler
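As a rough illustration of the idea, not the actual kernel code: an indirect call site is rewritten in place to a direct call, with the leftover bytes NOP-padded. The sizes below assume x86-64 encodings, and the kernel does this through its alternatives machinery and text_poke() rather than a plain store:

#include <stdint.h>
#include <string.h>

/* Conceptual sketch: narrow "call *addr" at a patch site into
 * "call rel32" (opcode 0xe8) to the resolved target, then pad
 * the remaining bytes of the site with single-byte NOPs. */
static void patch_to_direct_call(uint8_t *site, size_t site_len,
				 const uint8_t *target)
{
	int32_t rel = (int32_t)(target - (site + 5)); /* rel32 from next insn */

	site[0] = 0xe8;				/* CALL rel32 opcode */
	memcpy(site + 1, &rel, sizeof(rel));	/* 4-byte displacement */
	memset(site + 5, 0x90, site_len - 5);	/* NOP out the rest */
}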
2017 Oct 04
0
[PATCH 06/13] x86/paravirt: Clean up paravirt-asm.h
...e ptype; \
- .byte 772b-771b; \
- .short clobbers; \
+#define PV_TYPE(ops, off) ((PARAVIRT_PATCH_##ops + (off)) / __ASM_SEL(4, 8))
+
+#define PV_SITE(insns, ops, off, clobbers)	\
+771:;						\
+	insns;					\
+772:;						\
+	.pushsection .parainstructions, "a";	\
+	_ASM_ALIGN;				\
+	_ASM_PTR 771b;				\
+	.byte PV_TYPE(ops, off);		\
+	.byte 772b-771b;			\
+	.short clobbers;			\
.popsection
@@ -33,62 +35,65 @@
COND_PUSH(set, CLBR_RDX, rdx); \
COND_PUSH(set, CLBR_RSI, rsi); \
COND_PUSH(set, CLBR_RDI, rdi); \
- COND_PUSH(set, CLBR_R8, r8);...
2017 Oct 04
31
[PATCH 00/13] x86/paravirt: Make pv ops code generation more closely match reality
This changes the pv ops code generation to more closely match reality.
For example, instead of:
callq *0xffffffff81e3a400 (pv_irq_ops.save_fl)
vmlinux will now show:
pushfq
pop %rax
nop
nop
nop
nop
nop
which is what the runtime version of the code will show in most cases.
This idea was suggested by Andy Lutomirski.
The benefits are:
- For the most common runtime cases
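For reference, the pushfq/pop sequence in that listing is essentially the native save_fl path, sketched below; the trailing NOPs are padding left over for patching:

/* Sketch of the native flags-save shown in the disassembly above:
 * push the flags register and pop it into a GPR. The five NOPs in
 * the vmlinux listing pad the site out to the full patch length. */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	asm volatile("pushf ; pop %0"
		     : "=rm" (flags)
		     : /* no input */
		     : "memory");
	return flags;
}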
2007 Dec 20
6
[PATCH 0/15] adjust pvops to accomodate its x86_64 variant
Hi folks,
With this series, the bulk of the work of pvops64 is done.
Here, I integrate most of the paravirt.c and paravirt.h files, making
them applicable to both architectures.
CONFIG_PARAVIRT is _not_ present yet. Basically, this code is missing page
table integration (patches currently being worked on by Jeremy).
Enjoy
2018 Mar 13
32
[PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v2:
- Adapt patch to work post KPTI and compiler changes
- Redo all performance testing with latest configs and compilers
- Simplify mov macro on PIE (MOVABS now)
- Reduce GOT footprint
- patch v1:
- Simplify ftrace implementation.
- Use gcc mstack-protector-guard-reg=%gs with PIE when possible.
- rfc v3:
- Use --emit-relocs instead of -pie to reduce