Displaying 20 results from an estimated 27 matches for "_asm_ptr".
2018 Dec 17
3
[PATCH v3 00/12] x86, kbuild: revert macrofying inline assembly code
This series reverts the in-kernel workarounds for inlining issues.
The commit description of 77b0bf55bc67 mentioned
"We also hope that GCC will eventually get fixed,..."
Now, GCC provides a solution.
https://gcc.gnu.org/onlinedocs/gcc/Extended-Asm.html
explains the new "asm inline" syntax.
The performance issue will eventually be solved.
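For readers new to the syntax, a minimal sketch (the function and section
name here are illustrative, not taken from the series; "asm inline" needs
GCC 9 or later). GCC normally estimates an asm statement's size from its
text, so a statement emitting a large .pushsection blob can stop the
surrounding function from being inlined; "asm inline" makes the inliner
treat the statement as minimal-size instead:

/* sketch.c - illustrative only; requires GCC >= 9 */
static inline void traced_nop(void)
{
	/* Counted as minimal size by the inliner, however much
	 * section data the template appears to emit. */
	asm inline ("nop\n\t"
		    ".pushsection .discard.example, \"a\"\n\t"
		    ".byte 0\n\t"
		    ".popsection");
}

int main(void)
{
	traced_nop();
	return 0;
}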
[About Code cleanups]
I know Nadav
2013 Jan 19
21
[PATCH]: PVH: specify xen features strings cleany for PVH
...EATURES_STR .ascii "!writable_page_tables" \
+ "|pae_pgdir_above_4gb" \
+ "\0"
#endif
__INIT
@@ -104,7 +109,7 @@ NEXT_HYPERCALL(arch_6)
#endif
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb"FEATURES_PVH)
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, XEN_FEATURES_STR)
ELFNOTE(Xen, XEN_ELFNOTE_PAE_MO...
2018 Dec 19
0
[PATCH v3 00/12] x86, kbuild: revert macrofying inline assembly code
..._\@:
+ .if \def
+ /* Equivalent to "jmp.d32 \target" */
+ .byte 0xe9
+ .long \target - .Lstatic_jump_after_\@
+.Lstatic_jump_after_\@:
+ .else
+ .byte STATIC_KEY_INIT_NOP
+ .endif
.pushsection __jump_table, "aw"
_ASM_ALIGN
- .long .Lstatic_branch_nop_\@ - ., \l_yes - .
- _ASM_PTR \key + \branch - .
+ .long .Lstatic_jump_\@ - ., \target - .
+ _ASM_PTR \key - .
.popsection
.endm
-.macro STATIC_BRANCH_JMP l_yes:req key:req branch:req
-.Lstatic_branch_jmp_\@:
- .byte 0xe9
- .long \l_yes - .Lstatic_branch_jmp_after_\@
-.Lstatic_branch_jmp_after_\@:
+.macro STATIC_JU...
2018 Dec 13
2
[PATCH] kbuild, x86: revert macros in extended asm workarounds
...t; #from " to=" #to \
- " handler=\"" #handler "\"\n\t"
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-
-#endif /* __ASSEMBLY__ */
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
_ASM_PTR (entry); \
.popsection
-#ifdef __ASSEMBLY__
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
@@ -185,7 +171,34 @@
_ASM_EXTABLE_UA(100b, 103b)
_ASM_EXTABLE_UA(101b, 103b)
.endm
-#endif /* __ASSEMBLY__ */
+
+#else
+# define _EXPAND_EXTABLE_HANDLE...
2019 Sep 26
2
An asm goto error occurred while compiling Linux kernel 5.3
...bool arch_static_branch(struct static_key *key,
bool branch)
{
asm_volatile_goto("1:"
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
".pushsection __jump_table, \"aw\" \n\t"
_ASM_ALIGN "\n\t"
".long 1b - ., %l[l_yes] - . \n\t"
_ASM_PTR "%c0 + %c1 - .\n\t"
".popsection \n\t"
: : "i" (key), "i" (branch) : : l_yes);
return false;
l_yes:
return true;
}
The error pointed to the asm_volatile_goto("1:" line.
I'm not sure whether this error is related to asm goto support.
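A quick way to check whether the toolchain accepts the construct at all is
a standalone probe; the sketch below (x86-only, and not the kernel's actual
compiler test) exercises the same asm-goto feature that arch_static_branch()
depends on:

/* goto_probe.c - minimal standalone asm goto check (x86 only) */
#include <stdio.h>

static int takes_branch(void)
{
	/* Jump straight from the asm template to a C label, which is
	 * exactly what the static-branch code above relies on. */
	asm goto ("jmp %l[yes]"
		  : /* no outputs */ : /* no inputs */
		  : /* no clobbers */ : yes);
	return 0;
yes:
	return 1;
}

int main(void)
{
	printf("asm goto taken: %d\n", takes_branch());
	return 0;
}

If this fails to compile, the compiler predates asm goto support (GCC
gained it in 4.5, Clang in 9), and the static-branch code above cannot
build either.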
2013 Dec 13
0
[PATCH V10 10/14] xen/pvh: specify xen features strings cleanly for PVH
...able_descriptor_tables|auto_translated_physmap|supervisor_mode_kernel|hvm_callback_vector"
+
#else
-#define FEATURES_PVH /* Not supported */
+#define PVH_FEATURES_STR ""
#endif
__INIT
@@ -104,7 +103,7 @@ NEXT_HYPERCALL(arch_6)
#endif
ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen)
ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page)
- ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz "!writable_page_tables|pae_pgdir_above_4gb"FEATURES_PVH)
+ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables|pae_pgdir_above_...
2018 Dec 17
0
[PATCH v3 04/12] Revert "x86/paravirt: Work around GCC inlining bugs when compiling paravirt ops"
...patchable by the
+ * apply_paravirt() alternate instruction patcher.
+ */
+#define _paravirt_alt(insn_string, type, clobber) \
+ "771:\n\t" insn_string "\n" "772:\n" \
+ ".pushsection .parainstructions,\"a\"\n" \
+ _ASM_ALIGN "\n" \
+ _ASM_PTR " 771b\n" \
+ " .byte " type "\n" \
+ " .byte 772b-771b\n" \
+ " .short " clobber "\n" \
+ ".popsection\n"
+
/* Generate patchable code, with the default asm parameters. */
-#define paravirt_call \
- "PA...
2017 Oct 04
0
[PATCH 08/13] x86/paravirt: Clean up paravirt_types.h
...patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type, clobber) \
- "771:\n\t" insn_string "\n" "772:\n" \
- ".pushsection .parainstructions,\"a\"\n" \
- _ASM_ALIGN "\n" \
- _ASM_PTR " 771b\n" \
- " .byte " type "\n" \
- " .byte 772b-771b\n" \
- " .short " clobber "\n" \
- ".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string) \...
2018 Dec 16
1
[PATCH v2] x86, kbuild: revert macrofying inline assembly code
...t; #from " to=" #to \
- " handler=\"" #handler "\"\n\t"
-
-/* For C file, we already have NOKPROBE_SYMBOL macro */
-
-#endif /* __ASSEMBLY__ */
# define _ASM_EXTABLE(from, to) \
_ASM_EXTABLE_HANDLE(from, to, ex_handler_default)
@@ -161,7 +148,6 @@
_ASM_PTR (entry); \
.popsection
-#ifdef __ASSEMBLY__
.macro ALIGN_DESTINATION
/* check for bad alignment of destination */
movl %edi,%ecx
@@ -185,7 +171,34 @@
_ASM_EXTABLE_UA(100b, 103b)
_ASM_EXTABLE_UA(101b, 103b)
.endm
-#endif /* __ASSEMBLY__ */
+
+#else
+# define _EXPAND_EXTABLE_HANDLE...
2018 Jun 11
0
[PATCH v3 6/9] x86: prevent inline distortion by paravirt ops
...e:req clobber:req pv_opptr:req
Unlike the macro maze you replaced, this has the CALL hardcoded in. So
maybe name this PARAVIRT_CALL instead of PARAVIRT_ALT?
> +771: ANNOTATE_RETPOLINE_SAFE
> + call *\pv_opptr
> +772: .pushsection .parainstructions,"a"
> + _ASM_ALIGN
> + _ASM_PTR 771b
> + .byte \type
> + .byte 772b-771b
> + .short \clobber
> + .popsection
> +.endm
2017 Oct 04
1
[PATCH 11/13] x86/paravirt: Add paravirt alternatives infrastructure
...@@ -21,6 +21,16 @@
.short clobbers; \
.popsection
+#define PV_ALT_SITE(oldinstr, newinstr, ops, off, clobbers) \
+ __ALTERNATIVE(.pv_altinstructions, oldinstr, newinstr, \
+ X86_FEATURE_PV_OPS); \
+ .pushsection .parainstructions, "a"; \
+ _ASM_ALIGN; \
+ _ASM_PTR 140b; \
+ .byte PV_TYPE(ops, off); \
+ .byte 142b-140b; \
+ .short clobbers; \
+ .popsection
#define COND_PUSH(set, mask, reg) \
.if ((~(set)) & mask); push %reg; .endif
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 5...
2017 Oct 04
0
[PATCH 06/13] x86/paravirt: Clean up paravirt-asm.h
...772b-771b; \
- .short clobbers; \
+#define PV_TYPE(ops, off) ((PARAVIRT_PATCH_##ops + (off)) / __ASM_SEL(4, 8))
+
+#define PV_SITE(insns, ops, off, clobbers) \
+771:; \
+ insns; \
+772:; \
+ .pushsection .parainstructions, "a"; \
+ _ASM_ALIGN; \
+ _ASM_PTR 771b; \
+ .byte PV_TYPE(ops, off); \
+ .byte 772b-771b; \
+ .short clobbers; \
.popsection
@@ -33,62 +35,65 @@
COND_PUSH(set, CLBR_RDX, rdx); \
COND_PUSH(set, CLBR_RSI, rsi); \
COND_PUSH(set, CLBR_RDI, rdi); \
- COND_PUSH(set, CLBR_R8, r8); \
- COND_PUSH(set,...
2017 Oct 04
31
[PATCH 00/13] x86/paravirt: Make pv ops code generation more closely match reality
This changes the pv ops code generation to more closely match reality.
For example, instead of:
callq *0xffffffff81e3a400 (pv_irq_ops.save_fl)
vmlinux will now show:
pushfq
pop %rax
nop
nop
nop
nop
nop
which is what the runtime version of the code will show in most cases.
This idea was suggested by Andy Lutomirski.
The benefits are:
- For the most common runtime cases
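For reference, the patched sequence above is just the body of the native
flag-save routine plus NOP padding; roughly (a sketch modeled on the
kernel's native_save_fl(), x86-64 only):

/* The two real instructions behind the patched call site; the rest
 * of the original indirect call's bytes become NOP padding. */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	asm volatile ("pushfq\n\t"
		      "popq %0"
		      : "=r" (flags)
		      : /* no inputs */
		      : "memory");
	return flags;
}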
2018 Mar 13
32
[PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v2:
- Adapt patch to work post KPTI and compiler changes
- Redo all performance testing with latest configs and compilers
- Simplify mov macro on PIE (MOVABS now)
- Reduce GOT footprint
- patch v1:
- Simplify ftrace implementation.
- Use gcc mstack-protector-guard-reg=%gs with PIE when possible.
- rfc v3:
- Use --emit-relocs instead of -pie to reduce
2017 Oct 04
28
x86: PIE support and option to extend KASLR randomization
These patches make the changes necessary to build the kernel as Position
Independent Executable (PIE) on x86_64. A PIE kernel can be relocated below
the top 2G of the virtual address space, which allows the KASLR
randomization range to be optionally extended from 1G to 3G.
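At the instruction level the difference looks roughly like this (a sketch
with an illustrative symbol, not code from the series); the absolute form
bakes a 64-bit address into the instruction, while the PIE form computes
it relative to %rip so the whole image can be relocated as a unit:

char some_symbol[16];	/* illustrative */

static void *addr_fixed(void)
{
	void *p;
	/* position-dependent: absolute address as a 64-bit immediate;
	 * links only in a non-PIE build (e.g. gcc -no-pie) */
	asm ("movabsq $some_symbol, %0" : "=r" (p));
	return p;
}

static void *addr_pie(void)
{
	void *p;
	/* position-independent: offset from the current instruction
	 * pointer, relocatable without patching the immediate */
	asm ("leaq some_symbol(%%rip), %0" : "=r" (p));
	return p;
}

This is also why the v2 changelog above notes that the mov macro now uses
MOVABS on PIE builds.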
Thanks a lot to Ard Biesheuvel & Kees Cook for their feedback on compiler
changes, PIE support and KASLR in general. Thanks to