Juergen Gross
2023-Jun-08 14:03 UTC
[RFC PATCH 0/3] x86/paravirt: Get rid of paravirt patching
This is a small series getting rid of paravirt patching by switching
completely to alternative patching for the same functionality.

The basic idea is to add the capability to switch from indirect to
direct calls via a special alternative patching option.

This removes _some_ of the paravirt macro maze, but most of it needs
to stay, as the call instructions must be hidden from the compiler in
order to avoid needless register save/restore.

What is going away is the nasty stacking of alternative and paravirt
patching and (of course) the special .parainstructions linker section.

I have tested the series on bare metal and as a Xen PV domain; both
still work.

RFC because I'm quite sure there will be some objtool work needed
(at least removing the specific paravirt handling).

Juergen Gross (3):
  x86/paravirt: move some functions and defines to alternative
  x86/alternative: add indirect call patching
  x86/paravirt: switch mixed paravirt/alternative calls to alternative_2

 arch/x86/include/asm/alternative.h        | 26 +++++-
 arch/x86/include/asm/paravirt.h           | 39 ++-------
 arch/x86/include/asm/paravirt_types.h     | 68 +++-------------
 arch/x86/include/asm/qspinlock_paravirt.h |  4 +-
 arch/x86/include/asm/text-patching.h      | 12 ---
 arch/x86/kernel/alternative.c             | 99 +++++++++++------------
 arch/x86/kernel/callthunks.c              | 17 ++--
 arch/x86/kernel/kvm.c                     |  4 +-
 arch/x86/kernel/module.c                  | 20 ++---
 arch/x86/kernel/paravirt.c                | 54 ++-----------
 arch/x86/kernel/vmlinux.lds.S             | 13 ---
 arch/x86/tools/relocs.c                   |  2 +-
 arch/x86/xen/irq.c                        |  2 +-
 13 files changed, 111 insertions(+), 249 deletions(-)

-- 
2.35.3
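For readers not familiar with the mechanism: the "special alternative patching
option" (added in patch 2/3, which is not quoted on this page) lets
apply_alternatives() rewrite an indirect call through a pv_ops slot into a
direct call to whatever function the slot currently holds. A minimal userspace
sketch of the byte-level idea follows; the 7-byte "call *target(%rip)"
(ff 15 disp32) becomes a 5-byte direct call (e8 rel32) plus NOP padding.
Addresses, sizes and the padding choice are made up for illustration, this is
not the kernel implementation:

/*
 * Userspace sketch only: shows the byte-level transformation, not the
 * kernel's alternatives code.  A 7-byte RIP-relative indirect call
 * (ff 15 disp32) is rewritten into a 5-byte direct call (e8 rel32) to the
 * function the pointer currently references, padded with a 2-byte NOP.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void patch_indirect_to_direct(uint8_t *insn, uint64_t insn_addr,
				     uint64_t direct_target)
{
	/* rel32 of "e8" is relative to the end of the 5-byte call */
	int32_t rel = (int32_t)(direct_target - (insn_addr + 5));

	insn[0] = 0xe8;
	memcpy(&insn[1], &rel, sizeof(rel));
	insn[5] = 0x66;		/* 66 90 = 2-byte NOP; the kernel would */
	insn[6] = 0x90;		/* pick an optimized NOP form instead   */
}

int main(void)
{
	/* "call *0x1234(%rip)" as if located at (made-up) address 0x400000 */
	uint8_t insn[7] = { 0xff, 0x15, 0x34, 0x12, 0x00, 0x00, 0x00 };
	int i;

	patch_indirect_to_direct(insn, 0x400000, 0x401000);

	for (i = 0; i < 7; i++)
		printf("%02x ", insn[i]);
	printf("\n");		/* e8 fb 0f 00 00 66 90 */
	return 0;
}

Doing this as part of regular alternative patching is what makes the separate
.parainstructions pass (and its ordering constraints) unnecessary.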
Juergen Gross
2023-Jun-08 14:03 UTC
[RFC PATCH 1/3] x86/paravirt: move some functions and defines to alternative
As a preparation for replacing paravirt patching completely by
alternative patching, move some backend functions and #defines to
alternative code and header.

Signed-off-by: Juergen Gross <jgross at suse.com>
---
 arch/x86/include/asm/alternative.h        | 16 ++++++++++++
 arch/x86/include/asm/paravirt.h           | 12 --------
 arch/x86/include/asm/paravirt_types.h     |  4 +--
 arch/x86/include/asm/qspinlock_paravirt.h |  4 +--
 arch/x86/kernel/alternative.c             | 10 ++++++++
 arch/x86/kernel/kvm.c                     |  4 +--
 arch/x86/kernel/paravirt.c                | 30 +++++++----------------
 arch/x86/xen/irq.c                        |  2 +-
 8 files changed, 41 insertions(+), 41 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index d7da28fada87..a5a4944ce5d1 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -335,6 +335,22 @@ static inline int alternatives_text_reserved(void *start, void *end)
  */
 #define ASM_NO_INPUT_CLOBBER(clbr...) "i" (0) : clbr
 
+/* Macro for creating assembler functions avoiding any C magic. */
+#define DEFINE_ASM_FUNC(func, instr, sec)		\
+	asm (".pushsection " #sec ", \"ax\"\n"		\
+	     ".global " #func "\n\t"			\
+	     ".type " #func ", @function\n\t"		\
+	     ASM_FUNC_ALIGN "\n"			\
+	     #func ":\n\t"				\
+	     ASM_ENDBR					\
+	     instr "\n\t"				\
+	     ASM_RET					\
+	     ".size " #func ", . - " #func "\n\t"	\
+	     ".popsection")
+
+void x86_BUG(void);
+void x86_nop(void);
+
 #else /* __ASSEMBLY__ */
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index b49778664d2b..3474dac4607d 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -726,18 +726,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #undef PVOP_VCALL4
 #undef PVOP_CALL4
 
-#define DEFINE_PARAVIRT_ASM(func, instr, sec)		\
-	asm (".pushsection " #sec ", \"ax\"\n"		\
-	     ".global " #func "\n\t"			\
-	     ".type " #func ", @function\n\t"		\
-	     ASM_FUNC_ALIGN "\n"			\
-	     #func ":\n\t"				\
-	     ASM_ENDBR					\
-	     instr "\n\t"				\
-	     ASM_RET					\
-	     ".size " #func ", . - " #func "\n\t"	\
-	     ".popsection")
-
 extern void default_banner(void);
 
 #else  /* __ASSEMBLY__ */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 4acbcddddc29..fe58f1882e9c 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -557,8 +557,6 @@ void paravirt_enter_lazy_mmu(void);
 void paravirt_leave_lazy_mmu(void);
 void paravirt_flush_lazy_mmu(void);
 
-void _paravirt_nop(void);
-void paravirt_BUG(void);
 unsigned long paravirt_ret0(void);
 #ifdef CONFIG_PARAVIRT_XXL
 u64 _paravirt_ident_64(u64);
@@ -568,7 +566,7 @@ void pv_native_irq_enable(void);
 unsigned long pv_native_read_cr2(void);
 #endif
 
-#define paravirt_nop	((void *)_paravirt_nop)
+#define paravirt_nop	((void *)x86_nop)
 
 extern struct paravirt_patch_site __parainstructions[],
 	__parainstructions_end[];
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
index 42b17cf10b10..2189b3379b1c 100644
--- a/arch/x86/include/asm/qspinlock_paravirt.h
+++ b/arch/x86/include/asm/qspinlock_paravirt.h
@@ -54,8 +54,8 @@ __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock_slowpath, ".spinlock.text");
 	"pop   %rdx\n\t"					\
 	FRAME_END
 
-DEFINE_PARAVIRT_ASM(__raw_callee_save___pv_queued_spin_unlock,
-		    PV_UNLOCK_ASM, .spinlock.text);
+DEFINE_ASM_FUNC(__raw_callee_save___pv_queued_spin_unlock,
+		PV_UNLOCK_ASM, .spinlock.text);
 
 #else /* CONFIG_64BIT */
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f615e0cb6d93..b7c70479fe58 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -253,6 +253,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
 	}
 }
 
+/* Low-level backend functions usable from alternative code replacements. */
+DEFINE_ASM_FUNC(x86_nop, "", .entry.text);
+EXPORT_SYMBOL_GPL(x86_nop);
+
+noinstr void x86_BUG(void)
+{
+	BUG();
+}
+EXPORT_SYMBOL_GPL(x86_BUG);
+
 /*
  * Replace instructions with better alternatives for this CPU type. This runs
  * before SMP is initialized to avoid SMP problems with self modifying code.
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 1cceac5984da..d025fc630115 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -803,8 +803,8 @@ extern bool __raw_callee_save___kvm_vcpu_is_preempted(long);
 "cmpb $0, " __stringify(KVM_STEAL_TIME_preempted) "+steal_time(%rax)\n\t"	\
 "setne %al\n\t"
 
-DEFINE_PARAVIRT_ASM(__raw_callee_save___kvm_vcpu_is_preempted,
-		    PV_VCPU_PREEMPTED_ASM, .text);
+DEFINE_ASM_FUNC(__raw_callee_save___kvm_vcpu_is_preempted,
+		PV_VCPU_PREEMPTED_ASM, .text);
 #endif
 
 static void __init kvm_guest_init(void)
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index ac10b46c5832..dfad56679f88 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -34,14 +34,8 @@
 #include <asm/io_bitmap.h>
 #include <asm/gsseg.h>
 
-/*
- * nop stub, which must not clobber anything *including the stack* to
- * avoid confusing the entry prologues.
- */
-DEFINE_PARAVIRT_ASM(_paravirt_nop, "", .entry.text);
-
 /* stub always returning 0. */
-DEFINE_PARAVIRT_ASM(paravirt_ret0, "xor %eax,%eax", .entry.text);
+DEFINE_ASM_FUNC(paravirt_ret0, "xor %eax,%eax", .entry.text);
 
 void __init default_banner(void)
 {
@@ -49,12 +43,6 @@ void __init default_banner(void)
 	       pv_info.name);
 }
 
-/* Undefined instruction for dealing with missing ops pointers. */
-noinstr void paravirt_BUG(void)
-{
-	BUG();
-}
-
 static unsigned paravirt_patch_call(void *insn_buff, const void *target,
 				    unsigned long addr, unsigned len)
 {
@@ -64,11 +52,11 @@ static unsigned paravirt_patch_call(void *insn_buff, const void *target,
 }
 
 #ifdef CONFIG_PARAVIRT_XXL
-DEFINE_PARAVIRT_ASM(_paravirt_ident_64, "mov %rdi, %rax", .text);
-DEFINE_PARAVIRT_ASM(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_disable, "cli", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_irq_enable, "sti", .noinstr.text);
-DEFINE_PARAVIRT_ASM(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
+DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
+DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_disable, "cli", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_irq_enable, "sti", .noinstr.text);
+DEFINE_ASM_FUNC(pv_native_read_cr2, "mov %cr2, %rax", .noinstr.text);
 #endif
 
 DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);
@@ -90,9 +78,9 @@ unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
 	unsigned ret;
 
 	if (opfunc == NULL)
-		/* If there's no function, patch it with paravirt_BUG() */
-		ret = paravirt_patch_call(insn_buff, paravirt_BUG, addr, len);
-	else if (opfunc == _paravirt_nop)
+		/* If there's no function, patch it with x86_BUG() */
+		ret = paravirt_patch_call(insn_buff, x86_BUG, addr, len);
+	else if (opfunc == x86_nop)
 		ret = 0;
 	else
 		/* Otherwise call the function. */
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 6092fea7d651..5d132f5a5d7d 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -45,7 +45,7 @@ static const typeof(pv_ops) xen_irq_ops __initconst = {
 	/* Initial interrupt flag handling only called while interrupts off. */
 	.save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0),
 	.irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop),
-	.irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG),
+	.irq_enable = __PV_IS_CALLEE_SAVE(x86_BUG),
 
 	.safe_halt = xen_safe_halt,
 	.halt = xen_halt,
-- 
2.35.3
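The DEFINE_ASM_FUNC() macro introduced above emits the whole function from a
top-level asm() statement, so the compiler adds no prologue/epilogue and the
stub cannot clobber anything (which matters for stubs like x86_nop that may be
reached from entry code). A simplified, self-contained userspace variant of
the same idea, leaving out the kernel's ASM_FUNC_ALIGN/ASM_ENDBR/ASM_RET
handling and using made-up names, would look like this:

/*
 * Userspace toy modeled on DEFINE_ASM_FUNC(): the function body comes from a
 * file-scope asm() only, so the compiler emits no prologue/epilogue for it.
 * Section, alignment, ENDBR and return-thunk handling of the real kernel
 * macro are intentionally left out; all names here are made up.
 */
#include <stdio.h>

#define DEFINE_ASM_FUNC_DEMO(func, instr)		\
	asm (".pushsection .text, \"ax\"\n"		\
	     ".global " #func "\n\t"			\
	     ".type " #func ", @function\n\t"		\
	     #func ":\n\t"				\
	     instr "\n\t"				\
	     "ret\n\t"					\
	     ".size " #func ", . - " #func "\n\t"	\
	     ".popsection")

/* identity function, System V x86-64 ABI: argument in %rdi, result in %rax */
long asm_ident(long x);
DEFINE_ASM_FUNC_DEMO(asm_ident, "mov %rdi, %rax");

int main(void)
{
	printf("%ld\n", asm_ident(42));	/* prints 42 */
	return 0;
}

Built with gcc on x86-64 Linux, asm_ident(42) returns 42 even though the
compiler never sees a C function body for it.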
Juergen Gross
2023-Jun-08 14:03 UTC
[RFC PATCH 3/3] x86/paravirt: switch mixed paravirt/alternative calls to alternative_2
Instead of stacking alternative and paravirt patching, use the new
ALT_FLAG_CALL flag to switch those mixed calls to pure alternative
handling.

This eliminates the need to be careful regarding the sequence of
alternative and paravirt patching.

For call depth tracking callthunks_setup() needs to be adapted to
patch calls at alternative patching sites instead of paravirt calls.

Remove the no longer needed paravirt patching and related code.

Signed-off-by: Juergen Gross <jgross at suse.com>
---
 arch/x86/include/asm/alternative.h    |  5 ++-
 arch/x86/include/asm/paravirt.h       | 27 +++--------
 arch/x86/include/asm/paravirt_types.h | 64 ++++-----------------------
 arch/x86/include/asm/text-patching.h  | 12 -----
 arch/x86/kernel/alternative.c         | 52 +---------------------
 arch/x86/kernel/callthunks.c          | 17 ++++---
 arch/x86/kernel/module.c              | 20 +++------
 arch/x86/kernel/paravirt.c            | 30 -------------
 arch/x86/kernel/vmlinux.lds.S         | 13 ------
 arch/x86/tools/relocs.c               |  2 +-
 10 files changed, 34 insertions(+), 208 deletions(-)

diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index 8e0871c1631a..b44028ec6850 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -89,6 +89,8 @@ struct alt_instr {
 	u8  replacementlen;	/* length of new instruction */
 } __packed;
 
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+
 /*
  * Debug flag that can be tested to see whether alternative
  * instructions were patched in already:
@@ -104,11 +106,10 @@ extern void apply_fineibt(s32 *start_retpoline, s32 *end_retpoine,
 			  s32 *start_cfi, s32 *end_cfi);
 
 struct module;
-struct paravirt_patch_site;
 
 struct callthunk_sites {
 	s32				*call_start, *call_end;
-	struct paravirt_patch_site	*pv_start, *pv_end;
+	struct alt_instr		*alt_start, *alt_end;
 };
 
 #ifdef CONFIG_CALL_THUNKS
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 3474dac4607d..f510031c54ca 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -730,35 +730,20 @@ extern void default_banner(void);
 
 #else  /* __ASSEMBLY__ */
 
-#define _PVSITE(ptype, ops, word, algn)		\
-771:;						\
-	ops;					\
-772:;						\
-	.pushsection .parainstructions,"a";	\
-	 .align	algn;				\
-	 word 771b;				\
-	 .byte ptype;				\
-	 .byte 772b-771b;			\
-	 _ASM_ALIGN;				\
-	.popsection
-
-
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT_XXL
+#ifdef CONFIG_DEBUG_ENTRY
 
-#define PARA_PATCH(off)		((off) / 8)
-#define PARA_SITE(ptype, ops)	_PVSITE(ptype, ops, .quad, 8)
 #define PARA_INDIRECT(addr)	*addr(%rip)
 
-#ifdef CONFIG_DEBUG_ENTRY
 .macro PARA_IRQ_save_fl
-	PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),
-		  ANNOTATE_RETPOLINE_SAFE;
-		  call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);)
+	ANNOTATE_RETPOLINE_SAFE;
+	call PARA_INDIRECT(pv_ops+PV_IRQ_save_fl);
 .endm
 
-#define SAVE_FLAGS ALTERNATIVE "PARA_IRQ_save_fl;", "pushf; pop %rax;", \
-			       ALT_NOT(X86_FEATURE_XENPV)
+#define SAVE_FLAGS ALTERNATIVE_2 "PARA_IRQ_save_fl;", ALT_CALL_INSTR,	\
+				 ALT_CALL_ALWAYS, "pushf; pop %rax;",	\
+				 ALT_NOT(X86_FEATURE_XENPV)
 #endif
 #endif /* CONFIG_PARAVIRT_XXL */
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index fe58f1882e9c..c4465fed8204 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -3,13 +3,6 @@
 #define _ASM_X86_PARAVIRT_TYPES_H
 
 #ifndef __ASSEMBLY__
-/* These all sit in the .parainstructions section to tell us what to patch. */
-struct paravirt_patch_site {
-	u8 *instr;		/* original instructions */
-	u8 type;		/* type of this instruction */
-	u8 len;			/* length of original instruction */
-};
-
 /* Lazy mode for batching updates / context switch */
 enum paravirt_lazy_mode {
 	PARAVIRT_LAZY_NONE,
@@ -257,43 +250,11 @@ struct paravirt_patch_template {
 extern struct pv_info pv_info;
 extern struct paravirt_patch_template pv_ops;
 
-#define PARAVIRT_PATCH(x)					\
-	(offsetof(struct paravirt_patch_template, x) / sizeof(void *))
-
-#define paravirt_type(op)				\
-	[paravirt_typenum] "i" (PARAVIRT_PATCH(op)),	\
-	[paravirt_opptr] "m" (pv_ops.op)
-/*
- * Generate some code, and mark it as patchable by the
- * apply_paravirt() alternate instruction patcher.
- */
-#define _paravirt_alt(insn_string, type)		\
-	"771:\n\t" insn_string "\n" "772:\n"		\
-	".pushsection .parainstructions,\"a\"\n"	\
-	_ASM_ALIGN "\n"					\
-	_ASM_PTR " 771b\n"				\
-	" .byte " type "\n"				\
-	" .byte 772b-771b\n"				\
-	_ASM_ALIGN "\n"					\
-	".popsection\n"
-
-/* Generate patchable code, with the default asm parameters. */
-#define paravirt_alt(insn_string)			\
-	_paravirt_alt(insn_string, "%c[paravirt_typenum]")
-
-/* Simple instruction patching code. */
-#define NATIVE_LABEL(a,x,b) "\n\t.globl " a #x "_" #b "\n" a #x "_" #b ":\n\t"
-
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr, unsigned int len);
+#define paravirt_ptr(op)	[paravirt_opptr] "m" (pv_ops.op)
 
 int paravirt_disable_iospace(void);
 
-/*
- * This generates an indirect call based on the operation type number.
- * The type number, computed in PARAVIRT_PATCH, is derived from the
- * offset into the paravirt_patch_template structure, and can therefore be
- * freely converted back into a structure offset.
- */
+/* This generates an indirect call based on the operation type number. */
 #define PARAVIRT_CALL					\
 	ANNOTATE_RETPOLINE_SAFE				\
 	"call *%[paravirt_opptr];"
@@ -326,12 +287,6 @@ int paravirt_disable_iospace(void);
  * However, x86_64 also has to clobber all caller saved registers, which
  * unfortunately, are quite a bit (r8 - r11)
  *
- * The call instruction itself is marked by placing its start address
- * and size into the .parainstructions section, so that
- * apply_paravirt() in arch/i386/kernel/alternative.c can do the
- * appropriate patching under the control of the backend pv_init_ops
- * implementation.
- *
 * Unfortunately there's no way to get gcc to generate the args setup
 * for the call, and then allow the call itself to be generated by an
 * inline asm.  Because of this, we must do the complete arg setup and
@@ -435,9 +390,10 @@ int paravirt_disable_iospace(void);
 	({								\
 		PVOP_CALL_ARGS;						\
 		PVOP_TEST_NULL(op);					\
-		asm volatile(paravirt_alt(PARAVIRT_CALL)		\
+		asm volatile(ALTERNATIVE(PARAVIRT_CALL, ALT_CALL_INSTR,	\
+				ALT_CALL_ALWAYS)			\
 			     : call_clbr, ASM_CALL_CONSTRAINT		\
-			     : paravirt_type(op),			\
+			     : paravirt_ptr(op),			\
 			       ##__VA_ARGS__				\
 			     : "memory", "cc" extra_clbr);		\
 		ret;							\
@@ -448,10 +404,11 @@ int paravirt_disable_iospace(void);
 	({								\
 		PVOP_CALL_ARGS;						\
 		PVOP_TEST_NULL(op);					\
-		asm volatile(ALTERNATIVE(paravirt_alt(PARAVIRT_CALL),	\
-					 alt, cond)			\
+		asm volatile(ALTERNATIVE_2(PARAVIRT_CALL,		\
+				 ALT_CALL_INSTR, ALT_CALL_ALWAYS,	\
+				 alt, cond)				\
 			     : call_clbr, ASM_CALL_CONSTRAINT		\
-			     : paravirt_type(op),			\
+			     : paravirt_ptr(op),			\
 			       ##__VA_ARGS__				\
 			     : "memory", "cc" extra_clbr);		\
 		ret;							\
@@ -568,9 +525,6 @@ unsigned long pv_native_read_cr2(void);
 
 #define paravirt_nop	((void *)x86_nop)
 
-extern struct paravirt_patch_site __parainstructions[],
-	__parainstructions_end[];
-
 #endif  /* __ASSEMBLY__ */
 #endif  /* CONFIG_PARAVIRT */
 #endif	/* _ASM_X86_PARAVIRT_TYPES_H */
diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
index 29832c338cdc..0b70653a98c1 100644
--- a/arch/x86/include/asm/text-patching.h
+++ b/arch/x86/include/asm/text-patching.h
@@ -6,18 +6,6 @@
 #include <linux/stddef.h>
 #include <asm/ptrace.h>
 
-struct paravirt_patch_site;
-#ifdef CONFIG_PARAVIRT
-void apply_paravirt(struct paravirt_patch_site *start,
-		    struct paravirt_patch_site *end);
-#else
-static inline void apply_paravirt(struct paravirt_patch_site *start,
-				  struct paravirt_patch_site *end)
-{}
-#define __parainstructions	NULL
-#define __parainstructions_end	NULL
-#endif
-
 /*
  * Currently, the max observed size in the kernel code is
  * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index f0371304c1b5..58e9fadc30d6 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -118,7 +118,6 @@ extern s32 __retpoline_sites[], __retpoline_sites_end[];
 extern s32 __return_sites[], __return_sites_end[];
 extern s32 __cfi_sites[], __cfi_sites_end[];
 extern s32 __ibt_endbr_seal[], __ibt_endbr_seal_end[];
-extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
 extern s32 __smp_locks[], __smp_locks_end[];
 void text_poke_early(void *addr, const void *opcode, size_t len);
 
@@ -1265,32 +1264,6 @@ int alternatives_text_reserved(void *start, void *end)
 }
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_PARAVIRT
-void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
-				     struct paravirt_patch_site *end)
-{
-	struct paravirt_patch_site *p;
-	char insn_buff[MAX_PATCH_LEN];
-
-	for (p = start; p < end; p++) {
-		unsigned int used;
-
-		BUG_ON(p->len > MAX_PATCH_LEN);
-		/* prep the buffer with the original instructions */
-		memcpy(insn_buff, p->instr, p->len);
-		used = paravirt_patch(p->type, insn_buff, (unsigned long)p->instr, p->len);
-
-		BUG_ON(used > p->len);
-
-		/* Pad the rest with nops */
-		add_nops(insn_buff + used, p->len - used);
-		text_poke_early(p->instr, insn_buff, p->len);
-	}
-}
-extern struct paravirt_patch_site __start_parainstructions[],
-	__stop_parainstructions[];
-#endif	/* CONFIG_PARAVIRT */
-
 /*
  * Self-test for the INT3 based CALL emulation code.
  *
@@ -1396,28 +1369,11 @@ void __init alternative_instructions(void)
 	 */
 
 	/*
-	 * Paravirt patching and alternative patching can be combined to
-	 * replace a function call with a short direct code sequence (e.g.
-	 * by setting a constant return value instead of doing that in an
-	 * external function).
-	 * In order to make this work the following sequence is required:
-	 * 1. set (artificial) features depending on used paravirt
-	 *    functions which can later influence alternative patching
-	 * 2. apply paravirt patching (generally replacing an indirect
-	 *    function call with a direct one)
-	 * 3. apply alternative patching (e.g. replacing a direct function
-	 *    call with a custom code sequence)
-	 * Doing paravirt patching after alternative patching would clobber
-	 * the optimization of the custom code with a function call again.
+	 * Make sure to set (artificial) features depending on used paravirt
+	 * functions which can later influence alternative patching.
 	 */
 	paravirt_set_cap();
 
-	/*
-	 * First patch paravirt functions, such that we overwrite the indirect
-	 * call with the direct call.
-	 */
-	apply_paravirt(__parainstructions, __parainstructions_end);
-
 	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
 			__cfi_sites, __cfi_sites_end, true);
 
@@ -1428,10 +1384,6 @@ void __init alternative_instructions(void)
 	apply_retpolines(__retpoline_sites, __retpoline_sites_end);
 	apply_returns(__return_sites, __return_sites_end);
 
-	/*
-	 * Then patch alternatives, such that those paravirt calls that are in
-	 * alternatives can be overwritten by their immediate fragments.
-	 */
 	apply_alternatives(__alt_instructions, __alt_instructions_end);
 
 	/*
diff --git a/arch/x86/kernel/callthunks.c b/arch/x86/kernel/callthunks.c
index 22ab13966427..2c1673410f3e 100644
--- a/arch/x86/kernel/callthunks.c
+++ b/arch/x86/kernel/callthunks.c
@@ -238,14 +238,13 @@ patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
 }
 
 static __init_or_module void
-patch_paravirt_call_sites(struct paravirt_patch_site *start,
-			  struct paravirt_patch_site *end,
-			  const struct core_text *ct)
+patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
+		     const struct core_text *ct)
 {
-	struct paravirt_patch_site *p;
+	struct alt_instr *a;
 
-	for (p = start; p < end; p++)
-		patch_call(p->instr, ct);
+	for (a = start; a < end; a++)
+		patch_call((u8 *)&a->instr_offset + a->instr_offset, ct);
 }
 
 static __init_or_module void
@@ -253,7 +252,7 @@ callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
 {
 	prdbg("Patching call sites %s\n", ct->name);
 	patch_call_sites(cs->call_start, cs->call_end, ct);
-	patch_paravirt_call_sites(cs->pv_start, cs->pv_end, ct);
+	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
 	prdbg("Patching call sites done%s\n", ct->name);
 }
 
@@ -262,8 +261,8 @@ void __init callthunks_patch_builtin_calls(void)
 	struct callthunk_sites cs = {
 		.call_start	= __call_sites,
 		.call_end	= __call_sites_end,
-		.pv_start	= __parainstructions,
-		.pv_end		= __parainstructions_end
+		.alt_start	= __alt_instructions,
+		.alt_end	= __alt_instructions_end
 	};
 
 	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
index b05f62ee2344..ec58ffd1c840 100644
--- a/arch/x86/kernel/module.c
+++ b/arch/x86/kernel/module.c
@@ -276,7 +276,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		    struct module *me)
 {
 	const Elf_Shdr *s, *alt = NULL, *locks = NULL,
-		*para = NULL, *orc = NULL, *orc_ip = NULL,
+		*orc = NULL, *orc_ip = NULL,
 		*retpolines = NULL, *returns = NULL, *ibt_endbr = NULL,
 		*calls = NULL, *cfi = NULL;
 	char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
@@ -286,8 +286,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 			alt = s;
 		if (!strcmp(".smp_locks", secstrings + s->sh_name))
 			locks = s;
-		if (!strcmp(".parainstructions", secstrings + s->sh_name))
-			para = s;
 		if (!strcmp(".orc_unwind", secstrings + s->sh_name))
 			orc = s;
 		if (!strcmp(".orc_unwind_ip", secstrings + s->sh_name))
@@ -304,14 +302,6 @@ int module_finalize(const Elf_Ehdr *hdr,
 			ibt_endbr = s;
 	}
 
-	/*
-	 * See alternative_instructions() for the ordering rules between the
-	 * various patching types.
-	 */
-	if (para) {
-		void *pseg = (void *)para->sh_addr;
-		apply_paravirt(pseg, pseg + para->sh_size);
-	}
 	if (retpolines || cfi) {
 		void *rseg = NULL, *cseg = NULL;
 		unsigned int rsize = 0, csize = 0;
@@ -341,7 +331,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 		void *aseg = (void *)alt->sh_addr;
 		apply_alternatives(aseg, aseg + alt->sh_size);
 	}
-	if (calls || para) {
+	if (calls || alt) {
 		struct callthunk_sites cs = {};
 
 		if (calls) {
@@ -349,9 +339,9 @@ int module_finalize(const Elf_Ehdr *hdr,
 			cs.call_end = (void *)calls->sh_addr + calls->sh_size;
 		}
 
-		if (para) {
-			cs.pv_start = (void *)para->sh_addr;
-			cs.pv_end = (void *)para->sh_addr + para->sh_size;
+		if (alt) {
+			cs.alt_start = (void *)alt->sh_addr;
+			cs.alt_end = (void *)alt->sh_addr + alt->sh_size;
 		}
 
 		callthunks_patch_module_calls(&cs, me);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index dfad56679f88..1ef889110919 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -43,14 +43,6 @@ void __init default_banner(void)
 	       pv_info.name);
 }
 
-static unsigned paravirt_patch_call(void *insn_buff, const void *target,
-				    unsigned long addr, unsigned len)
-{
-	__text_gen_insn(insn_buff, CALL_INSN_OPCODE,
-			(void *)addr, target, CALL_INSN_SIZE);
-	return CALL_INSN_SIZE;
-}
-
 #ifdef CONFIG_PARAVIRT_XXL
 DEFINE_ASM_FUNC(_paravirt_ident_64, "mov %rdi, %rax", .text);
 DEFINE_ASM_FUNC(pv_native_save_fl, "pushf; pop %rax", .noinstr.text);
@@ -67,28 +59,6 @@ void __init native_pv_lock_init(void)
 		static_branch_disable(&virt_spin_lock_key);
 }
 
-unsigned int paravirt_patch(u8 type, void *insn_buff, unsigned long addr,
-			    unsigned int len)
-{
-	/*
-	 * Neat trick to map patch type back to the call within the
-	 * corresponding structure.
-	 */
-	void *opfunc = *((void **)&pv_ops + type);
-	unsigned ret;
-
-	if (opfunc == NULL)
-		/* If there's no function, patch it with x86_BUG() */
-		ret = paravirt_patch_call(insn_buff, x86_BUG, addr, len);
-	else if (opfunc == x86_nop)
-		ret = 0;
-	else
-		/* Otherwise call the function. */
-		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);
-
-	return ret;
-}
-
 struct static_key paravirt_steal_enabled;
 struct static_key paravirt_steal_rq_enabled;
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 25f155205770..5159d80bee38 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -258,19 +258,6 @@ SECTIONS
 	}
 #endif
 
-	/*
-	 * start address and size of operations which during runtime
-	 * can be patched with virtualization friendly instructions or
-	 * baremetal native ones. Think page table operations.
-	 * Details in paravirt_types.h
-	 */
-	. = ALIGN(8);
-	.parainstructions : AT(ADDR(.parainstructions) - LOAD_OFFSET) {
-		__parainstructions = .;
-		*(.parainstructions)
-		__parainstructions_end = .;
-	}
-
 #ifdef CONFIG_RETPOLINE
 	/*
 	 * List of instructions that call/jmp/jcc to retpoline thunks
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index d30949e25ebd..a3bae2b24626 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -66,7 +66,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
 	[S_REL] =
 	"^(__init_(begin|end)|"
 	"__x86_cpu_dev_(start|end)|"
-	"(__parainstructions|__alt_instructions)(_end)?|"
+	"__alt_instructions(_end)?|"
 	"(__iommu_table|__apicdrivers|__smp_locks)(_end)?|"
 	"__(start|end)_pci_.*|"
 #if CONFIG_FW_LOADER
-- 
2.35.3
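A side note on patch_alt_call_sites() above: struct alt_instr does not record
an absolute pointer to the patched instruction, it records a self-relative s32
offset, which is why the call site is recovered as
(u8 *)&a->instr_offset + a->instr_offset. A toy userspace illustration of that
idiom (struct name and values invented for the example; the kernel uses it to
keep the entries small and position independent):

/*
 * Userspace toy showing the self-relative offset idiom used by
 * struct alt_instr: store "target - &field" in a 32-bit field and recover
 * the target as (u8 *)&field + field.  Struct and values are invented for
 * the example; both objects must live close enough together for the offset
 * to fit in 32 bits (true for kernel image symbols).
 */
#include <stdint.h>
#include <stdio.h>

struct rel_ref {
	int32_t offset;		/* target address relative to &offset */
};

static int answer = 42;		/* the referenced object */
static struct rel_ref ref;	/* the reference, both in static storage */

static void rel_set(struct rel_ref *r, void *target)
{
	r->offset = (int32_t)((uintptr_t)target - (uintptr_t)&r->offset);
}

static void *rel_get(struct rel_ref *r)
{
	return (void *)((uintptr_t)&r->offset + r->offset);
}

int main(void)
{
	rel_set(&ref, &answer);
	printf("%d\n", *(int *)rel_get(&ref));	/* prints 42 */
	return 0;
}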
Juergen Gross
2023-Jul-10 12:29 UTC
[RFC PATCH 0/3] x86/paravirt: Get rid of paravirt patching
Any comments?

On 08.06.23 16:03, Juergen Gross wrote:
> This is a small series getting rid of paravirt patching by switching
> completely to alternative patching for the same functionality.
>
> The basic idea is to add the capability to switch from indirect to
> direct calls via a special alternative patching option.
>
> This removes _some_ of the paravirt macro maze, but most of it needs
> to stay due to the need of hiding the call instructions from the
> compiler in order to avoid needless register save/restore.
>
> What is going away is the nasty stacking of alternative and paravirt
> patching and (of course) the special .parainstructions linker section.
>
> I have tested the series on bare metal and as Xen PV domain to still
> work.
>
> RFC because I'm quite sure there will be some objtool work needed
> (at least removing the specific paravirt handling).
>
> Juergen Gross (3):
>   x86/paravirt: move some functions and defines to alternative
>   x86/alternative: add indirect call patching
>   x86/paravirt: switch mixed paravirt/alternative calls to alternative_2
>
>  arch/x86/include/asm/alternative.h        | 26 +++++-
>  arch/x86/include/asm/paravirt.h           | 39 ++-------
>  arch/x86/include/asm/paravirt_types.h     | 68 +++-------------
>  arch/x86/include/asm/qspinlock_paravirt.h |  4 +-
>  arch/x86/include/asm/text-patching.h      | 12 ---
>  arch/x86/kernel/alternative.c             | 99 +++++++++++------------
>  arch/x86/kernel/callthunks.c              | 17 ++--
>  arch/x86/kernel/kvm.c                     |  4 +-
>  arch/x86/kernel/module.c                  | 20 ++---
>  arch/x86/kernel/paravirt.c                | 54 ++-----------
>  arch/x86/kernel/vmlinux.lds.S             | 13 ---
>  arch/x86/tools/relocs.c                   |  2 +-
>  arch/x86/xen/irq.c                        |  2 +-
>  13 files changed, 111 insertions(+), 249 deletions(-)
>