Josh Triplett
2014-Nov-02 17:31 UTC
[PATCH v4 00/10] x86: Support compiling out userspace IO (iopl and ioperm)
This patch series makes it possible to compile out the userspace IO system
calls, iopl and ioperm. The first patch does some 32/64 unification in
copy_thread to make subsequent changes easier. The second patch simplifies
the complex calculation of the TSS segment limit, which also makes it easier
to change in the last patch. Patches 3-9 introduce helpers to make it easier
to compile out IO. The last patch adds and uses the new CONFIG_X86_IOPORT to
support compiling out userspace IO.

v3 had patches 3-10 as a single patch; v4 splits out the various helpers and
macros into separate patches, as requested by Thomas Gleixner. I've verified
that this compiles after each patch.

Josh Triplett (10):
  x86: process: Unify 32-bit and 64-bit copy_thread I/O bitmap handling
  x86: tss: Eliminate fragile calculation of TSS segment limit
  x86: processor.h: Introduce macros to initialize IO fields of thread and TSS
  x86: paravirt: Wrap initialization of set_iopl_mask in a macro
  x86: cpu: Add helper function unifying 32-bit and 64-bit IO init in cpu_init
  x86: process: Introduce helper to clear a thread's IO bitmap
  x86: process: Introduce helper to switch iopl mask
  x86: process: Introduce helper for IO-related bits of exit_thread
  x86: process: Introduce helper to switch IO bitmap
  x86: Support compiling out userspace IO (iopl and ioperm)

 arch/x86/Kconfig                      | 10 ++++
 arch/x86/include/asm/desc.h           | 11 +---
 arch/x86/include/asm/paravirt.h       |  2 +
 arch/x86/include/asm/paravirt_types.h |  5 ++
 arch/x86/include/asm/processor.h      | 55 +++++++++++++++++----
 arch/x86/include/asm/syscalls.h       |  3 ++
 arch/x86/kernel/Makefile              |  3 +-
 arch/x86/kernel/cpu/common.c          | 12 +----
 arch/x86/kernel/entry_64.S            |  9 ++--
 arch/x86/kernel/paravirt.c            |  2 +-
 arch/x86/kernel/process-io.h          | 93 +++++++++++++++++++++++++++++++++++
 arch/x86/kernel/process.c             | 34 ++-----------
 arch/x86/kernel/process_32.c          | 41 +++++----------
 arch/x86/kernel/process_64.c          | 27 +++-------
 arch/x86/kernel/ptrace.c              |  8 +++
 arch/x86/xen/enlighten.c              |  4 +-
 drivers/tty/vt/vt_ioctl.c             |  2 +-
 kernel/sys_ni.c                       |  5 ++
 18 files changed, 208 insertions(+), 118 deletions(-)
 create mode 100644 arch/x86/kernel/process-io.h

--
2.1.1
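For readers who have not used these interfaces, a minimal userspace sketch of
what iopl/ioperm-based port access looks like is below; the specific port and
the glibc <sys/io.h> wrappers are illustrative assumptions, not something the
series adds or changes.

/*
 * Illustrative only: a root-only program that asks the kernel for direct
 * access to one I/O port and pokes it without a kernel driver.  Port 0x80
 * (the POST diagnostic port) is used purely as an example.
 */
#include <stdio.h>
#include <sys/io.h>	/* ioperm(), iopl(), outb() -- glibc, x86 only */

int main(void)
{
	/* Grant access to a single port; requires CAP_SYS_RAWIO. */
	if (ioperm(0x80, 1, 1) < 0) {
		perror("ioperm");
		return 1;
	}

	outb(0x42, 0x80);	/* direct port write from userspace */

	ioperm(0x80, 1, 0);	/* drop the access again */
	return 0;
}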
Josh Triplett
2014-Nov-02 17:31 UTC
[PATCH v4 01/10] x86: process: Unify 32-bit and 64-bit copy_thread I/O bitmap handling
The 32-bit and 64-bit versions of copy_thread have functionally identical
handling for copying the I/O bitmap, modulo differences in error handling.
Clean up the error paths in both by moving the copy of the I/O bitmap to the
end, to eliminate the need to free it if subsequent copy steps fail; move the
resulting identical code to a static inline in a common header.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
Acked-by: Kees Cook <keescook at chromium.org>
---
 arch/x86/kernel/process-io.h | 22 ++++++++++++++++++++++
 arch/x86/kernel/process_32.c | 28 ++++++++--------------------
 arch/x86/kernel/process_64.c | 25 +++++--------------------
 3 files changed, 35 insertions(+), 40 deletions(-)
 create mode 100644 arch/x86/kernel/process-io.h

diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
new file mode 100644
index 0000000..d884444
--- /dev/null
+++ b/arch/x86/kernel/process-io.h
@@ -0,0 +1,22 @@
+#ifndef _X86_KERNEL_PROCESS_IO_H
+#define _X86_KERNEL_PROCESS_IO_H
+
+static inline int copy_io_bitmap(struct task_struct *me,
+				 struct task_struct *p)
+{
+	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
+		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
+						  IO_BITMAP_BYTES, GFP_KERNEL);
+		if (!p->thread.io_bitmap_ptr) {
+			p->thread.io_bitmap_max = 0;
+			return -ENOMEM;
+		}
+		set_tsk_thread_flag(p, TIF_IO_BITMAP);
+	} else {
+		p->thread.io_bitmap_ptr = NULL;
+	}
+
+	return 0;
+}
+
+#endif /* _X86_KERNEL_PROCESS_IO_H */
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8f3ebfe..07550ff 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -55,6 +55,8 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 
+#include "process-io.h"
+
 asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
 asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
 
@@ -134,7 +136,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 {
 	struct pt_regs *childregs = task_pt_regs(p);
 	struct task_struct *tsk;
-	int err;
 
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.sp0 = (unsigned long) (childregs+1);
@@ -166,32 +167,19 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
-	err = -ENOMEM;
-
-	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
-		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
-						  IO_BITMAP_BYTES, GFP_KERNEL);
-		if (!p->thread.io_bitmap_ptr) {
-			p->thread.io_bitmap_max = 0;
-			return -ENOMEM;
-		}
-		set_tsk_thread_flag(p, TIF_IO_BITMAP);
-	}
-
-	err = 0;
 
 	/*
 	 * Set a new TLS for the child thread?
 	 */
-	if (clone_flags & CLONE_SETTLS)
+	if (clone_flags & CLONE_SETTLS) {
+		int err;
 		err = do_set_thread_area(p, -1,
 			(struct user_desc __user *)childregs->si, 0);
-
-	if (err && p->thread.io_bitmap_ptr) {
-		kfree(p->thread.io_bitmap_ptr);
-		p->thread.io_bitmap_max = 0;
+		if(err)
+			return err;
 	}
-	return err;
+
+	return copy_io_bitmap(tsk, p);
 }
 
 void
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 3ed4a68..b1babb4 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -50,6 +50,8 @@
 #include <asm/debugreg.h>
 #include <asm/switch_to.h>
 
+#include "process-io.h"
+
 asmlinkage extern void ret_from_fork(void);
 
 __visible DEFINE_PER_CPU(unsigned long, old_rsp);
@@ -154,7 +156,6 @@ static inline u32 read_32bit_tls(struct task_struct *t, int tls)
 int copy_thread(unsigned long clone_flags, unsigned long sp,
 		unsigned long arg, struct task_struct *p)
 {
-	int err;
 	struct pt_regs *childregs;
 	struct task_struct *me = current;
 
@@ -191,21 +192,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	if (sp)
 		childregs->sp = sp;
 
-	err = -ENOMEM;
-	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
-		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
-						  IO_BITMAP_BYTES, GFP_KERNEL);
-		if (!p->thread.io_bitmap_ptr) {
-			p->thread.io_bitmap_max = 0;
-			return -ENOMEM;
-		}
-		set_tsk_thread_flag(p, TIF_IO_BITMAP);
-	}
-
 	/*
 	 * Set a new TLS for the child thread?
 	 */
 	if (clone_flags & CLONE_SETTLS) {
+		int err;
 #ifdef CONFIG_IA32_EMULATION
 		if (test_thread_flag(TIF_IA32))
 			err = do_set_thread_area(p, -1,
@@ -214,16 +205,10 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 #endif
 			err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
 		if (err)
-			goto out;
-	}
-	err = 0;
-out:
-	if (err && p->thread.io_bitmap_ptr) {
-		kfree(p->thread.io_bitmap_ptr);
-		p->thread.io_bitmap_max = 0;
+			return err;
 	}
-	return err;
+
+	return copy_io_bitmap(me, p);
 }
 
 static void
--
2.1.1
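The restructuring above follows a common pattern: perform the step that
allocates memory last, so no earlier failure ever needs to free it. A generic
sketch of that pattern (plain userspace C with made-up names, not kernel code):

#include <stdlib.h>

struct child { int *buf; };

/* Fallible, non-allocating steps come first; the allocation comes last,
 * so every early "return -1" path has nothing to clean up. */
static int setup_child(struct child *c, int want_buf, int other_step_fails)
{
	if (other_step_fails)
		return -1;		/* nothing allocated yet, nothing to free */

	if (want_buf) {
		c->buf = malloc(128);	/* the only allocation, done last */
		if (!c->buf)
			return -1;
	} else {
		c->buf = NULL;
	}
	return 0;
}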
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 02/10] x86: tss: Eliminate fragile calculation of TSS segment limit
__set_tss_desc has a complex calculation of the TSS segment limit, duplicating
the quirky details of the I/O bitmap array length, and requiring a complex
comment to explain. Replace that calculation with a simpler one based on the
offsetof the "stack" field that follows the array.

That then removes the last use of IO_BITMAP_OFFSET, so delete it.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
Acked-by: Alexander van Heukelum <heukelum at fastmail.fm>
---
 arch/x86/include/asm/desc.h      | 11 +----------
 arch/x86/include/asm/processor.h |  4 +++-
 2 files changed, 4 insertions(+), 11 deletions(-)

diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 50d033a..f8dc455 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -177,16 +177,7 @@ static inline void __set_tss_desc(unsigned cpu, unsigned int entry, void *addr)
 	struct desc_struct *d = get_cpu_gdt_table(cpu);
 	tss_desc tss;
 
-	/*
-	 * sizeof(unsigned long) coming from an extra "long" at the end
-	 * of the iobitmap. See tss_struct definition in processor.h
-	 *
-	 * -1? seg base+limit should be pointing to the address of the
-	 * last valid byte
-	 */
-	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS,
-			      IO_BITMAP_OFFSET + IO_BITMAP_BYTES +
-			      sizeof(unsigned long) - 1);
+	set_tssldt_descriptor(&tss, (unsigned long)addr, DESC_TSS, TSS_LIMIT);
 	write_gdt_entry(d, entry, &tss, DESC_TSS);
 }
 
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index eb71ec7..76751cd 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -258,9 +258,11 @@ struct x86_hw_tss {
 #define IO_BITMAP_BITS		65536
 #define IO_BITMAP_BYTES		(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS		(IO_BITMAP_BYTES/sizeof(long))
-#define IO_BITMAP_OFFSET	offsetof(struct tss_struct, io_bitmap)
 #define INVALID_IO_BITMAP_OFFSET	0x8000
 
+/* Segment limits point to the last valid byte, hence the -1 */
+#define TSS_LIMIT	(offsetof(struct tss_struct, stack) - 1)
+
 struct tss_struct {
 	/*
 	 * The hardware state:
--
2.1.1
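For readers checking the arithmetic: io_bitmap is declared with
IO_BITMAP_LONGS + 1 longs and is the last field before stack, so
offsetof(struct tss_struct, stack) equals the old
IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long). A hypothetical
compile-time check (not part of the patch, and it would have to live in
kernel code next to these definitions) that the two limits agree:

/* Hypothetical, kernel-context sketch; OLD_TSS_LIMIT is a made-up name. */
#define OLD_TSS_LIMIT	(offsetof(struct tss_struct, io_bitmap) + \
			 IO_BITMAP_BYTES + sizeof(unsigned long) - 1)

static inline void check_tss_limit(void)
{
	/* Fails the build if the simplified TSS_LIMIT ever diverges. */
	BUILD_BUG_ON(TSS_LIMIT != OLD_TSS_LIMIT);
}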
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 03/10] x86: processor.h: Introduce macros to initialize IO fields of thread and TSS
No functional change, just splitting out parts of the existing INIT_THREAD
and INIT_TSS macros. This will simplify making those fields optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/include/asm/processor.h | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76751cd..0a12534 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -842,12 +842,7 @@ static inline void spin_lock_prefetch(const void *x)
 #define STACK_TOP	TASK_SIZE
 #define STACK_TOP_MAX	STACK_TOP
 
-#define INIT_THREAD  { \
-	.sp0			= sizeof(init_stack) + (long)&init_stack, \
-	.vm86_info		= NULL, \
-	.sysenter_cs		= __KERNEL_CS, \
-	.io_bitmap_ptr		= NULL, \
-}
+#define INIT_THREAD_IO	.io_bitmap_ptr		= NULL,
 
 /*
  * Note that the .io_bitmap member must be extra-big. This is because
@@ -855,6 +850,15 @@ static inline void spin_lock_prefetch(const void *x)
  * permission bitmap. The extra byte must be all 1 bits, and must
  * be within the limit.
  */
+#define INIT_TSS_IO	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
+
+#define INIT_THREAD  { \
+	.sp0			= sizeof(init_stack) + (long)&init_stack, \
+	.vm86_info		= NULL, \
+	.sysenter_cs		= __KERNEL_CS, \
+	INIT_THREAD_IO \
+}
+
 #define INIT_TSS  { \
 	.x86_tss = { \
 		.sp0		= sizeof(init_stack) + (long)&init_stack, \
@@ -862,7 +866,7 @@ static inline void spin_lock_prefetch(const void *x)
 		.ss1		= __KERNEL_CS, \
 		.io_bitmap_base	= INVALID_IO_BITMAP_OFFSET, \
 	}, \
-	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 }, \
+	INIT_TSS_IO \
 }
 
 extern unsigned long thread_saved_pc(struct task_struct *tsk);
--
2.1.1
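The `[0 ... IO_BITMAP_LONGS] = ~0` initializer used by INIT_TSS_IO is a GCC
range designated initializer. A tiny standalone illustration of the idiom
(array name and size are made up, not from the patch):

/* Illustrative only: the range designator fills every element in the
 * inclusive range, so all NLONGS + 1 longs start as ~0UL, i.e. every
 * port denied plus the required trailing byte of 1 bits. */
#define NLONGS 4
static unsigned long example_bitmap[NLONGS + 1] = {
	[0 ... NLONGS] = ~0UL,	/* indices 0..NLONGS inclusive */
};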
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 04/10] x86: paravirt: Wrap initialization of set_iopl_mask in a macro
This will allow making set_iopl_mask optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/include/asm/paravirt_types.h | 1 +
 arch/x86/kernel/paravirt.c            | 2 +-
 arch/x86/xen/enlighten.c              | 4 ++--
 3 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 7549b8b..3caf2a8 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -143,6 +143,7 @@ struct pv_cpu_ops {
 	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
 	void (*set_iopl_mask)(unsigned mask);
+#define INIT_SET_IOPL_MASK(f) .set_iopl_mask = f,
 
 	void (*wbinvd)(void);
 	void (*io_delay)(void);
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 548d25f..e7969d4 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -383,7 +383,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 	.iret = native_iret,
 	.swapgs = native_swapgs,
 
-	.set_iopl_mask = native_set_iopl_mask,
+	INIT_SET_IOPL_MASK(native_set_iopl_mask)
 	.io_delay = native_io_delay,
 
 	.start_context_switch = paravirt_nop,
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1a3f044..8ad0778 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -912,7 +912,7 @@ static void xen_load_sp0(struct tss_struct *tss,
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
 }
 
-static void xen_set_iopl_mask(unsigned mask)
+static void __maybe_unused xen_set_iopl_mask(unsigned mask)
 {
 	struct physdev_set_iopl set_iopl;
 
@@ -1279,7 +1279,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
 	.write_idt_entry = xen_write_idt_entry,
 	.load_sp0 = xen_load_sp0,
 
-	.set_iopl_mask = xen_set_iopl_mask,
+	INIT_SET_IOPL_MASK(xen_set_iopl_mask)
 	.io_delay = xen_io_delay,
 
 	/* Xen takes care of %gs when switching to usermode for us */
--
2.1.1
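The point of the macro becomes clearer in the final patch: when the field is
configured out, the macro can expand to nothing, so the initializer lists
need no #ifdefs. A generic sketch of the pattern with hypothetical names
(not the kernel code):

/* Hypothetical illustration: the initializer macro expands to the
 * designated initializer only when the field exists, so every user of the
 * struct keeps a single, unconditional initializer list. */
#ifdef CONFIG_FEATURE_X
#define INIT_FEATURE_X(f)	.feature_x = f,
#else
#define INIT_FEATURE_X(f)	/* field absent, expands to nothing */
#endif

struct ops {
	void (*always)(void);
#ifdef CONFIG_FEATURE_X
	void (*feature_x)(int arg);
#endif
};

static struct ops example_ops = {
	.always		= 0,
	INIT_FEATURE_X(0)	/* harmless in either configuration */
};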
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 05/10] x86: cpu: Add helper function unifying 32-bit and 64-bit IO init in cpu_init
Previously, the 64-bit code initialized both the io_bitmap_base and
io_bitmap, while the 32-bit code only initialized io_bitmap_base. Factor the
initialization out into a helper function init_tss_io, and call it from both
versions of cpu_init. This will also make it easier to make these IO-related
fields optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/include/asm/processor.h | 14 ++++++++++++++
 arch/x86/kernel/cpu/common.c     | 12 ++----------
 2 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 0a12534..1fa78f7 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -286,6 +286,20 @@ struct tss_struct {
 
 DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
+static inline void init_tss_io(struct tss_struct *t)
+{
+	int i;
+
+	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
+	/*
+	 * <= is required because the CPU will access up to
+	 * 8 bits beyond the end of the IO permission bitmap.
+	 */
+	for (i = 0; i <= IO_BITMAP_LONGS; i++)
+		t->io_bitmap[i] = ~0UL;
+}
+
 /*
  * Save the original ist values for checking stack pointers during debugging
 */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b4f78c..ae2e8d7 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1297,7 +1297,6 @@ void cpu_init(void)
 	struct tss_struct *t;
 	unsigned long v;
 	int cpu = stack_smp_processor_id();
-	int i;
 
 	wait_for_master_cpu(cpu);
 
@@ -1357,14 +1356,7 @@ void cpu_init(void)
 		}
 	}
 
-	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
-
-	/*
-	 * <= is required because the CPU will access up to
-	 * 8 bits beyond the end of the IO permission bitmap.
-	 */
-	for (i = 0; i <= IO_BITMAP_LONGS; i++)
-		t->io_bitmap[i] = ~0UL;
+	init_tss_io(t);
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
@@ -1419,7 +1411,7 @@ void cpu_init(void)
 	load_TR_desc();
 	load_LDT(&init_mm.context);
 
-	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+	init_tss_io(t);
 
 #ifdef CONFIG_DOUBLEFAULT
 	/* Set up doublefault TSS pointer in the GDT */
--
2.1.1
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 06/10] x86: process: Introduce helper to clear a thread's IO bitmap
This will make it easier to make the io_bitmap_ptr field optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/kernel/process-io.h | 5 +++++
 arch/x86/kernel/process_32.c | 4 ++--
 arch/x86/kernel/process_64.c | 2 +-
 3 files changed, 8 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
index d884444..ffa65e8 100644
--- a/arch/x86/kernel/process-io.h
+++ b/arch/x86/kernel/process-io.h
@@ -1,6 +1,11 @@
 #ifndef _X86_KERNEL_PROCESS_IO_H
 #define _X86_KERNEL_PROCESS_IO_H
 
+static inline void clear_thread_io_bitmap(struct task_struct *p)
+{
+	p->thread.io_bitmap_ptr = NULL;
+}
+
 static inline int copy_io_bitmap(struct task_struct *me,
 				 struct task_struct *p)
 {
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 07550ff..b55f78e 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -154,7 +154,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
 		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-		p->thread.io_bitmap_ptr = NULL;
+		clear_thread_io_bitmap(p);
 		return 0;
 	}
 	*childregs = *current_pt_regs();
@@ -165,7 +165,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.ip = (unsigned long) ret_from_fork;
 	task_user_gs(p) = get_user_gs(current_pt_regs());
 
-	p->thread.io_bitmap_ptr = NULL;
+	clear_thread_io_bitmap(p);
 	tsk = current;
 
 	/*
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index b1babb4..d18f3fc 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -164,7 +164,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.usersp = me->thread.usersp;
 	set_tsk_thread_flag(p, TIF_FORK);
-	p->thread.io_bitmap_ptr = NULL;
+	clear_thread_io_bitmap(p);
 	savesegment(gs, p->thread.gsindex);
 	p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs;
--
2.1.1
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 07/10] x86: process: Introduce helper to switch iopl mask
This simplifies __switch_to a bit, and will make it easier to make iopl
optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/kernel/process-io.h | 13 +++++++++++++
 arch/x86/kernel/process_32.c |  9 +--------
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
index ffa65e8..6d4f147 100644
--- a/arch/x86/kernel/process-io.h
+++ b/arch/x86/kernel/process-io.h
@@ -24,4 +24,17 @@ static inline int copy_io_bitmap(struct task_struct *me,
 	return 0;
 }
 
+static inline void switch_iopl_mask(struct thread_struct *prev,
+				    struct thread_struct *next)
+{
+	/*
+	 * Restore IOPL if needed. In normal use, the flags restore
+	 * in the switch assembly will handle this. But if the kernel
+	 * is running virtualized at a non-zero CPL, the popf will
+	 * not restore flags, so it must be done in a separate step.
+	 */
+	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+		set_iopl_mask(next->iopl);
+}
+
 #endif /* _X86_KERNEL_PROCESS_IO_H */
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index b55f78e..3b82293 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -265,14 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 */
 	load_TLS(next, cpu);
 
-	/*
-	 * Restore IOPL if needed. In normal use, the flags restore
-	 * in the switch assembly will handle this. But if the kernel
-	 * is running virtualized at a non-zero CPL, the popf will
-	 * not restore flags, so it must be done in a separate step.
-	 */
-	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
-		set_iopl_mask(next->iopl);
+	switch_iopl_mask(prev, next);
 
 	/*
 	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
--
2.1.1
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 08/10] x86: process: Introduce helper for IO-related bits of exit_thread
This makes the two major functions of exit_thread (IO and FPU) more obvious,
and will make it easier to make IO optional later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/kernel/process-io.h | 20 ++++++++++++++++++++
 arch/x86/kernel/process.c    | 20 +++-----------------
 2 files changed, 23 insertions(+), 17 deletions(-)

diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
index 6d4f147..012c1d5 100644
--- a/arch/x86/kernel/process-io.h
+++ b/arch/x86/kernel/process-io.h
@@ -24,6 +24,26 @@ static inline int copy_io_bitmap(struct task_struct *me,
 	return 0;
 }
 
+static inline void exit_thread_io(struct task_struct *me)
+{
+	struct thread_struct *t = &me->thread;
+	unsigned long *bp = t->io_bitmap_ptr;
+
+	if (bp) {
+		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
+
+		t->io_bitmap_ptr = NULL;
+		clear_thread_flag(TIF_IO_BITMAP);
+		/*
+		 * Careful, clear this in the TSS too:
+		 */
+		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
+		t->io_bitmap_max = 0;
+		put_cpu();
+		kfree(bp);
+	}
+}
+
 static inline void switch_iopl_mask(struct thread_struct *prev,
 				    struct thread_struct *next)
 {
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e127dda..37b45ca 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -29,6 +29,8 @@
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 
+#include "process-io.h"
+
 /*
 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
 * no more per-task TSS's. The TSS size is kept cacheline-aligned
@@ -104,23 +106,7 @@ void arch_task_cache_init(void)
 void exit_thread(void)
 {
 	struct task_struct *me = current;
-	struct thread_struct *t = &me->thread;
-	unsigned long *bp = t->io_bitmap_ptr;
-
-	if (bp) {
-		struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
-
-		t->io_bitmap_ptr = NULL;
-		clear_thread_flag(TIF_IO_BITMAP);
-		/*
-		 * Careful, clear this in the TSS too:
-		 */
-		memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
-		t->io_bitmap_max = 0;
-		put_cpu();
-		kfree(bp);
-	}
-
+	exit_thread_io(me);
 	drop_fpu(me);
 }
--
2.1.1
Josh Triplett
2014-Nov-02 17:32 UTC
[PATCH v4 09/10] x86: process: Introduce helper to switch IO bitmap
This simplifies __switch_to_xtra, and will make it easier to make IO optional
later.

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/kernel/process-io.h | 23 +++++++++++++++++++++++
 arch/x86/kernel/process.c    | 14 +-------------
 2 files changed, 24 insertions(+), 13 deletions(-)

diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
index 012c1d5..e48d5c9 100644
--- a/arch/x86/kernel/process-io.h
+++ b/arch/x86/kernel/process-io.h
@@ -57,4 +57,27 @@ static inline void switch_iopl_mask(struct thread_struct *prev,
 		set_iopl_mask(next->iopl);
 }
 
+static inline void switch_io_bitmap(struct tss_struct *tss,
+				    struct task_struct *prev_p,
+				    struct task_struct *next_p)
+{
+	struct thread_struct *prev, *next;
+	prev = &prev_p->thread;
+	next = &next_p->thread;
+
+	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+		/*
+		 * Copy the relevant range of the IO bitmap.
+		 * Normally this is 128 bytes or less:
+		 */
+		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
+		       max(prev->io_bitmap_max, next->io_bitmap_max));
+	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
+		/*
+		 * Clear any possible leftover bits:
+		 */
+		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
+	}
+}
+
 #endif /* _X86_KERNEL_PROCESS_IO_H */
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 37b45ca..7ac01bf 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -211,19 +211,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
 			hard_enable_TSC();
 	}
 
-	if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
-		/*
-		 * Copy the relevant range of the IO bitmap.
-		 * Normally this is 128 bytes or less:
-		 */
-		memcpy(tss->io_bitmap, next->io_bitmap_ptr,
-		       max(prev->io_bitmap_max, next->io_bitmap_max));
-	} else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
-		/*
-		 * Clear any possible leftover bits:
-		 */
-		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
-	}
+	switch_io_bitmap(tss, prev_p, next_p);
 	propagate_user_return_notify(prev_p, next_p);
 }
--
2.1.1
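One subtlety worth spelling out: the copy length is
max(prev->io_bitmap_max, next->io_bitmap_max), so any bytes the previous task
had enabled beyond the next task's range are overwritten with the next task's
all-ones (deny) bytes in the shared per-CPU TSS. A trivial sketch with
hypothetical sizes, purely for illustration:

#include <stddef.h>

/* Hypothetical helper mirroring the copy-length choice above. */
static size_t io_bitmap_copy_len(size_t prev_max, size_t next_max)
{
	/*
	 * Cover both ranges: bytes past next_max in the next task's bitmap
	 * are all 1s (deny), so copying up to the larger of the two sizes
	 * scrubs any "allow" bits the previous task left behind.
	 */
	return prev_max > next_max ? prev_max : next_max;
}
/* e.g. io_bitmap_copy_len(128, 32) == 128 */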
Josh Triplett
2014-Nov-02 17:33 UTC
[PATCH v4 10/10] x86: Support compiling out userspace IO (iopl and ioperm)
On the vast majority of modern systems, no processes will use the userspace
IO syscalls, iopl and ioperm. Add a new config option, CONFIG_X86_IOPORT, to
support configuring them out of the kernel entirely. Most current systems do
not run programs using these syscalls, so X86_IOPORT does not depend on
EXPERT, though it does still default to y.

In addition to saving a significant amount of space, this also reduces the
size of several major kernel data structures, drops a bit of code from
several task-related hot paths, and reduces the attack surface of the kernel.

bloat-o-meter results:

add/remove: 0/4 grow/shrink: 4/9 up/down: 83/-9074 (-8991)
function                                     old     new   delta
drop_fpu                                      80     160     +80
perf_event_init_task                         638     639      +1
perf_event_exec                              192     193      +1
perf_event_aux                               132     133      +1
test_tsk_thread_flag                          10       -     -10
ioperm_active                                 14       3     -11
init_task                                    916     904     -12
flush_thread                                 168     149     -19
cpu_init                                     364     339     -25
__drop_fpu                                    60       -     -60
ioperm_get                                    92       6     -86
exit_thread                                  105      10     -95
sys_iopl                                     106       -    -106
copy_thread                                  364     257    -107
__switch_to_xtra                             234     123    -111
sys_ioperm                                   240       -    -240
init_tss                                    8576     384   -8192

Signed-off-by: Josh Triplett <josh at joshtriplett.org>
---
 arch/x86/Kconfig                      | 10 ++++++++++
 arch/x86/include/asm/paravirt.h       |  2 ++
 arch/x86/include/asm/paravirt_types.h |  4 ++++
 arch/x86/include/asm/processor.h      | 19 ++++++++++++++++++-
 arch/x86/include/asm/syscalls.h       |  3 +++
 arch/x86/kernel/Makefile              |  3 ++-
 arch/x86/kernel/entry_64.S            |  9 ++++++---
 arch/x86/kernel/process-io.h          | 10 ++++++++++
 arch/x86/kernel/ptrace.c              |  8 ++++++++
 drivers/tty/vt/vt_ioctl.c             |  2 +-
 kernel/sys_ni.c                       |  5 +++++
 11 files changed, 69 insertions(+), 6 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f2327e8..a7de2eb 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -984,6 +984,16 @@ config X86_ESPFIX64
 	def_bool y
 	depends on X86_16BIT && X86_64
 
+config X86_IOPORT
+	bool "iopl and ioperm system calls"
+	default y
+	---help---
+	  This option enables the iopl and ioperm system calls, which allow
+	  privileged userspace processes to directly access I/O ports. This
+	  is used by software that drives hardware directly from userspace
+	  without using a kernel driver. Unless you intend to run such
+	  software, you can safely say N here.
+
 config TOSHIBA
 	tristate "Toshiba Laptop support"
 	depends on X86_32
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index cd6e161..52d2ec8 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -299,10 +299,12 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
 {
 	PVOP_VCALL3(pv_cpu_ops.write_idt_entry, dt, entry, g);
 }
 
+#ifdef CONFIG_X86_IOPORT
 static inline void set_iopl_mask(unsigned mask)
 {
 	PVOP_VCALL1(pv_cpu_ops.set_iopl_mask, mask);
 }
+#endif /* CONFIG_X86_IOPORT */
 
 /* The paravirtualized I/O functions */
 static inline void slow_down_io(void)
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 3caf2a8..a22a5d4 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -142,8 +142,12 @@ struct pv_cpu_ops {
 
 	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
 
+#ifdef CONFIG_X86_IOPORT
 	void (*set_iopl_mask)(unsigned mask);
 #define INIT_SET_IOPL_MASK(f) .set_iopl_mask = f,
+#else
+#define INIT_SET_IOPL_MASK(f)
+#endif
 
 	void (*wbinvd)(void);
 	void (*io_delay)(void);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 1fa78f7..6e0b639 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -255,7 +255,11 @@ struct x86_hw_tss {
 /*
 * IO-bitmap sizes:
 */
+#ifdef CONFIG_X86_IOPORT
 #define IO_BITMAP_BITS		65536
+#else
+#define IO_BITMAP_BITS		0
+#endif
 #define IO_BITMAP_BYTES		(IO_BITMAP_BITS/8)
 #define IO_BITMAP_LONGS		(IO_BITMAP_BYTES/sizeof(long))
 #define INVALID_IO_BITMAP_OFFSET	0x8000
@@ -269,6 +273,7 @@ struct tss_struct {
 	 */
 	struct x86_hw_tss	x86_tss;
 
+#ifdef CONFIG_X86_IOPORT
 	/*
 	 * The extra 1 is there because the CPU will access an
 	 * additional byte beyond the end of the IO permission
 	 * bitmap. The extra byte must be all 1 bits, and must
 	 * be within the limit.
 	 */
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
+#endif /* CONFIG_X86_IOPORT */
 
 	/*
 	 * .. and then another 0x100 bytes for the emergency kernel stack:
@@ -288,6 +294,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
 
 static inline void init_tss_io(struct tss_struct *t)
 {
+#ifdef CONFIG_X86_IOPORT
 	int i;
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
@@ -298,6 +305,9 @@ static inline void init_tss_io(struct tss_struct *t)
 	 */
 	for (i = 0; i <= IO_BITMAP_LONGS; i++)
 		t->io_bitmap[i] = ~0UL;
+#else
+	t->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
+#endif
 }
 
 /*
@@ -524,11 +534,13 @@ struct thread_struct {
 	unsigned int		saved_fs;
 	unsigned int		saved_gs;
 #endif
+#ifdef CONFIG_X86_IOPORT
 	/* IO permissions: */
 	unsigned long		*io_bitmap_ptr;
 	unsigned long		iopl;
 	/* Max allowed port in the bitmap, in bytes: */
 	unsigned		io_bitmap_max;
+#endif /* CONFIG_X86_IOPORT */
 	/*
 	 * fpu_counter contains the number of consecutive context switches
 	 * that the FPU is used. If this is over a threshold, the lazy fpu
@@ -545,7 +557,7 @@ struct thread_struct {
 */
 static inline void native_set_iopl_mask(unsigned mask)
 {
-#ifdef CONFIG_X86_32
+#if defined(CONFIG_X86_IOPORT) && defined(CONFIG_X86_32)
 	unsigned int reg;
 
 	asm volatile ("pushfl;"
@@ -856,6 +868,7 @@ static inline void spin_lock_prefetch(const void *x)
 #define STACK_TOP	TASK_SIZE
 #define STACK_TOP_MAX	STACK_TOP
 
+#ifdef CONFIG_X86_IOPORT
 #define INIT_THREAD_IO	.io_bitmap_ptr		= NULL,
 
 /*
@@ -865,6 +878,10 @@ static inline void spin_lock_prefetch(const void *x)
 * be within the limit.
 */
 #define INIT_TSS_IO	.io_bitmap		= { [0 ... IO_BITMAP_LONGS] = ~0 },
+#else
+#define INIT_THREAD_IO
+#define INIT_TSS_IO
+#endif
 
 #define INIT_THREAD  { \
 	.sp0			= sizeof(init_stack) + (long)&init_stack, \
diff --git a/arch/x86/include/asm/syscalls.h b/arch/x86/include/asm/syscalls.h
index 592a6a6..4db79ea 100644
--- a/arch/x86/include/asm/syscalls.h
+++ b/arch/x86/include/asm/syscalls.h
@@ -16,9 +16,12 @@
 #include <linux/types.h>
 
 /* Common in X86_32 and X86_64 */
+
+#ifdef CONFIG_X86_IOPORT
 /* kernel/ioport.c */
 asmlinkage long sys_ioperm(unsigned long, unsigned long, int);
 asmlinkage long sys_iopl(unsigned int);
+#endif /* CONFIG_X86_IOPORT */
 
 /* kernel/ldt.c */
 asmlinkage int sys_modify_ldt(int, void __user *, unsigned long);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 8f1e774..84d5fbe 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -20,7 +20,8 @@ CFLAGS_irq.o := -I$(src)/../include/asm/trace
 
 obj-y			:= process_$(BITS).o signal.o entry_$(BITS).o
 obj-y			+= traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
-obj-y			+= time.o ioport.o ldt.o dumpstack.o nmi.o
+obj-y			+= time.o ldt.o dumpstack.o nmi.o
+obj-$(CONFIG_X86_IOPORT)	+= ioport.o
 obj-y			+= setup.o x86_init.o i8259.o irqinit.o jump_label.o
 obj-$(CONFIG_IRQ_WORK)	+= irq_work.o
 obj-y			+= probe_roms.o
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index df088bb..1e6a74e0 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -609,6 +609,11 @@ ENTRY(stub_\func)
 END(stub_\func)
 	.endm
 
+	FORK_LIKE  clone
+	FORK_LIKE  fork
+	FORK_LIKE  vfork
+
+#ifdef CONFIG_X86_IOPORT
 	.macro FIXED_FRAME label,func
 ENTRY(\label)
 	CFI_STARTPROC
@@ -621,10 +626,8 @@ ENTRY(\label)
 END(\label)
 	.endm
 
-	FORK_LIKE  clone
-	FORK_LIKE  fork
-	FORK_LIKE  vfork
 	FIXED_FRAME stub_iopl, sys_iopl
+#endif /* CONFIG_X86_IOPORT */
 
 ENTRY(ptregscall_common)
 	DEFAULT_FRAME 1 8	/* offset 8: return address */
diff --git a/arch/x86/kernel/process-io.h b/arch/x86/kernel/process-io.h
index e48d5c9..3e773fa 100644
--- a/arch/x86/kernel/process-io.h
+++ b/arch/x86/kernel/process-io.h
@@ -3,12 +3,15 @@
 
 static inline void clear_thread_io_bitmap(struct task_struct *p)
 {
+#ifdef CONFIG_X86_IOPORT
 	p->thread.io_bitmap_ptr = NULL;
+#endif /* CONFIG_X86_IOPORT */
 }
 
 static inline int copy_io_bitmap(struct task_struct *me,
 				 struct task_struct *p)
 {
+#ifdef CONFIG_X86_IOPORT
 	if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) {
 		p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr,
 						  IO_BITMAP_BYTES, GFP_KERNEL);
@@ -20,12 +23,14 @@ static inline int copy_io_bitmap(struct task_struct *me,
 	} else {
 		p->thread.io_bitmap_ptr = NULL;
 	}
+#endif /* CONFIG_X86_IOPORT */
 
 	return 0;
 }
 
 static inline void exit_thread_io(struct task_struct *me)
 {
+#ifdef CONFIG_X86_IOPORT
 	struct thread_struct *t = &me->thread;
 	unsigned long *bp = t->io_bitmap_ptr;
 
@@ -42,11 +47,13 @@ static inline void exit_thread_io(struct task_struct *me)
 		put_cpu();
 		kfree(bp);
 	}
+#endif /* CONFIG_X86_IOPORT */
 }
 
 static inline void switch_iopl_mask(struct thread_struct *prev,
 				    struct thread_struct *next)
 {
+#ifdef CONFIG_X86_IOPORT
 	/*
 	 * Restore IOPL if needed. In normal use, the flags restore
 	 * in the switch assembly will handle this. But if the kernel
@@ -55,12 +62,14 @@ static inline void switch_iopl_mask(struct thread_struct *prev,
 	 */
 	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
 		set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
 }
 
 static inline void switch_io_bitmap(struct tss_struct *tss,
 				    struct task_struct *prev_p,
 				    struct task_struct *next_p)
 {
+#ifdef CONFIG_X86_IOPORT
 	struct thread_struct *prev, *next;
 	prev = &prev_p->thread;
 	next = &next_p->thread;
@@ -78,6 +87,7 @@ static inline void switch_io_bitmap(struct tss_struct *tss,
 		 */
 		memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
 	}
+#endif /* CONFIG_X86_IOPORT */
 }
 
 #endif /* _X86_KERNEL_PROCESS_IO_H */
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 749b0e4..fdb74a8 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -785,7 +785,11 @@ static int ptrace_set_debugreg(struct task_struct *tsk, int n,
 static int ioperm_active(struct task_struct *target,
			 const struct user_regset *regset)
 {
+#ifdef CONFIG_X86_IOPORT
 	return target->thread.io_bitmap_max / regset->size;
+#else
+	return 0;
+#endif
 }
 
 static int ioperm_get(struct task_struct *target,
@@ -793,12 +797,16 @@ static int ioperm_get(struct task_struct *target,
		      unsigned int pos, unsigned int count,
		      void *kbuf, void __user *ubuf)
 {
+#ifdef CONFIG_X86_IOPORT
 	if (!target->thread.io_bitmap_ptr)
 		return -ENXIO;
 
 	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   target->thread.io_bitmap_ptr,
				   0, IO_BITMAP_BYTES);
+#else
+	return -ENXIO;
+#endif
 }
 
 /*
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 2bd78e2..7c7d405 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -410,7 +410,7 @@ int vt_ioctl(struct tty_struct *tty,
	 *
	 * XXX: you should never use these, just call ioperm directly..
	 */
-#ifdef CONFIG_X86
+#ifdef CONFIG_X86_IOPORT
	case KDADDIO:
	case KDDELIO:
	/*
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 02aa418..f3014fe 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -224,3 +224,8 @@ cond_syscall(sys_seccomp);
 
 /* access BPF programs and maps */
 cond_syscall(sys_bpf);
+
+/* userspace I/O port access */
+cond_syscall(stub_iopl);
+cond_syscall(sys_iopl);
+cond_syscall(sys_ioperm);
--
2.1.1
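From userspace, a kernel built with CONFIG_X86_IOPORT=n behaves as if the
syscalls were never implemented: cond_syscall() points them at
sys_ni_syscall, which returns -ENOSYS. An illustrative probe (not part of the
series) that a program or test could use:

#include <errno.h>
#include <stdio.h>
#include <sys/io.h>	/* glibc iopl() wrapper, x86 only */

int main(void)
{
	/*
	 * ENOSYS is returned regardless of privileges when the syscall is
	 * compiled out; a kernel with X86_IOPORT=y would instead succeed
	 * (as root) or fail with EPERM.
	 */
	if (iopl(3) < 0 && errno == ENOSYS)
		printf("iopl/ioperm are compiled out of this kernel\n");
	return 0;
}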
One Thousand Gnomes
2014-Nov-03 12:10 UTC
[PATCH v4 10/10] x86: Support compiling out userspace IO (iopl and ioperm)
On Sun, 2 Nov 2014 09:33:01 -0800
Josh Triplett <josh at joshtriplett.org> wrote:

> On the vast majority of modern systems, no processes will use the
> userspace IO syscalls, iopl and ioperm. Add a new config option,
> CONFIG_X86_IOPORT, to support configuring them out of the kernel
> entirely. Most current systems do not run programs using these
> syscalls, so X86_IOPORT does not depend on EXPERT, though it does still
> default to y.

This isn't unreasonable, but there are drivers with userspace helpers that
use iopl/ioperm type functionality, where you should be doing a select of
X86_IOPORT. The one that comes to mind is the uvesa driver. From a quick
scan it may these days be the only mainstream one that needs the select
adding. Some X servers for legacy cards still use io port access.

There are also a couple of other highly non-obvious userspace users that
hang on for some systems - e.g. on some older servers, DMI and error records
can only be read via a real mode BIOS call, so management tools have no
choice but to go the lrmi/io path.

Still makes sense IMHO. From a code perspective, however, you could define
IO_BITMAP_LONGS to 0 and add an IO_BITMAP_SIZE (defined as LONGS + 1, or 0),
and as far as I can see gcc would then optimise out a lot of the code you
are ifdeffing.

Alan
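A rough sketch of what the suggested IO_BITMAP_SIZE approach might look like;
this is one interpretation of the suggestion, with made-up helper names, not
code from the thread:

/* Hypothetical sketch: size constants collapse to zero when X86_IOPORT is
 * disabled, so dead-code elimination removes the bitmap handling without
 * per-call-site #ifdefs. */
#ifdef CONFIG_X86_IOPORT
#define IO_BITMAP_BITS	65536
#else
#define IO_BITMAP_BITS	0
#endif
#define IO_BITMAP_BYTES	(IO_BITMAP_BITS/8)
#define IO_BITMAP_LONGS	(IO_BITMAP_BYTES/sizeof(long))
/* Array length including the trailing all-ones long, or nothing at all: */
#define IO_BITMAP_SIZE	(IO_BITMAP_BITS ? IO_BITMAP_LONGS + 1 : 0)

static inline void init_io_bitmap_alt(unsigned long *io_bitmap)
{
	int i;

	/* With IO_BITMAP_SIZE == 0 the loop body is trivially dead code. */
	for (i = 0; i < IO_BITMAP_SIZE; i++)
		io_bitmap[i] = ~0UL;
}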
Konrad Rzeszutek Wilk
2014-Dec-01 15:37 UTC
[Xen-devel] [PATCH v4 04/10] x86: paravirt: Wrap initialization of set_iopl_mask in a macro
On Sun, Nov 02, 2014 at 09:32:20AM -0800, Josh Triplett wrote:
> This will allow making set_iopl_mask optional later.
>
> Signed-off-by: Josh Triplett <josh at joshtriplett.org>

Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk at oracle.com>

> ---
>  arch/x86/include/asm/paravirt_types.h | 1 +
>  arch/x86/kernel/paravirt.c            | 2 +-
>  arch/x86/xen/enlighten.c              | 4 ++--
>  3 files changed, 4 insertions(+), 3 deletions(-)
>
> diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
> index 7549b8b..3caf2a8 100644
> --- a/arch/x86/include/asm/paravirt_types.h
> +++ b/arch/x86/include/asm/paravirt_types.h
> @@ -143,6 +143,7 @@ struct pv_cpu_ops {
>  	void (*load_sp0)(struct tss_struct *tss, struct thread_struct *t);
>
>  	void (*set_iopl_mask)(unsigned mask);
> +#define INIT_SET_IOPL_MASK(f) .set_iopl_mask = f,
>
>  	void (*wbinvd)(void);
>  	void (*io_delay)(void);
> diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
> index 548d25f..e7969d4 100644
> --- a/arch/x86/kernel/paravirt.c
> +++ b/arch/x86/kernel/paravirt.c
> @@ -383,7 +383,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
>  	.iret = native_iret,
>  	.swapgs = native_swapgs,
>
> -	.set_iopl_mask = native_set_iopl_mask,
> +	INIT_SET_IOPL_MASK(native_set_iopl_mask)
>  	.io_delay = native_io_delay,
>
>  	.start_context_switch = paravirt_nop,
> diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
> index 1a3f044..8ad0778 100644
> --- a/arch/x86/xen/enlighten.c
> +++ b/arch/x86/xen/enlighten.c
> @@ -912,7 +912,7 @@ static void xen_load_sp0(struct tss_struct *tss,
>  	xen_mc_issue(PARAVIRT_LAZY_CPU);
>  }
>
> -static void xen_set_iopl_mask(unsigned mask)
> +static void __maybe_unused xen_set_iopl_mask(unsigned mask)
>  {
>  	struct physdev_set_iopl set_iopl;
>
> @@ -1279,7 +1279,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
>  	.write_idt_entry = xen_write_idt_entry,
>  	.load_sp0 = xen_load_sp0,
>
> -	.set_iopl_mask = xen_set_iopl_mask,
> +	INIT_SET_IOPL_MASK(xen_set_iopl_mask)
>  	.io_delay = xen_io_delay,
>
>  	/* Xen takes care of %gs when switching to usermode for us */
> --
> 2.1.1
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel at lists.xen.org
> http://lists.xen.org/xen-devel