Displaying 20 results from an estimated 49 matches for "next_p".
2007 Apr 18
0
[PATCH 2/6] Paravirt CPU hypercall batching mode
.../kernel/process.c Thu Dec 14 14:26:24 2006 -0800
+++ b/arch/i386/kernel/process.c Thu Dec 14 14:50:22 2006 -0800
@@ -665,6 +665,31 @@ struct task_struct fastcall * __switch_t
load_TLS(next, cpu);
/*
+ * Now maybe handle debug registers and/or IO bitmaps
+ */
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+ __switch_to_xtra(next_p, tss);
+
+ disable_tsc(prev_p, next_p);
+
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+ * the GDT and LDT ar...
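The comment cut off above is the crux of the batching mode: while the switch runs in lazy mode, individual CPU-state updates are queued instead of being issued one hypercall at a time, and leaving lazy mode flushes the queue before the new TLS entries are relied on. Below is a minimal user-space model of that queue-and-flush pattern; the names enter_lazy_mode, leave_lazy_mode and update_descriptor are made up for illustration and are not the patch's actual hooks.

/*
 * Minimal user-space model of hypercall batching (illustration only,
 * not the kernel patch's API).  While "lazy mode" is active, individual
 * state updates are queued instead of being issued immediately; leaving
 * lazy mode flushes the whole queue in one batch, which is why the
 * excerpt insists on flushing before the TLS segments are restored.
 */
#include <stdio.h>

#define MAX_BATCH 8

static int batch[MAX_BATCH];
static int batch_len;
static int lazy;                        /* nonzero while batching is active */

static void flush_batch(void)
{
    int i;

    if (batch_len == 0)
        return;
    /* A real implementation would issue one hypercall describing all
     * queued updates; here we just report the batch. */
    printf("flushing %d queued updates in one batch:", batch_len);
    for (i = 0; i < batch_len; i++)
        printf(" %d", batch[i]);
    printf("\n");
    batch_len = 0;
}

static void enter_lazy_mode(void)
{
    lazy = 1;
}

static void leave_lazy_mode(void)
{
    /* Flush before anything that depends on the queued updates runs:
     * the ordering constraint the excerpt's comment describes. */
    flush_batch();
    lazy = 0;
}

static void update_descriptor(int idx)
{
    if (lazy && batch_len < MAX_BATCH)
        batch[batch_len++] = idx;       /* deferred: queued for the flush */
    else
        printf("immediate update of descriptor %d\n", idx);
}

int main(void)
{
    enter_lazy_mode();
    update_descriptor(1);               /* queued */
    update_descriptor(2);               /* queued */
    update_descriptor(3);               /* queued */
    leave_lazy_mode();                  /* one flush covers all three */
    update_descriptor(4);               /* issued immediately */
    return 0;
}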
2007 Apr 18
0
[PATCH 2/6] Paravirt CPU hypercall batching mode
.../kernel/process.c Thu Dec 14 14:26:24 2006 -0800
+++ b/arch/i386/kernel/process.c Thu Dec 14 14:50:22 2006 -0800
@@ -665,6 +665,31 @@ struct task_struct fastcall * __switch_t
load_TLS(next, cpu);
/*
+ * Now maybe handle debug registers and/or IO bitmaps
+ */
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+ __switch_to_xtra(next_p, tss);
+
+ disable_tsc(prev_p, next_p);
+
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+ * the GDT and LDT ar...
2007 Apr 18
2
[PATCH 2/5] Paravirt cpu batching.patch
...+665,37 @@ struct task_struct fastcall * __switch_t
load_TLS(next, cpu);
/*
+ * Restore IOPL if needed.
+ */
+ if (unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+
+ /*
+ * Now maybe handle debug registers and/or IO bitmaps
+ */
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+ __switch_to_xtra(next_p, tss);
+
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+ * the GDT and LDT are properly updated, and must be
+...
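One small detail worth noting in this variant: the IOPL restore is guarded by a comparison of prev->iopl and next->iopl, so the expensive privileged update (natively an EFLAGS rewrite, a hypercall under a paravirt backend) only happens when the value actually changes between tasks. A tiny stand-alone model of that guard, with illustrative names rather than the kernel's:

/*
 * Illustration only: shows the "skip the update when nothing changed"
 * guard from the excerpt.  Types and function names are invented.
 */
#include <stdio.h>

struct task_example {
    unsigned iopl;
};

static void set_iopl_mask_example(unsigned iopl)
{
    printf("expensive IOPL update to %u\n", iopl);
}

static void switch_iopl_example(const struct task_example *prev,
                                const struct task_example *next)
{
    if (prev->iopl != next->iopl)       /* skip the common, unchanged case */
        set_iopl_mask_example(next->iopl);
}

int main(void)
{
    struct task_example a = { .iopl = 0 }, b = { .iopl = 3 };

    switch_iopl_example(&a, &b);        /* differs: update happens */
    switch_iopl_example(&b, &b);        /* same: nothing to do */
    return 0;
}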
2007 Apr 18
2
[PATCH 2/5] Paravirt cpu batching.patch
...+665,37 @@ struct task_struct fastcall * __switch_t
load_TLS(next, cpu);
/*
+ * Restore IOPL if needed.
+ */
+ if (unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+
+ /*
+ * Now maybe handle debug registers and/or IO bitmaps
+ */
+ if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
+ || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
+ __switch_to_xtra(next_p, tss);
+
+ /*
+ * Leave lazy mode, flushing any hypercalls made here.
+ * This must be done before restoring TLS segments so
+ * the GDT and LDT are properly updated, and must be
+...
2017 Jun 27
0
[PATCH v3 3/5] threads: Use thread-local storage for errors.
...errno */
+
+ /* Error handler and stack of old error handlers. */
+ guestfs_error_handler_cb error_cb;
+ void * error_cb_data;
+ struct error_cb_stack *error_cb_stack;
+};
+
+static void
+free_error_data (struct error_data *error_data)
+{
+ struct error_cb_stack *p, *next_p;
+
+ free (error_data->last_error);
+ for (p = error_data->error_cb_stack; p != NULL; p = next_p) {
+ next_p = p->next;
+ free (p);
+ }
+ free (error_data);
+}
+
+/* Free all the error_data structs created for a particular handle. */
+void
+guestfs_int_free_error_data_list (guest...
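The patch title and the guestfs_int_free_error_data_list comment point at per-thread error state that later has to be reclaimed. As a rough sketch of the underlying technique only, here is plain POSIX thread-local storage with a destructor; it is not the libguestfs implementation, which (per the comment above) additionally tracks the per-thread structs for each handle so they can all be freed when the handle is closed.

/*
 * Sketch of the general pattern, not the libguestfs API.  Each thread
 * gets its own error_data, created on first use via a pthread key; the
 * key's destructor frees it at thread exit, walking a small stack of
 * saved handlers exactly like free_error_data in the excerpt.
 * Build with: cc -pthread example.c
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct error_cb_stack {
    struct error_cb_stack *next;
};

struct error_data {
    char *last_error;                   /* strdup'd message, may be NULL */
    struct error_cb_stack *error_cb_stack;
};

static pthread_key_t error_key;

static void free_error_data(void *opaque)
{
    struct error_data *d = opaque;
    struct error_cb_stack *p, *next_p;

    free(d->last_error);
    for (p = d->error_cb_stack; p != NULL; p = next_p) {
        next_p = p->next;
        free(p);
    }
    free(d);
}

/* Return this thread's error_data, creating it on first use. */
static struct error_data *get_error_data(void)
{
    struct error_data *d = pthread_getspecific(error_key);

    if (d == NULL) {
        d = calloc(1, sizeof *d);
        if (d == NULL)
            abort();
        pthread_setspecific(error_key, d);
    }
    return d;
}

static void *worker(void *arg)
{
    (void)arg;
    get_error_data()->last_error = strdup("example error in this thread");
    printf("worker sees: %s\n", get_error_data()->last_error);
    return NULL;                        /* destructor frees the data here */
}

int main(void)
{
    pthread_t t;

    pthread_key_create(&error_key, free_error_data);
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);
    pthread_key_delete(error_key);
    return 0;
}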
2015 Jun 06
0
[PATCH 3/5] threads: Use thread-local storage for errors.
...errno */
+
+ /* Error handler and stack of old error handlers. */
+ guestfs_error_handler_cb error_cb;
+ void * error_cb_data;
+ struct error_cb_stack *error_cb_stack;
+};
+
+static void
+free_error_data (struct error_data *error_data)
+{
+ struct error_cb_stack *p, *next_p;
+
+ free (error_data->last_error);
+ for (p = error_data->error_cb_stack; p != NULL; p = next_p) {
+ next_p = p->next;
+ free (p);
+ }
+ free (error_data);
+}
+
+/* Free all the error_data structs created for a particular handle. */
+void
+guestfs_int_free_error_data_list (guest...
2013 Oct 22
0
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...*/
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOPORT
+ struct thread_struct *prev, *next;
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+ if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ /*
+ * Copy the relevant range of the IO bitmap.
+...
2014 Mar 11
0
[PATCHv2 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...*/
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOPORT
+ struct thread_struct *prev, *next;
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+ if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ /*
+ * Copy the relevant range of the IO bitmap.
+...
2013 Oct 26
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct *next_p)
> +{
> +#ifdef CONFIG_X86_IOPORT
> + struct thread_struct *prev, *next;
> + prev = &prev_p->thread;
> + next = &next_p->thread;
> +
> + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
> + /*
> +...
2013 Oct 26
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct *next_p)
> +{
> +#ifdef CONFIG_X86_IOPORT
> + struct thread_struct *prev, *next;
> + prev = &prev_p->thread;
> + next = &next_p->thread;
> +
> + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
> + /*
> +...
2014 Oct 29
4
[PATCH v3 1/3] x86: process: Unify 32-bit and 64-bit copy_thread I/O bitmap handling
The 32-bit and 64-bit versions of copy_thread have functionally
identical handling for copying the I/O bitmap, modulo differences in
error handling. Clean up the error paths in both by moving the copy of
the I/O bitmap to the end, to eliminate the need to free it if
subsequent copy steps fail; move the resulting identical code to a
static inline in a common header.
Signed-off-by: Josh Triplett
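The refactor described in this message is straightforward to picture: do the I/O-bitmap copy as the last step of copy_thread, so no later failure path has to free it, and share that code between the 32-bit and 64-bit files through a static inline helper. A user-space sketch of what such a helper could look like, with illustrative names rather than the kernel's actual structures:

/*
 * Sketch only: the real helper, header, and field names in the kernel
 * differ; this just shows the shape of a shared "copy the I/O bitmap
 * last" helper as the commit message describes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define IO_BITMAP_BYTES_EXAMPLE (65536 / 8)    /* one bit per I/O port */

struct thread_example {
    unsigned char *io_bitmap_ptr;              /* NULL if no bitmap */
};

/* Shared helper: called as the final step of both copy_thread variants,
 * so no subsequent error path ever has to free the new bitmap. */
static inline int copy_io_bitmap_example(struct thread_example *dst,
                                         const struct thread_example *src)
{
    if (src->io_bitmap_ptr == NULL) {
        dst->io_bitmap_ptr = NULL;
        return 0;
    }
    dst->io_bitmap_ptr = malloc(IO_BITMAP_BYTES_EXAMPLE);
    if (dst->io_bitmap_ptr == NULL)
        return -1;                             /* the kernel would return -ENOMEM */
    memcpy(dst->io_bitmap_ptr, src->io_bitmap_ptr, IO_BITMAP_BYTES_EXAMPLE);
    return 0;
}

int main(void)
{
    struct thread_example parent = { .io_bitmap_ptr = calloc(1, IO_BITMAP_BYTES_EXAMPLE) };
    struct thread_example child  = { .io_bitmap_ptr = NULL };

    printf("copy result: %d\n", copy_io_bitmap_example(&child, &parent));
    free(parent.io_bitmap_ptr);
    free(child.io_bitmap_ptr);
    return 0;
}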
2014 Oct 29
4
[PATCH v3 1/3] x86: process: Unify 32-bit and 64-bit copy_thread I/O bitmap handling
The 32-bit and 64-bit versions of copy_thread have functionally
identical handling for copying the I/O bitmap, modulo differences in
error handling. Clean up the error paths in both by moving the copy of
the I/O bitmap to the end, to eliminate the need to free it if
subsequent copy steps fail; move the resulting identical code to a
static inline in a common header.
Signed-off-by: Josh Triplett
2014 Oct 29
0
[PATCH v3 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct *next_p)
> +{
> +#ifdef CONFIG_X86_IOPORT
> + struct thread_struct *prev, *next;
> + prev = &prev_p->thread;
> + next = &next_p->thread;
> +
> + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
> + /*
> +...
2013 Oct 22
9
[PATCH 0/3] x86: Support compiling out userspace I/O (iopl and ioperm)
This patch series makes it possible to compile out the iopl and ioperm system
calls, which allow privileged processes to request permission to directly poke
I/O ports from userspace.
Nothing on a modern Linux system uses these calls anymore, and anything new
should be using /dev/port instead, or better yet writing a driver.
Copying the bloat-o-meter stats from the final patch:
32-bit
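The mechanism behind "compile out" here is the usual Kconfig pattern, visible in the other excerpts on this page as #ifdef CONFIG_X86_IOPORT ... #endif: the real code is built only when the option is set, and the helpers otherwise collapse to empty static inlines so callers need no #ifdefs of their own, while a compiled-out syscall would normally report ENOSYS like any other unimplemented syscall. A stand-alone C sketch of that pattern, not the actual kernel code:

/*
 * Sketch of the "compile it out" pattern the series is built on
 * (illustration only; the real patches touch several kernel files).
 * With the option off, the helper collapses to a trivial static inline
 * and the feature's code vanishes from the binary, which is where the
 * bloat-o-meter savings quoted in the cover letter come from.
 */
#include <errno.h>
#include <stdio.h>

/* #define CONFIG_X86_IOPORT 1 */       /* flip to re-enable the feature */

#ifdef CONFIG_X86_IOPORT
static int do_iopl(int level)
{
    printf("setting IOPL to %d\n", level);
    return 0;
}
#else
static inline int do_iopl(int level)
{
    (void)level;
    return -ENOSYS;                      /* feature compiled out */
}
#endif

int main(void)
{
    printf("iopl(3) -> %d\n", do_iopl(3));
    return 0;
}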
2013 Oct 22
9
[PATCH 0/3] x86: Support compiling out userspace I/O (iopl and ioperm)
This patch series makes it possible to compile out the iopl and ioperm system
calls, which allow privileged processes to request permission to directly poke
I/O ports from userspace.
Nothing on a modern Linux system uses these calls anymore, and anything new
should be using /dev/port instead, or better yet writing a driver.
Copying the bloat-o-meter stats from the final patch:
32-bit
2013 Oct 31
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...&& unlikely(prev->iopl != next->iopl))
> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct *next_p)
> +{
> +#ifdef CONFIG_X86_IOPORT
> + struct thread_struct *prev, *next;
> + prev = &prev_p->thread;
> + next = &next_p->thread;
> +
> + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
> + /*
> +...
2013 Oct 31
1
[PATCH 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...&& unlikely(prev->iopl != next->iopl))
> + set_iopl_mask(next->iopl);
> +#endif /* CONFIG_X86_IOPORT */
> +}
> +
> +static inline void switch_io_bitmap(struct tss_struct *tss,
> + struct task_struct *prev_p,
> + struct task_struct *next_p)
> +{
> +#ifdef CONFIG_X86_IOPORT
> + struct thread_struct *prev, *next;
> + prev = &prev_p->thread;
> + next = &next_p->thread;
> +
> + if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
> + /*
> +...
2014 Oct 29
2
[PATCH v3 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...*/
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOPORT
+ struct thread_struct *prev, *next;
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+ if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ /*
+ * Copy the relevant range of the IO bitmap.
+...
2014 Oct 29
2
[PATCH v3 3/3] x86: Support compiling out userspace I/O (iopl and ioperm)
...*/
+ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
+ set_iopl_mask(next->iopl);
+#endif /* CONFIG_X86_IOPORT */
+}
+
+static inline void switch_io_bitmap(struct tss_struct *tss,
+ struct task_struct *prev_p,
+ struct task_struct *next_p)
+{
+#ifdef CONFIG_X86_IOPORT
+ struct thread_struct *prev, *next;
+ prev = &prev_p->thread;
+ next = &next_p->thread;
+
+ if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
+ /*
+ * Copy the relevant range of the IO bitmap.
+...
2014 Mar 11
2
[PATCHv2 1/3] x86: process: Unify 32-bit and 64-bit copy_thread I/O bitmap handling
The 32-bit and 64-bit versions of copy_thread have functionally
identical handling for copying the I/O bitmap, modulo differences in
error handling. Clean up the error paths in both by moving the copy of
the I/O bitmap to the end, to eliminate the need to free it if
subsequent copy steps fail; move the resulting identical code to a
static inline in a common header.
Signed-off-by: Josh Triplett