Displaying 20 results from an estimated 50 matches for "sync_cor".
2007 Apr 18 · 2 · Heads up
Zachary Amsden wrote:
> Jeremy Fitzhardinge wrote:
>> Zachary Amsden wrote:
>>> I removed sync_core from paravirt ops (it is always a raw cpuid in
>>> all cases). I didn't want to touch Jeremy's patch 020 and cause
>>> sync problems for him, so I haven't removed it there.
>> It wouldn't necessarily be cpuid on an hvm guest.
>
> Yes, CPUID could be...
2007 Apr 18 · 1 · [PATCH] (with benchmarks) binary patching of paravirt_ops call sites
...esc_struct;
@@ -15,6 +22,8 @@ struct paravirt_ops
{
unsigned int kernel_rpl;
+ unsigned (*patch)(unsigned int type, void *firstinsn, unsigned len);
+
void (*cpuid)(unsigned int *eax, unsigned int *ebx,
unsigned int *ecx, unsigned int *edx);
@@ -108,10 +117,6 @@ static inline void sync_core(void)
#define read_cr4_safe(x) paravirt_ops.read_cr4_safe()
#define write_cr4(x) paravirt_ops.write_cr4(x)
-#define __local_save_flags() paravirt_ops.save_fl()
-#define __local_irq_restore(f) paravirt_ops.restore_fl(f)
-#define local_irq_disable() paravirt_ops.irq_disable()
-#define local_irq_...
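For context, the patch hook added above is consumed by a boot-time pass over the recorded call sites. A minimal sketch of that loop (the struct fields and the add_nops() helper are assumptions, modeled on what later became apply_paravirt() in mainline):

struct paravirt_patch_site {
	u8 *instr;	/* address of the call site in kernel text */
	u8 instrtype;	/* which paravirt_ops entry this site calls */
	u8 len;		/* bytes available at the site */
};

static void apply_paravirt(struct paravirt_patch_site *start,
			   struct paravirt_patch_site *end)
{
	struct paravirt_patch_site *p;

	for (p = start; p < end; p++) {
		/* The backend rewrites the site in place and reports
		 * how many bytes it actually emitted. */
		unsigned used = paravirt_ops.patch(p->instrtype,
						   p->instr, p->len);

		/* Pad whatever is left with nops so execution falls
		 * through cleanly (add_nops() assumed). */
		add_nops(p->instr + used, p->len - used);
	}
}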
2020 Apr 08 · 0 · [RFC PATCH 15/26] x86/alternatives: Non-emulated text poking
...-- it is emulated by Xen and might not
> + * execute an iret (or similar synchronizing instruction)
> + * internally.
> + *
> + * cpuid() would trap as well. Unclear if that's a solution
> + * either.
> + */
> + if (in_nmi())
> + cpuid_eax(1);
> + else
> + sync_core();
> +}
That's not thinking straight; what do you think the INT3 does when it
happens inside an NMI?
2007 Apr 18 · 3 · [PATCH 1/4] x86 paravirt_ops: create no_paravirt.h for native ops
...;2" (*ecx));
-}
-
/*
* Generic CPUID function
* clear %ecx since some cpus (Cyrix MII) do not set or clear %ecx
@@ -281,13 +270,6 @@ static inline void clear_in_cr4 (unsigne
outb((reg), 0x22); \
outb((data), 0x23); \
} while (0)
-
-/* Stop speculative execution */
-static inline void sync_core(void)
-{
- int tmp;
- asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
-}
static inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
@@ -508,33 +490,6 @@ static inline voi...
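The hunk removed above reappears verbatim as the native implementation; a sketch of how it presumably lands in the new no_paravirt.h (placement inferred from the patch title, code taken from the removed hunk):

/* Stop speculative execution: CPUID is architecturally serializing,
 * so it drains the pipeline and discards prefetched instructions. */
static inline void sync_core(void)
{
	int tmp;
	asm volatile("cpuid"
		     : "=a" (tmp)
		     : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}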
2018 May 23 · 0 · [PATCH v3 21/27] x86/ftrace: Adapt function tracing for PIE support
...he ftrace
+ * hooking algorithm working with the expected 5-byte instruction.
+ */
+ memcpy(replaced, new_code, MCOUNT_INSN_SIZE);
+ replaced[MCOUNT_INSN_SIZE] = ideal_nops[1][0];
+
+ ip = text_ip_addr(ip);
+
+ if (probe_kernel_write((void *)ip, replaced, sizeof(replaced)))
+ return -EPERM;
+
+ sync_core();
+
+ return 0;
+
+}
+
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -153,7 +191,7 @@ int ftrace_make_nop(struct module *mod,
* just modify the code directly.
*/
if (addr == MCOUNT_ADDR)
- return ftrace_modify_code_direct(rec->ip, ol...
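The snippet elides the declaration of replaced; for sizeof(replaced) to cover the padded copy, it presumably reads (an assumption, consistent with the writes above):

	unsigned char replaced[MCOUNT_INSN_SIZE + 1];	/* 5-byte call + 1-byte nop */

The trailing sync_core() then makes the local CPU serialize and re-fetch instructions instead of executing stale prefetched bytes; this direct-write path is only taken when it is safe to modify the code in place.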
2018 Mar 13 · 0 · [PATCH v2 21/27] x86/ftrace: Adapt function tracing for PIE support
...he ftrace
+ * hooking algorithm working with the expected 5-byte instruction.
+ */
+ memcpy(replaced, new_code, MCOUNT_INSN_SIZE);
+ replaced[MCOUNT_INSN_SIZE] = ideal_nops[1][0];
+
+ ip = text_ip_addr(ip);
+
+ if (probe_kernel_write((void *)ip, replaced, sizeof(replaced)))
+ return -EPERM;
+
+ sync_core();
+
+ return 0;
+
+}
+
int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr)
{
@@ -153,7 +191,7 @@ int ftrace_make_nop(struct module *mod,
* just modify the code directly.
*/
if (addr == MCOUNT_ADDR)
- return ftrace_modify_code_direct(rec->ip, ol...
2007 Apr 18 · 3 · [PATCH 1/2] paravirt.h header
...implement inline
+ assembler replacements. */
+
+ void (fastcall *cpuid)(unsigned int *eax, unsigned int *ebx,
+ unsigned int *ecx, unsigned int *edx);
+
+ unsigned int (fastcall *get_debugreg)(int regno);
+ void (fastcall *set_debugreg)(int regno, unsigned int value);
+
+ void (fastcall *sync_core)(void);
+
+ void (fastcall *clts)(void);
+
+ unsigned int (fastcall *read_cr0)(void);
+ void (fastcall *write_cr0)(unsigned int);
+
+ unsigned int (fastcall *read_cr2)(void);
+ void (fastcall *write_cr2)(unsigned int);
+
+ unsigned int (fastcall *read_cr3)(void);
+ void (fastcall *write_cr3)(unsig...
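A sketch of how a native backend might fill a few of these hooks (the native_* names and the initializer are illustrative, not from the patch; native_cpuid is assumed to exist):

static fastcall void native_sync_core(void)
{
	int tmp;
	asm volatile("cpuid" : "=a" (tmp) : "0" (1)
		     : "ebx", "ecx", "edx", "memory");
}

struct paravirt_ops paravirt_ops = {
	.cpuid     = native_cpuid,
	.sync_core = native_sync_core,
	/* clts, read_cr0, write_cr0, ... filled in the same way */
};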
2020 Apr 08 · 2 · [RFC PATCH 00/26] Runtime paravirt patching
...ese, necessitates that target pv-ops not be preemptible.
I don't think that is a correct inference.
> Once that is a given (for safety these need to be explicitly whitelisted
> in runtime_patch()), use a state-machine with the primary CPU doing the
> patching and secondary CPUs in a sync_core() loop.
>
> In case we hit an INT3/BP (in NMI or thread-context) we make forward
> progress by continuing the patching instead of emulating.
>
> One remaining issue is inter-dependent pv-ops which are also executed in
> the NMI handler -- patching can potentially deadlock in...
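A bare-bones sketch of the rendezvous described above (all names hypothetical; the real series drives this through the kernel's text-poking machinery):

static atomic_t patch_state;	/* 0 = idle, 1 = patching, 2 = done */

/* Secondary CPUs spin here while the primary rewrites call sites;
 * sync_core() keeps each iteration serialized so they never execute
 * stale instruction bytes. */
static void patch_secondary_wait(void *unused)
{
	while (atomic_read(&patch_state) == 1)
		sync_core();
}

static void patch_primary(void)
{
	atomic_set(&patch_state, 1);
	/* ... rewrite the whitelisted pv-op call sites ... */
	atomic_set(&patch_state, 2);
}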
2019 Dec 05 · 6 · [PATCH v10 00/11] x86: PIE support to extend KASLR randomization
...):
- Swap rax for rdx on entry/64 changes based on feedback.
- Addressed feedback from Borislav Petkov on boot, paravirt, alternatives
and globally.
- Rebased the patchset and ensured it works with large kaslr (not included).
- patch v9 (assembly):
- Moved to relative reference for sync_core based on feedback.
- x86/crypto had multiple algorithms deleted, removed PIE changes to them.
- fix typo on comment end line.
- patch v8 (assembly):
- Fix issues in crypto changes (thanks to Eric Biggers).
- Remove unnecessary jump table change.
- Change author and signoff to chrom...
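For reference, the "relative reference for sync_core" item refers to making the 64-bit sync_core asm (which builds an IRET frame) position-independent; a sketch along these lines, not the exact hunk:

	/* Before: absolute reference to the local label, which needs an
	 * absolute relocation and therefore breaks PIE. */
	"pushq $1f\n\t"
	"iretq\n\t"
	"1:"

	/* After: compute the address RIP-relatively, then push it. */
	"leaq 1f(%%rip), %0\n\t"
	"pushq %0\n\t"
	"iretq\n\t"
	"1:"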
2019 Jul 30 · 5 · [PATCH v9 00/11] x86: PIE support to extend KASLR randomization
Minor changes based on feedback and rebase from v8.
Splitting the previous series in two. This part contains assembly code
changes required for PIE but without any direct dependencies on the
rest of the patchset.
Changes:
- patch v9 (assembly):
- Moved to relative reference for sync_core based on feedback.
- x86/crypto had multiple algorithms deleted, removed PIE changes to them.
- fix typo on comment end line.
- patch v8 (assembly):
- Fix issues in crypto changes (thanks to Eric Biggers).
- Remove unnecessary jump table change.
- Change author and signoff to chrom...
2007 Apr 18 · 7 · [patch 0/6] Various cleanups
Hi Andi,
Here's a little batch of cleanups:
- re-enable VDSO when PARAVIRT is enabled
- make the parainstructions symbols match the
other altinstructions naming convention
- add kernel command-line options to disable altinstructions for
smp and pv_ops
Oh, and I'm mailing your noreplacement patch back at you, for no
particularly good reason.
J
--
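The command-line options mentioned correspond to the noreplace knobs that later kernels document (parameter names recalled from that era, so treat them as illustrative):

	noreplace-smp		don't replace SMP instructions with
				their UP alternatives
	noreplace-paravirt	don't patch paravirt_ops call sites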
2020 Mar 03 · 4 · [PATCH v11 00/11] x86: PIE support to extend KASLR randomization
...on entry/64 changes based on feedback.
> - Addressed feedback from Borislav Petkov on boot, paravirt, alternatives
> and globally.
> - Rebased the patchset and ensured it works with large kaslr (not included).
> - patch v9 (assembly):
> - Moved to relative reference for sync_core based on feedback.
> - x86/crypto had multiple algorithms deleted, removed PIE changes to them.
> - fix typo on comment end line.
> - patch v8 (assembly):
> - Fix issues in crypto changes (thanks to Eric Biggers).
> - Remove unnecessary jump table change.
> - Chan...
2007 Apr 18 · 2 · [PATCH 1/3] Paravirtualization: Kernel Ring Cleanups
Hi all,
I've been looking at finding common ground between the VMI, Xen and
other paravirtualization approaches, and after some discussion, we're
getting somewhere.
These first two patches are the fundamentals, stolen mainly from the
VMI patches: removing assumptions about the kernel running in ring 0,
and macro-izing all the obvious para-virtualize-needing insns. The
third patch is
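To make "macro-izing the para-virtualize-needing insns" concrete, the change for one such instruction has roughly this shape, reusing the irq_disable hook visible elsewhere in these search results (a sketch, not an actual hunk):

/* Before: the raw privileged instruction, which assumes ring 0. */
#define local_irq_disable()	asm volatile("cli" ::: "memory")

/* After: indirect through the ops table, so native, Xen, and VMI
 * backends can each supply their own implementation. */
#define local_irq_disable()	paravirt_ops.irq_disable()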