Displaying 11 results from an estimated 11 matches for "vsyscall_addr".
2007 Apr 18
0
[RFC/PATCH PV_OPS X86_64 14/17] paravirt_ops - vsyscall
...0 = __pa_symbol(paravirt_ops.vsyscall_page);
+#endif
/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
@@ -299,7 +308,14 @@ static int __init vsyscall_init(void)
BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
- map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+ if (paravirt_ops.vsyscall_page)
+#endif
+ map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+ else
+ __sysctl_...
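For readability, the change in the hunk above amounts to roughly the following (a paraphrased sketch, not the patch itself: the fallback call is truncated to "__sysctl_..." in the snippet, so pv_no_vsyscall_fallback() here is only a hypothetical placeholder):

#ifdef CONFIG_PARAVIRT
	/* Map the fixed vsyscall page only if the pv backend provides one;
	 * otherwise take the fallback path (the real call is truncated in
	 * the excerpt above). */
	if (paravirt_ops.vsyscall_page)
		map_vsyscall();
	else
		pv_no_vsyscall_fallback();	/* hypothetical name */
#else
	map_vsyscall();		/* native behaviour unchanged */
#endif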
2011 Jul 27
9
[PATCH 0/5] Collected vdso/vsyscall fixes for 3.1
This fixes various problems that cropped up with the vdso patches.
- Patch 1 fixes an information leak to userspace.
- Patches 2 and 3 fix the kernel build on gold.
- Patches 4 and 5 fix Xen (I hope).
Konrad, could you test these on Xen and run 'test_vsyscall test' [1]?
I don't have a usable Xen setup.
Also, I'd appreciate a review of patches 4 and 5 from some
2011 Aug 03
10
[PATCH v2 0/6] Collected vdso/vsyscall fixes for 3.1
This fixes various problems that cropped up with the vdso patches.
- Patch 1 fixes an information leak to userspace.
- Patches 2 and 3 fix the kernel build on gold.
- Patches 4 and 5 fix Xen (I hope).
- Patch 6 (optional) adds a trace event to vsyscall emulation. It will
make it easier to handle performance regression reports :)
[1]
2007 Apr 18
3
[PATCH 1 of 1] x86_64: Put .note.* sections into a PT_NOTE segment in vmlinux
.../* out-of-line lock text */
.text.lock : AT(ADDR(.text.lock) - LOAD_OFFSET) { *(.text.lock) }
@@ -57,7 +63,7 @@ #endif
.data : AT(ADDR(.data) - LOAD_OFFSET) {
*(.data)
CONSTRUCTORS
- }
+ } :data
_edata = .; /* End of data section */
@@ -89,7 +95,7 @@ #define VVIRT_OFFSET (VSYSCALL_ADDR - VS
#define VVIRT(x) (ADDR(x) - VVIRT_OFFSET)
. = VSYSCALL_ADDR;
- .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) }
+ .vsyscall_0 : AT(VSYSCALL_PHYS_ADDR) { *(.vsyscall_0) } :user
__vsyscall_0 = VSYSCALL_VIRT_ADDR;
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
@@ -132,7 +138,7 @@...
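For context, the ":data" and ":user" suffixes added above assign output sections to named ELF program headers, which the same linker script declares in a PHDRS block. Roughly (a sketch from memory, flag values illustrative rather than quoted from the patch):

PHDRS {
	text PT_LOAD FLAGS(5);	/* read + execute: kernel text */
	data PT_LOAD FLAGS(7);	/* read + write + execute: kernel data */
	user PT_LOAD FLAGS(7);	/* vsyscall page visible to userspace */
	note PT_NOTE FLAGS(0);	/* gathers the .note.* sections */
}

The "} :data" and "} :user" edits in the hunk then attach each output section to the matching header, so the vsyscall page lands in its own user-visible PT_LOAD segment and the .note.* sections end up in a PT_NOTE segment, as the patch subject describes.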
2007 Apr 18
0
[PATCH] paravirt_ops x86_64 , take 2
...0 = __pa_symbol(paravirt_ops.vsyscall_page);
+#endif
/* Note that VSYSCALL_MAPPED_PAGES must agree with the code below. */
__set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
@@ -300,7 +309,14 @@ static int __init vsyscall_init(void)
BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
- map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+ if (paravirt_ops.vsyscall_page)
+#endif
+ map_vsyscall();
+#ifdef CONFIG_PARAVIRT
+ else
+ __sysctl_...