Displaying 20 results from an estimated 36 matches for "set_linear_mapping".
2007 Apr 18
0
[PATCH 8/10] Vmi kmap_atomic_pte fix.patch
Implement vmi_kmap_atomic_pte in terms of the backend set_linear_mapping
operation. The conversion is rather straightforward: call kmap_atomic
and then inform the hypervisor of the page mapping.
The _flush_tlb damage is due to macros being pulled in from highmem.h.
Signed-off-by: Zachary Amsden <zach@vmware.com>
diff -r 2207a31829e7 arch/i386/kernel/vmi.c
--- a...
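A hedged sketch of what that conversion looks like, reconstructed from the commit text above and the HIGHPTE snippet further down in these results; the exact signature and the use of page_to_pfn() are assumptions, not text quoted from this patch:

#ifdef CONFIG_HIGHPTE
static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
{
	/* First establish the kernel mapping as usual... */
	void *va = kmap_atomic(page, type);

	/* ...then inform the hypervisor which slot now maps this pfn.
	 * Slot 0 is reserved for the lowmem linear map, so KM_PTE0 and
	 * KM_PTE1 use slots 1 and 2.  args: SLOT VA COUNT PFN */
	BUG_ON(type != KM_PTE0 && type != KM_PTE1);
	vmi_ops.set_linear_mapping((type - KM_PTE0) + 1, (u32)va, 1,
				   page_to_pfn(page));

	return va;
}
#endif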
2007 Apr 18
0
[PATCH 8/9] Vmi apic ops.diff
...e_tsc;
static int disable_mtrr;
static int disable_noidle;
+static int disable_vmi_timer;
/* Cached VMI operations */
struct {
@@ -662,12 +663,12 @@ void vmi_bringup(void)
void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
- if (vmi_rom)
+ if (vmi_ops.set_linear_mapping)
vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
- * Return a pointer to the VMI function or a NOP stub
+ * Return a pointer to a VMI function or NULL if unimplemented
*/
static void *vmi_get_function(int vmicall)
{
@@ -678,12 +679,13 @@ static void *vmi_get_function(i...
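The comment change above ("a NOP stub" becomes "NULL if unimplemented") is the heart of this hunk: callers of vmi_ops must now test each slot before using it, which is why vmi_bringup() checks vmi_ops.set_linear_mapping instead of vmi_rom. A minimal standalone sketch of the two dispatch styles, with illustrative names that are not from the patch:

#include <stdio.h>

typedef void (*linear_map_fn)(int slot, unsigned long va,
                              unsigned long npages, unsigned long pfn);

/* Old style: unimplemented calls are backed by a NOP stub, so callers
 * never check, but also never learn that the call does nothing. */
static void nop_linear_map(int s, unsigned long v, unsigned long n,
                           unsigned long p) { }

static struct { linear_map_fn set_linear_mapping; } ops;

int main(void)
{
	ops.set_linear_mapping = nop_linear_map;
	ops.set_linear_mapping(0, 0xc0000000UL, 1024, 0);  /* silently a no-op */

	/* New style: unimplemented calls stay NULL; callers check and can
	 * fall back to native behaviour instead of a silent no-op. */
	ops.set_linear_mapping = NULL;
	if (ops.set_linear_mapping)
		ops.set_linear_mapping(0, 0xc0000000UL, 1024, 0);
	else
		printf("set_linear_mapping unimplemented, using native path\n");
	return 0;
}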
2007 Apr 18
0
[PATCH 3/3] Vmi native fix
...15e2685a3c arch/i386/kernel/vmi.c
--- a/arch/i386/kernel/vmi.c Thu Jan 04 15:56:40 2007 -0800
+++ b/arch/i386/kernel/vmi.c Thu Jan 04 15:57:38 2007 -0800
@@ -645,7 +645,8 @@ void vmi_bringup(void)
void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
- vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
+ if (vmi_rom)
+ vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
2007 Apr 18
31
[PATCH 00/28] Updates for firstfloor paravirt-ops patches
Hi Andi,
This is a set of updates for the firstfloor patch queue.
Quick rundown:
revert-mm-x86_64-mm-account-for-module-percpu-space-separately-from-kernel-percpu.patch
separate-module-percpu-space.patch
Update the module percpu accounting patch
fix-ff-allow-percpu-variables-to-be-page-aligned.patch
Make sure the percpu memory allocation is page-aligned
2007 Apr 18
1
[PATCH 4/9] Vmi fix highpte
...ive
+ * page tables.
+ *
+ * We use slot zero for the linear mapping of physical memory, and
+ * in HIGHPTE kernels, slot 1 and 2 for KM_PTE0 and KM_PTE1.
+ *
+ * args: SLOT VA COUNT PFN
+ */
+ BUG_ON(type != KM_PTE0 && type != KM_PTE1);
+ vmi_ops.set_linear_mapping((type - KM_PTE0)+1, (u32)va, 1, pfn);
+}
+
static void vmi_allocate_pt(u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
@@ -813,6 +831,7 @@ static inline int __init activate_vmi(vo
vmi_ops.allocate_page = vmi_get_function(VMI_CALL_AllocatePage);
vmi_ops.release_page = vmi_get_function(VMI_CA...
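The slot convention spelled out in that comment, lined up against the two call sites that appear elsewhere in these results (a reading aid, not code from any single patch):

/* slot 0:    vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
 *            the lowmem linear map, established once in vmi_bringup()
 * slot 1, 2: vmi_ops.set_linear_mapping((type - KM_PTE0) + 1, (u32)va, 1, pfn);
 *            transient HIGHPTE mappings for KM_PTE0 / KM_PTE1
 */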
2007 Apr 18
0
[PATCH 4/5] Vmi.patch
...u32 selector);
+ void (fastcall *set_kernel_stack)(u32 selector, u32 esp0);
+ void (fastcall *allocate_page)(u32, u32, u32, u32, u32);
+ void (fastcall *release_page)(u32, u32);
+ void (fastcall *set_pte)(pte_t, pte_t *, unsigned);
+ void (fastcall *update_pte)(pte_t *, unsigned);
+ void (fastcall *set_linear_mapping)(int, u32, u32, u32);
+ void (fastcall *flush_tlb)(int);
+ void (fastcall *set_initial_ap_state)(int, int);
+} vmi_ops;
+
+/* XXX move this to alternative.h */
+extern struct paravirt_patch __start_parainstructions[],
+ __stop_parainstructions[];
+
+/*
+ * VMI patching routines.
+ */
+#define MNEM...
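A minimal sketch of how one slot in this cached table gets wired up and then used, assembled from the other snippets in these results (activate_vmi() and vmi_bringup() shown only in relevant part):

/* In activate_vmi(): cache the ROM entry point; may come back NULL. */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);

/* In vmi_bringup(): call through the cached pointer, guarding against
 * an unimplemented or never-initialized op. */
if (vmi_ops.set_linear_mapping)
	vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);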
2007 Apr 18
0
[PATCH 5/6] VMI backend for paravirt-ops
...u32 selector);
+ void (fastcall *set_kernel_stack)(u32 selector, u32 esp0);
+ void (fastcall *allocate_page)(u32, u32, u32, u32, u32);
+ void (fastcall *release_page)(u32, u32);
+ void (fastcall *set_pte)(pte_t, pte_t *, unsigned);
+ void (fastcall *update_pte)(pte_t *, unsigned);
+ void (fastcall *set_linear_mapping)(int, u32, u32, u32);
+ void (fastcall *flush_tlb)(int);
+ void (fastcall *set_initial_ap_state)(int, int);
+} vmi_ops;
+
+/* XXX move this to alternative.h */
+extern struct paravirt_patch __start_parainstructions[],
+ __stop_parainstructions[];
+
+/*
+ * VMI patching routines.
+ */
+#define MNEM...
2010 Aug 23
1
Removing VMI kernel support from 2.6.37
...u32);
- void (*write_ldt_entry)(struct desc_struct *, int, u32, u32);
- void (*set_kernel_stack)(u32 selector, u32 sp0);
- void (*allocate_page)(u32, u32, u32, u32, u32);
- void (*release_page)(u32, u32);
- void (*set_pte)(pte_t, pte_t *, unsigned);
- void (*update_pte)(pte_t *, unsigned);
- void (*set_linear_mapping)(int, void *, u32, u32);
- void (*_flush_tlb)(int);
- void (*set_initial_ap_state)(int, int);
- void (*halt)(void);
- void (*set_lazy_mode)(int mode);
-} vmi_ops;
-
-/* Cached VMI operations */
-struct vmi_timer_ops vmi_timer_ops;
-
-/*
- * VMI patching routines.
- */
-#define MNEM_CALL 0xe8
-#de...
2007 Oct 09
2
[PATCH RFC REPOST 1/2] paravirt: refactor struct paravirt_ops into smaller pv_*_ops
...get_function(VMI_CALL_ReleasePage);
if (vmi_ops.release_page) {
- paravirt_ops.release_pt = vmi_release_pt;
- paravirt_ops.release_pd = vmi_release_pd;
+ pv_mmu_ops.release_pt = vmi_release_pt;
+ pv_mmu_ops.release_pd = vmi_release_pd;
}
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
if (vmi_ops.set_linear_mapping)
- paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
+ pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
/*
@@ -863,17 +863,17 @@ static inline int __init activate_vmi(vo
* the...
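A schematic sketch of the split this refactor performs; member lists are abbreviated and the surrounding field types are assumptions, not quoted from the patch:

/* Before: one monolithic table mixing cpu, irq, time and mmu hooks. */
struct paravirt_ops {
	/* ... release_pt, release_pd, kmap_atomic_pte, and everything else ... */
};

/* After: hooks grouped per subsystem, so a backend such as VMI patches
 * only the group it actually implements. */
struct pv_mmu_ops {
	/* ... release_pt, release_pd, kmap_atomic_pte, ... */
};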
2007 Sep 28
2
[PATCH RFC] paravirt_ops: refactor struct paravirt_ops into smaller pv_*_ops
...get_function(VMI_CALL_ReleasePage);
if (vmi_ops.release_page) {
- paravirt_ops.release_pt = vmi_release_pt;
- paravirt_ops.release_pd = vmi_release_pd;
+ pv_mmu_ops.release_pt = vmi_release_pt;
+ pv_mmu_ops.release_pd = vmi_release_pd;
}
/* Set linear is needed in all cases */
vmi_ops.set_linear_mapping = vmi_get_function(VMI_CALL_SetLinearMapping);
#ifdef CONFIG_HIGHPTE
if (vmi_ops.set_linear_mapping)
- paravirt_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
+ pv_mmu_ops.kmap_atomic_pte = vmi_kmap_atomic_pte;
#endif
/*
@@ -863,17 +863,17 @@ static inline int __init activate_vmi(vo
* the...