Displaying 16 results from an estimated 73 matches for "max_low_pfn".
2008 Apr 16
1
[PATCH] kvm: move kvmclock initialization inside kvm_guest_init
...(KVM_FEATURE_NOP_IO_DELAY))
pv_cpu_ops.io_delay = kvm_io_delay;
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index 65f3a23..029350c 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -771,10 +771,6 @@ void __init setup_arch(char **cmdline_p)
max_low_pfn = setup_memory();
-#ifdef CONFIG_KVM_CLOCK
- kvmclock_init();
-#endif
-
#ifdef CONFIG_VMI
/*
* Must be after max_low_pfn is determined, and before kernel
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 3ddce03..c5e662c 100644
--- a/include/linux/kvm_para.h
+++ b/inclu...
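[Editorial note: the move itself is small — the call leaves setup_arch() (the hunk above) and lands in the paravirt guest hook. A rough sketch of the destination follows; only the KVM_FEATURE_NOP_IO_DELAY test and the io_delay assignment appear in the quoted fragment, and the kvm_para_available() guard is an assumption, not taken from the patch.]

/* Sketch of kvm_guest_init() after the move (arch/x86/kernel/kvm.c).
 * The kvm_para_available() guard is assumed; the rest mirrors the quote. */
void __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return;

	if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
		pv_cpu_ops.io_delay = kvm_io_delay;

#ifdef CONFIG_KVM_CLOCK
	kvmclock_init();	/* previously called from setup_arch() */
#endif
}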
2006 Jun 22
1
[PATCH] fix linux ioremap() of domain local memory
...that is local to this virtual
* machine (i.e., not an I/O page, nor a memory page belonging to another VM).
- * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
- * why this works.
*/
static inline int is_local_lowmem(unsigned long address)
{
extern unsigned long max_low_pfn;
- return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
+ return ((address >> PAGE_SHIFT) < max_low_pfn);
}
/*
--
Do not meddle in the internals of kernels, for they are subtle and quick to panic.
_______________________________________________
Xen-devel mail...
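[Editorial note: after the fix, the helper compares the pseudo-physical frame number directly against max_low_pfn instead of round-tripping through mfn_to_local_pfn(). A standalone restatement of that check; the PAGE_SHIFT and max_low_pfn values below are illustrative, not taken from the patch.]

#include <stdbool.h>

#define PAGE_SHIFT 12                        /* 4 KiB pages, illustrative */

static unsigned long max_low_pfn = 0x38000;  /* illustrative: ~896 MiB */

/* Fixed check: the address is already pseudo-physical, so its pfn can be
 * compared against max_low_pfn directly. */
static bool is_local_lowmem(unsigned long address)
{
	return (address >> PAGE_SHIFT) < max_low_pfn;
}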
2019 May 08
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...*/
> + swiotlb_init(1);
> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
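[Editorial note: reading the quoted hunks together, the series adds a pv_init() helper that forces swiotlb bounce buffering and calls it from mem_init() once max_low_pfn-derived setup is done. A sketch of the helper's shape; the is_prot_virt_guest() guard is an assumption, since the snippet truncates everything before "return;".]

/* Reconstructed from the quoted hunks (arch/s390/mm/init.c). */
static void __init pv_init(void)
{
	if (!is_prot_virt_guest())	/* assumed guard; elided in the quote */
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}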
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...*/
> + swiotlb_init(1);
> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +176,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
2007 Apr 18
0
[PATCH 3/3] Vmi native fix
...a/arch/i386/kernel/vmi.c Thu Jan 04 15:56:40 2007 -0800
+++ b/arch/i386/kernel/vmi.c Thu Jan 04 15:57:38 2007 -0800
@@ -645,7 +645,8 @@ void vmi_bringup(void)
void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
- vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
+ if (vmi_rom)
+ vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}
/*
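[Editorial note: the fix is the two-line guard visible in the hunk. As applied, the function reads as below; that vmi_rom is the module-level pointer to the detected VMI ROM, NULL on native hardware, is inferred from context rather than shown in the quote.]

void vmi_bringup(void)
{
	/* We must establish the lowmem mapping for MMU ops to work */
	if (vmi_rom)	/* skip on native hardware, where no VMI ROM exists */
		vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
}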
2007 Apr 18
0
[PATCH 8/10] Vmi kmap_atomic_pte fix.patch
..._PTE0)+1, va, 1, page_to_pfn(page));
+
+ return va;
+}
+#endif
static void vmi_allocate_pt(u32 pfn)
{
@@ -656,7 +663,7 @@ void vmi_bringup(void)
{
/* We must establish the lowmem mapping for MMU ops to work */
if (vmi_ops.set_linear_mapping)
- vmi_ops.set_linear_mapping(0, __PAGE_OFFSET, max_low_pfn, 0);
+ vmi_ops.set_linear_mapping(0, (void *)__PAGE_OFFSET, max_low_pfn, 0);
}
/*
@@ -793,8 +800,8 @@ static inline int __init activate_vmi(vo
para_wrap(set_lazy_mode, vmi_set_lazy_mode, set_lazy_mode, SetLazyMode);
/* user and kernel flush are just handled with different flags to FlushT...
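[Editorial note: the second change in the hunk is a type fix — set_linear_mapping now takes the mapping base as a pointer, so __PAGE_OFFSET (an integer constant) must be cast. A hypothetical prototype consistent with the patched call; only the void * second parameter is implied by the cast, the rest is guesswork.]

/* Assumed shape of the hook after this series. */
void (*set_linear_mapping)(int slot, void *vaddr,
			   unsigned long pages, unsigned long pfn_base);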
2019 Apr 26
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +182,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.16.4
2019 Jun 06
0
[PATCH v4 1/8] s390/mm: force swiotlb for protected virtualization
...return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.17.1
2019 Jun 12
0
[PATCH v5 1/8] s390/mm: force swiotlb for protected virtualization
...return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.17.1
2019 May 23
0
[PATCH v2 1/8] s390/mm: force swiotlb for protected virtualization
...return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.13.4
2019 May 29
0
[PATCH v3 1/8] s390/mm: force swiotlb for protected virtualization
...return;
+
+ /* make sure bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.13.4
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...t; + swiotlb_force = SWIOTLB_FORCE;
> > > +}
> > > +
> > > void __init mem_init(void)
> > > {
> > > cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > > @@ -134,6 +176,8 @@ void __init mem_init(void)
> > > set_max_mapnr(max_low_pfn);
> > > high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> > >
> > > + pv_init();
> > > +
> > > /* Setup guest page hinting */
> > > cmma_init();
> > >
> >
>
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...lb_update_mem_attributes();
> > + swiotlb_force = SWIOTLB_FORCE;
> > +}
> > +
> > void __init mem_init(void)
> > {
> > cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > @@ -134,6 +182,8 @@ void __init mem_init(void)
> > set_max_mapnr(max_low_pfn);
> > high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> >
> > + pv_init();
> > +
> > /* Setup guest page hinting */
> > cmma_init();
> >
>
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...lb_init(1);
> +    swiotlb_update_mem_attributes();
> +    swiotlb_force = SWIOTLB_FORCE;
> +}
> +
>  void __init mem_init(void)
>  {
>      cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
>      set_max_mapnr(max_low_pfn);
>      high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> +    pv_init();
> +
>      /* Setup guest page hinting */
>      cmma_init();
>  -- 2.16.4
>
>
--
-- Jason J. Herne (jjherne at linux.ibm.com)
2019 Sep 06
0
[vhost:linux-next 13/15] arch/ia64/include/asm/page.h:51:23: warning: "hpage_shift" is not defined, evaluates to 0
...ia64/page.h Bob Picco 2005-10-04 119 #elif defined(CONFIG_DISCONTIGMEM)
b77dae5293efba include/asm-ia64/page.h Dean Roe 2005-11-09 120 extern unsigned long min_low_pfn;
^1da177e4c3f41 include/asm-ia64/page.h Linus Torvalds 2005-04-16 121 extern unsigned long max_low_pfn;
b77dae5293efba include/asm-ia64/page.h Dean Roe 2005-11-09 122 # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
^1da177e4c3f41 include/asm-ia64/page.h Linus Torvalds 2005-04-16 123 #endif
^1da177e4c3f4...
2007 Apr 28
4
confused about the balloon code
hi
I'm trying to understand the balloon code, and I got confused about the
following parts of it:
static int decrease_reservation(unsigned long nr_pages)
{
....
if (!PageHighMem(page)) {
v = phys_to_virt(pfn << PAGE_SHIFT);
scrub_pages(v, 1);
ret = HYPERVISOR_update_va_mapping(
(unsigned long)v, __pte_ma(0), 0);
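[Editorial note for anyone puzzling over the same fragment: for lowmem pages the balloon driver scrubs the page through its permanent kernel mapping and then tears that mapping down before the frame is returned to the hypervisor. The same fragment with comments; the surrounding loop and error handling are elided in the quote and stay elided here.]

	if (!PageHighMem(page)) {
		/* Lowmem pages have a permanent kernel mapping; find it. */
		v = phys_to_virt(pfn << PAGE_SHIFT);
		/* Zero the page so its old contents never leak out of the VM. */
		scrub_pages(v, 1);
		/* Clear the virtual mapping: the machine frame is about to be
		 * handed back, so the kernel must not dereference v again. */
		ret = HYPERVISOR_update_va_mapping(
			(unsigned long)v, __pte_ma(0), 0);
	}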