Displaying 20 results from an estimated 30 matches for "high_memory".
2020 Jul 28
0
[vhost:vhost 38/45] include/linux/vdpa.h:43:21: error: expected ':', ',', ';', '}' or '__attribute__' before '.' token
....c:14:
include/linux/scatterlist.h: In function 'sg_set_buf':
arch/m68k/include/asm/page_mm.h:169:49: warning: ordered comparison of pointer with null pointer [-Wextra]
169 | #define virt_addr_valid(kaddr) ((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
| ^~
include/linux/compiler.h:78:42: note: in definition of macro 'unlikely'
78 | # define unlikely(x) __builtin_expect(!!(x), 0)
| ^
include/linux/scatterlist.h:143:2: no...
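For context, the warning is about the pointer comparison inside virt_addr_valid().
A minimal stand-alone reproducer, assuming a configuration where PAGE_OFFSET
expands to 0 (that assumption is mine, not stated in the build report): the
right-hand cast then yields a null pointer constant, and gcc's -Wextra flags the
ordered comparison against it.

/* reproducer.c -- build with: gcc -Wextra -c reproducer.c */
#define PAGE_OFFSET 0UL            /* assumed value on the failing config */

extern void *high_memory;

int virt_addr_valid(unsigned long kaddr)
{
        /* warning: ordered comparison of pointer with null pointer [-Wextra] */
        return (void *)kaddr >= (void *)PAGE_OFFSET &&
               (void *)kaddr < high_memory;
}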
2019 May 08
2
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...> + swiotlb_update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +176,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
>
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
>
2007 Jul 06
0
Xen 3.03 - no DMA memory available
...after system startup, it is
very unlikely to fail.
This leads to a kernel panic reporting that the pgalloc function which
allocates memory resources has failed...
Then I tried to use the private pool in high memory mode, i.e. the
module will assume that unused physical memory is present at the high_memory
address (memory not managed by the Linux memory manager). This memory
block is mapped into kernel space and administered by the mpool allocation
functions. High memory must be reserved using either the mem=xxx kernel
parameter (recommended), or by hardcoding the memory limit in the kernel
image...
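For illustration, a minimal sketch of the reservation scheme described above
(machine size, physical base and pool size are made up for the example; the
mpool allocator itself is not shown). Boot a 512 MB box with mem=448M so the
top 64 MB stays outside the Linux memory manager, then let the module map it:

#include <linux/module.h>
#include <linux/io.h>

#define POOL_PHYS_BASE 0x1c000000UL     /* 448 MB: first byte past mem=448M */
#define POOL_SIZE      (64UL << 20)     /* the 64 MB hidden from Linux */

static void __iomem *pool_base;

static int __init mpool_map_init(void)
{
        /* map the reserved region into kernel virtual address space */
        pool_base = ioremap(POOL_PHYS_BASE, POOL_SIZE);
        if (!pool_base)
                return -ENOMEM;
        return 0;
}

static void __exit mpool_map_exit(void)
{
        iounmap(pool_base);
}

module_init(mpool_map_init);
module_exit(mpool_map_exit);
MODULE_LICENSE("GPL");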
2007 Apr 18
0
[PATCH 5/9] 00mm6 kpte flush.patch
...tomic(void *kvaddr, enum km_type type)
{
-#ifdef CONFIG_DEBUG_HIGHMEM
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+#ifdef CONFIG_DEBUG_HIGHMEM
if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
dec_preempt_count();
preempt_check_resched();
@@ -68,14 +65,14 @@ void kunmap_atomic(void *kvaddr, enum km
if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
BUG();
-
+#endif
/*
- * force other mappings to Oops if they'll try to access
- * this pte without first remap it
+ * F...
2019 Apr 26
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...e bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -134,6 +182,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.16.4
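Pieced together from the hunks quoted in these excerpts, the helper the patch
adds presumably looks roughly like the sketch below; only its tail is visible
above, so the is_prot_virt_guest() guard is an assumption based on the cover
letter, not quoted code.

#include <linux/swiotlb.h>

static void __init pv_init(void)
{
        if (!is_prot_virt_guest())      /* assumed guard: nothing to do for normal guests */
                return;

        /* make sure bounce buffers are shared */
        swiotlb_init(1);
        swiotlb_update_mem_attributes();
        swiotlb_force = SWIOTLB_FORCE;
}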
2019 Jun 06
0
[PATCH v4 1/8] s390/mm: force swiotlb for protected virtualization
...e bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.17.1
2019 Jun 12
0
[PATCH v5 1/8] s390/mm: force swiotlb for protected virtualization
...e bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.17.1
2019 May 23
0
[PATCH v2 1/8] s390/mm: force swiotlb for protected virtualization
...e bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.13.4
2019 May 29
0
[PATCH v3 1/8] s390/mm: force swiotlb for protected virtualization
...e bounce buffers are shared */
+ swiotlb_init(1);
+ swiotlb_update_mem_attributes();
+ swiotlb_force = SWIOTLB_FORCE;
+}
+
void __init mem_init(void)
{
cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
@@ -136,6 +181,8 @@ void __init mem_init(void)
set_max_mapnr(max_low_pfn);
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
+ pv_init();
+
/* Setup guest page hinting */
cmma_init();
--
2.13.4
2019 Apr 09
0
[RFC PATCH 03/12] s390/mm: force swiotlb for protected virtualization
...t; > > +}
> > > +
> > > void __init mem_init(void)
> > > {
> > > cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > > @@ -134,6 +176,8 @@ void __init mem_init(void)
> > > set_max_mapnr(max_low_pfn);
> > > high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> > >
> > > + pv_init();
> > > +
> > > /* Setup guest page hinting */
> > > cmma_init();
> > >
> >
>
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
...> + swiotlb_force = SWIOTLB_FORCE;
> > +}
> > +
> > void __init mem_init(void)
> > {
> > cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> > @@ -134,6 +182,8 @@ void __init mem_init(void)
> > set_max_mapnr(max_low_pfn);
> > high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> >
> > + pv_init();
> > +
> > /* Setup guest page hinting */
> > cmma_init();
> >
>
2019 May 09
0
[PATCH 04/10] s390/mm: force swiotlb for protected virtualization
..._update_mem_attributes();
> + swiotlb_force = SWIOTLB_FORCE;
> +}
> +
> void __init mem_init(void)
> {
> cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
> @@ -134,6 +182,8 @@ void __init mem_init(void)
> set_max_mapnr(max_low_pfn);
> high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
> + pv_init();
> +
> /* Setup guest page hinting */
> cmma_init();
> -- 2.16.4
>
>
--
-- Jason J. Herne (jjherne at linux.ibm.com)
2007 Apr 18
8
[patch 0/8] Basic infrastructure patches for a paravirtualized kernel
Hi Andrew,
This series of patches lays the basic groundwork for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
The main change from the last posting is that all the page-table
related patches have been moved out, and will be posted separately.
Also, the off-by-one in reserving the top of address space has been
2007 Apr 18
15
[PATCH 0 of 13] Basic infrastructure patches for a paravirtualized kernel
[ REPOST: Apologies to anyone who has seen this before. It
didn't make it onto any of the lists it should have. -J ]
Hi Andrew,
This series of patches lays the basic groundwork for the
paravirtualized kernel patches coming later on. I think this lot is
ready for the rough-and-tumble world of the -mm tree.
For the most part, these patches do nothing or very little. The
patches should
2019 Apr 26
33
[PATCH 00/10] s390: virtio: support protected virtualization
Enhanced virtualization protection technology may require the use of
bounce buffers for I/O. While support for this was built into the virtio
core, virtio-ccw wasn't changed accordingly.
Some background on the technology (not part of this series) and the
terminology used.
* Protected Virtualization (PV):
Protected Virtualization guarantees that non-shared memory of a guest
that operates in PV
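A hedged illustration of what "bounce buffers for I/O" means here (not code
from this series): once the guest forces swiotlb, an ordinary streaming DMA
mapping is transparently copied through the shared bounce buffer, so the
device only ever touches shared memory rather than the guest's protected pages.

#include <linux/dma-mapping.h>

static dma_addr_t map_for_device(struct device *dev, void *buf, size_t len)
{
        /* with swiotlb forced, this bounces buf through the shared pool and
         * returns the DMA address of the bounce copy, not of buf itself */
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;
        return addr;
}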