Displaying 20 results from an estimated 121 matches for "get_zeroed_page".
2009 Sep 21
0
[PATCH 4/5] lguest: cleanup for map_switcher()
From: Xiao Guangrong <xiaoguangrong at cn.fujitsu.com>
We can use alloc_page() instead of get_zeroed_page() and virt_to_page()
Signed-off-by: Xiao Guangrong <xiaoguangrong at cn.fujitsu.com>
Signed-off-by: Rusty Russell <rusty at rustcorp.com.au>
---
drivers/lguest/core.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/drivers/lguest/core.c b/drivers/lguest/co...
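For context, the cleanup simply swaps one allocation idiom for another. A minimal illustrative sketch of the two patterns (placeholder variable names, not the actual hunk in drivers/lguest/core.c, which is truncated above):

	/* Old pattern: allocate a zeroed page by virtual address,
	 * then convert that address to a struct page. */
	unsigned long addr = get_zeroed_page(GFP_KERNEL);
	struct page *switcher_page = addr ? virt_to_page(addr) : NULL;

	/* New pattern: ask for the struct page directly,
	 * zeroed via __GFP_ZERO (assumed equivalent behaviour). */
	struct page *switcher_page2 = alloc_page(GFP_KERNEL | __GFP_ZERO);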
2020 Jun 22
1
[PATCH 14/16] mm/thp: add THP allocation helper
...long addr);
> +#else
> +static inline struct page *alloc_transhugepage(struct vm_area_struct *vma,
> +						unsigned long addr)
> +{
> +	return NULL;
> +}
> +#endif
>
> extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
> extern unsigned long get_zeroed_page(gfp_t gfp_mask);
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 25d95f7b1e98..f749633ed350 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -775,6 +775,22 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
> return __do_huge_pmd_anonymous_pa...
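The quoted hunk only shows the !CONFIG_TRANSPARENT_HUGEPAGE stub; the real helper body in mm/huge_memory.c is cut off. As a rough, hedged sketch of what the enabled side of such a helper could look like (the gfp choice and the alloc_pages_vma() call are assumptions, not the patch's code):

	struct page *alloc_transhugepage(struct vm_area_struct *vma,
					 unsigned long addr)
	{
		struct page *page;

		/* Assumed flags; the real patch may derive them from the VMA. */
		page = alloc_pages_vma(GFP_TRANSHUGE, HPAGE_PMD_ORDER,
				       vma, addr, numa_node_id(), true);
		if (page)
			prep_transhuge_page(page);
		return page;
	}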
2018 Nov 05
2
[PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
...static struct virtio_vsock *virtio_vsock_get(void)
@@ -256,6 +257,25 @@ static int virtio_transport_send_pkt_loopback(struct virtio_vsock *vsock,
return 0;
}
+static int fill_mergeable_rx_buff(struct virtqueue *vq)
+{
+	void *page = NULL;
+	struct scatterlist sg;
+	int err;
+
+	page = (void *)get_zeroed_page(GFP_KERNEL);
+	if (!page)
+		return -ENOMEM;
+
+	sg_init_one(&sg, page, PAGE_SIZE);
+
+	err = virtqueue_add_inbuf(vq, &sg, 1, page, GFP_KERNEL);
+	if (err < 0)
+		free_page((unsigned long) page);
+
+	return err;
+}
+
static void virtio_vsock_rx_fill(struct virtio_vsock *vsock)
{
int...
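The rx fill path (virtio_vsock_rx_fill(), truncated above) would presumably drive this helper. A hypothetical sketch of such a fill loop (the loop structure and the final kick are assumptions, not the patch's code):

	static void example_rx_fill(struct virtio_vsock *vsock)
	{
		struct virtqueue *vq = vsock->vqs[VSOCK_VQ_RX];

		/* Keep queueing one-page buffers until the ring is full
		 * (virtqueue_add_inbuf() returns -ENOSPC) or allocation fails. */
		while (fill_mergeable_rx_buff(vq) == 0)
			;

		virtqueue_kick(vq);
	}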
2007 Apr 18
1
[PATCH 0/7] Using %gs for per-cpu areas on x86
OK, here it is. Benchmarks still coming. This is against Andi's
2.6.18-rc7-git3 tree, and replaces the patches between (and not
including) i386-pda-asm-offsets and i386-early-fault.
One patch is identical, one is mildly modified, the rest are
re-implemented but inspired by Jeremy's PDA work.
Thanks,
Rusty.
--
Help! Save Australia from the worst of the DMCA: http://linux.org.au/law
2018 Nov 06
1
[PATCH 1/5] VSOCK: support fill mergeable rx buffer in guest
..._pkt_loopback(struct virtio_vsock *vsock,
>> return 0;
>> }
>>
>> +static int fill_mergeable_rx_buff(struct virtqueue *vq)
>> +{
>> +	void *page = NULL;
>> +	struct scatterlist sg;
>> +	int err;
>> +
>> +	page = (void *)get_zeroed_page(GFP_KERNEL);
>
>
> Any reason to use zeroed page?
In the previous version, the entire virtio_vsock_pkt structure was preallocated
in the guest with kzalloc(), so it was contiguous, zeroed physical memory, even
though the host only needs to fill in the virtio_vsock_hdr portion.
However, in mergeable rx buffer version, w...
2009 Apr 16
1
NULL pointer dereference at __switch_to() ( __unlazy_fpu ) with lguest PAE patch
...'t map it in. */
+	if (!(pmd_flags(gpmd) & _PAGE_PRESENT))
+		return 0;
+
+	/* Now look at the matching shadow entry. */
+	spmd = spmd_addr(cpu, *spgd, vaddr);
+
+	if (!(pmd_flags(*spmd) & _PAGE_PRESENT)) {
+		/* No shadow entry: allocate a new shadow PTE page. */
+		unsigned long ptepage = get_zeroed_page(GFP_KERNEL);
+
+		/* This is not really the Guest's fault, but killing it is
+		 * simple for this corner case. */
+		if (!ptepage) {
+			kill_guest(cpu, "out of memory allocating pte page");
+			return 0;
+		}
+
+		/* We check that the Guest pmd is OK. */
+		check_gpmd(cpu, gpmd);
+...
2007 Apr 18
3
[patch] paravirt: VDSO page is essential
On Mon, 2007-03-05 at 13:06 +0100, Ingo Molnar wrote:
> Subject: [patch] paravirt: VDSO page is essential
> From: Ingo Molnar <mingo@elte.hu>
>
> commit 3bbf54725467d604698721384d858b5983b87e8f disables the VDSO for
> CONFIG_PARAVIRT kernels. This #ifdeffery was a bad change: the VDSO is
> an essential component of Linux, and this change forces all of them to
> use
2007 Apr 18
1
[PATCH] Gerd Hoffman's move-vsyscall-into-user-address-range patch
...>
#include <asm/msr.h>
@@ -45,23 +46,88 @@ void enable_sep_cpu(void)
 */
extern const char vsyscall_int80_start, vsyscall_int80_end;
extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+static void *syscall_page;
int __init sysenter_setup(void)
{
-	void *page = (void *)get_zeroed_page(GFP_ATOMIC);
-
-	__set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY_EXEC);
+	syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!boot_cpu_has(X86_FEATURE_SEP)) {
-		memcpy(page,
+		memcpy(syscall_page,
		       &vsyscall_int80_start,
		       &vsyscall_int80_end - &vsyscal...
2008 Mar 20
0
[RFC/PATCH 08/15] kvm-s390: intercepts for privileged instructions
...&& fc > 0)
+		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
+
+	switch (fc) {
+	case 0:
+		vcpu->arch.guest_gprs[0] = 3 << 28;
+		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
+		return 0;
+	case 1: /* same handling for 1 and 2 */
+	case 2:
+		mem = get_zeroed_page(GFP_KERNEL);
+		if (!mem)
+			goto out_fail;
+		if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
+			goto out_mem;
+		break;
+	case 3:
+		if (sel1 != 2 || sel2 != 2)
+			goto out_fail;
+		mem = get_zeroed_page(GFP_KERNEL);
+		if (!mem)
+			goto out_fail;
+		handle_stsi_3_2_2(vcpu, (void *) mem);...
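The excerpt cuts off before the out_mem/out_fail labels, so only the allocation side of the pattern is visible. A generic, self-contained illustration of the allocate-use-free idiom around get_zeroed_page() (a hypothetical function, not the handler in this patch):

	static int example_scratch_page_user(void)
	{
		unsigned long mem;

		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			return -ENOMEM;

		/* ... fill the zeroed page and hand it to its consumer here ... */

		free_page(mem);
		return 0;
	}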
2007 Apr 18
4
[patch 0/2] Updates to compat VDSOs
Hi Andi,
Here's a couple of patches to fix up COMPAT_VDSO:
The first is a straightforward implementation of Jan's original idea
of relocating the VDSO to match its mapped location. Unlike Jan and
Zach's version, I changed it to relocate based on the phdrs rather than
the sections; the result is pleasantly compact.
The second patch takes advantage of the fact that all the
2020 Jun 19
0
[PATCH 14/16] mm/thp: add THP allocation helper
...epage(struct vm_area_struct *vma,
+					unsigned long addr);
+#else
+static inline struct page *alloc_transhugepage(struct vm_area_struct *vma,
+						unsigned long addr)
+{
+	return NULL;
+}
+#endif
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 25d95f7b1e98..f749633ed350 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -775,6 +775,22 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}
+#ifd...
2020 Nov 06
0
[PATCH v3 4/6] mm/thp: add THP allocation helper
...epage(struct vm_area_struct *vma,
+					unsigned long addr);
+#else
+static inline struct page *alloc_transhugepage(struct vm_area_struct *vma,
+						unsigned long addr)
+{
+	return NULL;
+}
+#endif
extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index a073e66d0ee2..c2c1d3e7c35f 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -765,6 +765,20 @@ vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf)
return __do_huge_pmd_anonymous_page(vmf, page, gfp);
}
+stru...
2007 Apr 18
1
[PATCH 3/3] Gdt hotplug
As suggested by Andi Kleen, don't allocate a GDT page if there is already one
present. Needed for CPU hotplug.
Signed-off-by: Zachary Amsden <zach@vmware.com>
Index: linux-2.6.14-rc1/arch/i386/kernel/smpboot.c
===================================================================
--- linux-2.6.14-rc1.orig/arch/i386/kernel/smpboot.c 2005-09-20 20:38:22.000000000 -0700
+++
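The diff itself is truncated, but the described idea is just a presence check before allocating. An illustrative sketch under that assumption (the cpu_gdt_descr name follows the i386 code of that era; treat the exact form as hypothetical, not the actual smpboot.c hunk):

	/* Only allocate a GDT page if this CPU doesn't already own one,
	 * e.g. when a previously booted CPU is hot-added again. */
	if (!cpu_gdt_descr[cpu].address)
		cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL);
	if (!cpu_gdt_descr[cpu].address)
		panic("CPU%d: unable to allocate GDT page", cpu);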