Displaying 20 results from an estimated 160 matches for "__get_free_pages".
2011 Feb 11
1
[PATCH 1/3]: Staging: hv: Use native page allocation/free functions
...@@ -180,8 +180,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
newchannel->channel_callback_context = context;
/* Allocate the ring buffer */
- out = osd_page_alloc((send_ringbuffer_size + recv_ringbuffer_size)
- >> PAGE_SHIFT);
+ out = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ get_order(send_ringbuffer_size + recv_ringbuffer_size));
+
if (!out)
return -ENOMEM;
@@ -300,8 +301,8 @@ Cleanup:
errorout:
ringbuffer_cleanup(&newchannel->outbound);
ringbuffer_cleanup(&newchannel->inbound);
- osd_page_free(out, (send_ringbuffer_...
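For reference, the pattern this patch switches to pairs __get_free_pages() with free_pages() at the same order; a minimal sketch of both halves, with the error-path free assumed to mirror the allocation (the diff truncates before showing it):

	out = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(send_ringbuffer_size + recv_ringbuffer_size));
	if (!out)
		return -ENOMEM;
	...
	/* errorout: assumed counterpart, freeing the same order that was allocated */
	free_pages((unsigned long)out,
		   get_order(send_ringbuffer_size + recv_ringbuffer_size));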
2010 May 05
5
[Pv-ops][PATCH 0/4 v4] Netback multiple threads support
...etbk structures.
Main Changes from v2:
1. Merge "group" and "idx" into "netif->mapping", therefore
page_ext is not used now.
2. Put netbk_add_netif() and netbk_remove_netif() into
__netif_up() and __netif_down().
3. Change the usage of kthread_should_stop().
4. Use __get_free_pages() to replace kzalloc().
5. Modify the changes to netif_be_dbg().
6. Use MODPARM_netback_kthread to determine whether using
tasklet or kernel thread.
7. Put small fields in the front, and large arrays in the end of struct
xen_netbk.
8. Add more checks in netif_page_release().
Current netback uses o...
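A minimal sketch of change 4 above, with kzalloc() of the per-group netback state replaced by __get_free_pages(); the variable names and size expression are assumptions for illustration, not lines from the patch:

	/* assumed shape: one large, zeroed, page-granular allocation */
	netbk = (struct xen_netbk *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
			get_order(group_nr * sizeof(struct xen_netbk)));
	if (!netbk)
		return -ENOMEM;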
2018 Jun 19
2
[virtio-dev] Re: [PATCH v33 2/4] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
On Tue, Jun 19, 2018 at 08:13:37PM +0800, Wei Wang wrote:
> On 06/19/2018 11:05 AM, Michael S. Tsirkin wrote:
> > On Tue, Jun 19, 2018 at 01:06:48AM +0000, Wang, Wei W wrote:
> > > On Monday, June 18, 2018 10:29 AM, Michael S. Tsirkin wrote:
> > > > On Sat, Jun 16, 2018 at 01:09:44AM +0000, Wang, Wei W wrote:
> > > > > Not necessarily, I think. We have
2008 Jul 22
2
pv_ops - 2.6.26 - unable to handle kernel paging request
...<c015e2ea>] move_freepages_block+0x6a/0x80
[<c015e5d9>] __rmqueue+0x1a9/0x1e0
[<c015e651>] rmqueue_bulk+0x41/0x70
[<c015eae4>] get_page_from_freelist+0x464/0x490
[<c015ebba>] __alloc_pages_internal+0xaa/0x460
[<c015ef8f>] __alloc_pages+0xf/0x20
[<c015f4bf>] __get_free_pages+0xf/0x20
[<c01c015f>] proc_file_read+0x8f/0x2a0
[<c01c00d0>] proc_file_read+0x0/0x2a0
[<c01bb7ca>] proc_reg_read+0x5a/0x90
[<c01801f1>] vfs_read+0xa1/0x160
[<c01bb770>] proc_reg_read+0x0/0x90
[<c0180551>] sys_read+0x41/0x70
[<c0107256>] syscall_call+0x7/0xb...
2007 Aug 31
2
sles10: installing module-binaries: "error: Failed dependencies: ksym(panic) .."
...i try to install the modules form 1.6.2/sles10-i686 it says : sles2:/usr/src/lustre # rpm -i lustre-modules-1.6.2-2.6.16_46_0.14_lustre.1.6.2bigsmp.i686.rpm error: Failed dependencies: ksym(panic) = 1075bf0 is needed by lustre-modules-1.6.2-2.6.16_46_0.14_lustre.1.6.2bigsmp.i686 ksym(__get_free_pages) = 107d6ba3 is needed by lustre-modules-1.6.2-2.6.16_46_0.14_lustre.1.6.2bigsmp.i686 ksym(malloc_sizes) = 1095f2e0 is needed by lustre-modules-1.6.2-2.6.16_46_0.14_lustre.1.6.2bigsmp.i686 ksym(proc_symlink) = 1109102e is needed by lustre-modules-1.6.2-2.6.16_46_0.14_lustre.1.6.2bigsmp...
2015 Nov 02
1
[PATCH 1/3] Provide simple noop dma ops
On Fri, Oct 30, 2015 at 02:20:35PM +0100, Christian Borntraeger wrote:
> +static void *dma_noop_alloc(struct device *dev, size_t size,
> + dma_addr_t *dma_handle, gfp_t gfp,
> + struct dma_attrs *attrs)
> +{
> + void *ret;
> +
> + ret = (void *)__get_free_pages(gfp, get_order(size));
> + if (ret) {
> + memset(ret, 0, size);
There is no need to zero out the memory here. If the user wants
initialized memory it can call dma_zalloc_coherent. Having the memset
here means to clear the memory twice in the dma_zalloc_coherent path.
Otherwise it looks goo...
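What the review asks for is the same helper without the memset(); a minimal sketch under that assumption (the *dma_handle assignment is assumed, since the excerpt cuts off before it):

	static void *dma_noop_alloc(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp,
				    struct dma_attrs *attrs)
	{
		void *ret;

		/* no memset(): callers wanting zeroed memory use dma_zalloc_coherent() */
		ret = (void *)__get_free_pages(gfp, get_order(size));
		if (ret)
			*dma_handle = virt_to_phys(ret);	/* assumed from context */
		return ret;
	}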
2018 Jun 20
0
[virtio-dev] Re: [PATCH v33 2/4] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
...method always steal the first free page blocks, and sure can
> > > > be changed to take more. But all these can be achieved via kmalloc
> > > I'd do get_user_pages really. You don't want pages split, etc.
>
> Oops sorry. I meant get_free_pages .
Yes, we can use __get_free_pages, and the max allocation is MAX_ORDER - 1, which can report up to 2TB free memory.
"getting two pages isn't harder", do you mean passing two arrays (two allocations by get_free_pages(,MAX_ORDER -1)) to the mm API?
Please see if the following logic aligns to what you think:...
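The 2TB figure can be checked with a quick back-of-envelope calculation, assuming 4KiB pages and MAX_ORDER = 11: one order MAX_ORDER - 1 allocation is 2^10 pages = 4MiB, which holds 4MiB / 8 bytes = 512K __le64 hint entries; if each entry describes one MAX_ORDER - 1 (4MiB) free block, a single array covers 512K * 4MiB = 2TiB.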
2018 Jun 26
0
[PATCH v34 2/4] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
...; + max_array_num = max_entries / entries_per_array +
>> + !!(max_entries % entries_per_array);
>> + arrays = kmalloc_array(max_array_num, sizeof(__le64 *), GFP_KERNEL);
> Instead of all this mess, how about get_free_pages here as well?
Sounds good, will replace kmalloc_array with __get_free_pages(), but
still need the above calculation to get max_array_num.
>
> Also why do we need GFP_KERNEL for this?
I guess it is better to use "__GFP_ATOMIC | __GFP_NOMEMALLOC", thanks.
>
>
>> + if (!arrays)
>> + return NULL;
>> +
>> + for (i = 0; i < m...
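The max_array_num expression being kept is a ceiling division, which the kernel already provides as DIV_ROUND_UP() in <linux/kernel.h>; a possible simplification:

	/* same result as: max_entries / entries_per_array + !!(max_entries % entries_per_array) */
	max_array_num = DIV_ROUND_UP(max_entries, entries_per_array);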
2018 Jun 26
2
[PATCH v34 2/4] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
...(i = 0; i < max_array_num; i++) {
So we are getting a ton of memory here just to free it up a bit later.
Why doesn't get_from_free_page_list get the pages from free list for us?
We could also avoid the 1st allocation then - just build a list
of these.
> + arrays[i] =
> + (__le64 *)__get_free_pages(__GFP_ATOMIC | __GFP_NOMEMALLOC,
> + ARRAY_ALLOC_ORDER);
Coding style says:
Descendants are always substantially shorter than the parent and
are placed substantially to the right.
> + if (!arrays[i]) {
Also if it does fail (small guest), shall we try with less arrays?
> + /*...
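One way the quoted call could be laid out to satisfy that rule, keeping the arguments from the excerpt and aligning the continuation under the opening parenthesis:

	arrays[i] = (__le64 *)__get_free_pages(__GFP_ATOMIC | __GFP_NOMEMALLOC,
					       ARRAY_ALLOC_ORDER);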
2020 Jun 22
1
[PATCH 14/16] mm/thp: add THP allocation helper
...struct page *alloc_transhugepage(struct vm_area_struct *vma,
> + unsigned long addr);
> +#else
> +static inline struct page *alloc_transhugepage(struct vm_area_struct *vma,
> + unsigned long addr)
> +{
> + return NULL;
> +}
> +#endif
>
> extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
> extern unsigned long get_zeroed_page(gfp_t gfp_mask);
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index 25d95f7b1e98..f749633ed350 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -775,6 +775,22 @@ vm_fault_t do_huge_pm...
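A hypothetical caller sketch for the interface quoted above; everything apart from alloc_transhugepage() itself (the haddr variable and the fallback value) is an assumption for illustration:

	struct page *thp;

	thp = alloc_transhugepage(vma, haddr);	/* stub returns NULL when THP is disabled */
	if (!thp)
		return VM_FAULT_FALLBACK;	/* assumed: fall back to base pages */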
2018 Jun 26
2
[PATCH v34 2/4] virtio-balloon: VIRTIO_BALLOON_F_FREE_PAGE_HINT
...ries / entries_per_array +
> > > + !!(max_entries % entries_per_array);
> > > + arrays = kmalloc_array(max_array_num, sizeof(__le64 *), GFP_KERNEL);
> > Instead of all this mess, how about get_free_pages here as well?
>
> Sounds good, will replace kmalloc_array with __get_free_pages(),
Or alloc_pages, __ APIs are better avoided if possible.
> but still
> need the above calculation to get max_array_num.
Maybe alloc_pages?
> >
> > Also why do we need GFP_KERNEL for this?
>
> I guess it is better to use "__GFP_ATOMIC | __GFP_NOMEMALLOC", th...
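A minimal sketch of the alloc_pages() alternative suggested here, reusing ARRAY_ALLOC_ORDER and the array type from the patch excerpt:

	struct page *page;

	page = alloc_pages(__GFP_ATOMIC | __GFP_NOMEMALLOC, ARRAY_ALLOC_ORDER);
	arrays[i] = page ? (__le64 *)page_address(page) : NULL;
	/* freed later with __free_pages(page, ARRAY_ALLOC_ORDER) instead of free_pages() */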
2011 Feb 14
2
[PATCH 1/2] staging: hv: Remove dead code from netvsc.c
..._buf(struct hv_device *device)
"device being destroyed?");
return -1;
}
- /* ASSERT(netDevice->ReceiveBufferSize > 0); */
- /* page-size grandularity */
- /* ASSERT((netDevice->ReceiveBufferSize & (PAGE_SIZE - 1)) == 0); */
net_device->recv_buf =
(void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
@@ -234,9 +227,6 @@ static int netvsc_init_recv_buf(struct hv_device *device)
ret = -1;
goto cleanup;
}
- /* page-aligned buffer */
- /* ASSERT(((unsigned long)netDevice->ReceiveBuffer & (PAGE_SIZE - 1)) == */
- /* 0); */
DPRINT_INFO(NETVSC, "Establis...
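The page-alignment ASSERT being deleted is guaranteed by the allocator itself, since __get_free_pages() always returns page-aligned, whole-page memory; a minimal sketch of the surviving allocation, with the order expression assumed because the diff truncates it:

	net_device->recv_buf =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(net_device->recv_buf_size));	/* order assumed */
	/* (unsigned long)net_device->recv_buf & (PAGE_SIZE - 1) is 0 by construction */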
2011 Jul 01
1
[79030.229547] motion: page allocation failure: order:6, mode:0xd4
...warn_alloc_failed+0xf3/0x140
[79030.229606] [<ffffffff810080df>] ? xen_restore_fl_direct_reloc+0x4/0x4
[79030.229616] [<ffffffff810e22b3>] __alloc_pages_nodemask+0x533/0x700
[79030.229625] [<ffffffff810080f2>] ? check_events+0x12/0x20
[79030.229635] [<ffffffff810e2547>] __get_free_pages+0x17/0x80
[79030.229645] [<ffffffff813f0af6>] xen_swiotlb_alloc_coherent+0x56/0x140
[79030.229656] [<ffffffff814ea68e>] ? usb_alloc_urb+0x1e/0x50
[79030.229666] [<ffffffff814f00f5>] hcd_buffer_alloc+0x95/0x150
[79030.229676] [<ffffffff814e1806>] usb_alloc_coherent+0x26/0...
2011 Feb 18
6
[Bug 34430] New: nouveau driver does not return VGA connector status breaking upower
https://bugs.freedesktop.org/show_bug.cgi?id=34430
Summary: nouveau driver does not return VGA connector status
breaking upower
Product: xorg
Version: unspecified
Platform: x86-64 (AMD64)
OS/Version: Linux (All)
Status: NEW
Severity: normal
Priority: medium
Component: Driver/nouveau