search for: kmem_cache_free

Displaying 20 results from an estimated 173 matches for "kmem_cache_free".

2007 Apr 18
0
[PATCH 2/2] Use page present for pae pdpes
...		if (!pmd)
			goto out_oom;
		SetPagePDE(virt_to_page(pmd));
-		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
+		set_pgd(&pgd[i], __pgd(_PAGE_PRESENT | __pa(pmd)));
	}
	return pgd;
out_oom:
	for (i--; i >= 0; i--) {
		ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i]) & PAGE_MASK));
	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
@@ -268,7 +268,7 @@ void pgd_free(pgd_t *pgd)
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i) {...
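An aside on the change above: the low bits of a PAE pgd entry hold flag bits, and the magic "1" the patch removes was the present bit, so the pmd's address must be recovered by stripping the flags. Masking with PAGE_MASK is more robust than subtracting 1 because it stays correct even if further low flag bits are ever set. A minimal user-space sketch of that encoding, assuming the usual 4 KiB pages; PAGE_SHIFT, PAGE_MASK and _PAGE_PRESENT here merely mirror the kernel constants:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define PAGE_SHIFT    12
#define PAGE_MASK     (~((uintptr_t)(1 << PAGE_SHIFT) - 1))
#define _PAGE_PRESENT 0x001

int main(void)
{
	uintptr_t pmd_phys = 0x12345000;               /* page-aligned pmd address */
	uintptr_t entry    = _PAGE_PRESENT | pmd_phys; /* what set_pgd() stores */

	/* Old decode: only works while bit 0 is the sole low bit set. */
	assert((entry - 1) == pmd_phys);

	/* New decode: masks off every flag bit, so it survives additional
	 * low flags (accessed, dirty, ...) ever being set in the entry. */
	assert((entry & PAGE_MASK) == pmd_phys);

	printf("entry=%#lx -> pmd=%#lx\n",
	       (unsigned long)entry, (unsigned long)(entry & PAGE_MASK));
	return 0;
}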
2007 Apr 18
1
[PATCH 1/5] Add pagetable allocation notifiers
...alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
+		SetPagePDE(virt_to_page(pmd));
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;
out_oom:
-	for (i--; i >= 0; i--)
+	for (i--; i >= 0; i--) {
+		ClearPagePDE(pfn_to_page(pgd_val(pgd[i]) >> PAGE_SHIFT));
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
@@ -261,8 +266,10 @@ void pgd_free(pgd_t *pgd)
	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
-		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
-			k...
2007 Apr 18
0
[PATCH 1/5] Paravirt page alloc.patch
...+281,18 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
+		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;
out_oom:
-	for (i--; i >= 0; i--)
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	for (i--; i >= 0; i--) {
+		pgd_t pgdent = pgd[i];
+		void* pmd = (void *)__va(pgd_val(pgdent)-1);
+		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+		kmem_cache_free(pmd_cache, pmd);
+	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
@@...
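Note how the two hooks bracket a pmd page's lifetime: paravirt_alloc_pd() fires before the page is first used as a page directory, paravirt_release_pd() before it goes back to the slab, so a hypervisor backend can pin and unpin the frame. A stand-alone sketch of that pattern, with illustrative names and a trimmed ops table (not the kernel's actual paravirt_ops layout):

#include <stdio.h>

struct pv_pd_ops {
	void (*alloc_pd)(unsigned long pfn);   /* pfn becomes a page directory */
	void (*release_pd)(unsigned long pfn); /* pfn goes back to the slab */
};

static void nop_pd(unsigned long pfn) { (void)pfn; }  /* native: do nothing */
static void vmi_alloc_pd(unsigned long pfn)   { printf("pin   pfn %lu\n", pfn); }
static void vmi_release_pd(unsigned long pfn) { printf("unpin pfn %lu\n", pfn); }

static struct pv_pd_ops pv = { nop_pd, nop_pd };

int main(void)
{
	pv = (struct pv_pd_ops){ vmi_alloc_pd, vmi_release_pd };  /* backend registers */
	pv.alloc_pd(42);    /* called right after kmem_cache_alloc() */
	pv.release_pd(42);  /* called right before kmem_cache_free() */
	return 0;
}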
2007 Apr 18
0
[PATCH 1/6] Page allocation hooks for VMI backend
...+281,18 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
+		paravirt_alloc_pd(__pa(pmd) >> PAGE_SHIFT);
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;
out_oom:
-	for (i--; i >= 0; i--)
-		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
+	for (i--; i >= 0; i--) {
+		pgd_t pgdent = pgd[i];
+		void* pmd = (void *)__va(pgd_val(pgdent)-1);
+		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
+		kmem_cache_free(pmd_cache, pmd);
+	}
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}
@@...
2005 Sep 09
7
[PATCH 0/6] jbd cleanup
The following 6 patches clean up the jbd code and kill about 200 lines. The first 4 patches can apply to 2.6.13-git8 and 2.6.13-mm2; the rest can apply to 2.6.13-mm2.

 fs/jbd/checkpoint.c | 179 +++++++++++-------------------------------
 fs/jbd/commit.c     | 101 ++++++++++--------------
 fs/jbd/journal.c    |  11 +-
 fs/jbd/revoke.c     | 158
2007 Apr 18
2
pgd_alloc and [cd]tors
Is there any real use in having a ctor/dtor for the pgd cache? Given that all pgd allocation happens via pgd_alloc/pgd_free, why not just fold the [cd]tor in? I'm asking because Xen wants pgd[3] to be unshared in the PAE case, and it looks to me like the easiest way to handle that is by making pgd_alloc/free pv-ops and doing the appropriate thing in the Xen code. Would need to sort out the
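For context, folding the [cd]tor in would mean the cache is created without a constructor and pgd_alloc/pgd_free run the setup and teardown themselves, which is what makes them natural pv-op boundaries. A user-space analogue of that shape, with every name illustrative (malloc/free stand in for the kmem_cache calls):

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long entries[4]; } pgd_t;

static void pgd_ctor(pgd_t *pgd) { memset(pgd, 0, sizeof(*pgd)); }  /* was the cache ctor */
static void pgd_dtor(pgd_t *pgd) { (void)pgd; /* e.g. unpin for Xen */ }

/* Folded-in version: ctor/dtor live in the wrappers, so a pv-op (or the
 * Xen backend) could override these two functions wholesale, e.g. to keep
 * pgd[3] unshared in the PAE case. */
pgd_t *pgd_alloc(void)
{
	pgd_t *pgd = malloc(sizeof(*pgd));  /* stands in for kmem_cache_alloc */
	if (pgd)
		pgd_ctor(pgd);
	return pgd;
}

void pgd_free(pgd_t *pgd)
{
	pgd_dtor(pgd);
	free(pgd);                          /* stands in for kmem_cache_free */
}

int main(void)
{
	pgd_t *pgd = pgd_alloc();
	pgd_free(pgd);
	return 0;
}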
2001 May 02
4
oops 2.2.19 ext3 0.0.6b prune_dcache
...anymore pipe it through ksymoops - I ensured klogd got the correct System.map so the result should be reliable.

Apr 25 17:03:10 Unable to handle kernel paging request at virtual address 8efd1fc8
current->tss.cr3 = 0981e000, %%cr3 = 0981e000
*pde = 00000000
Oops: 0000
CPU: 0
EIP: 0010:[kmem_cache_free+76/620]
EFLAGS: 00010082
eax: cfffc620 ebx: cefd13bc ecx: cefd1360 edx: cefd13a0
esi: cfffc620 edi: 8efd1fc0 ebp: cefd1340 esp: cd511d1c
ds: 0018 es: 0018 ss: 0018
Process md5sum (pid: 23655, process nr: 150, stackpage=cd511000)
Stack: 00000409 ffff7659 cefd1f40 ca2d3280 ca77b8a0 00...
2011 Mar 28
22
[PATCH 00/22] Staging: hv: Cleanup-storage-drivers-phase-III
This patch-set deals with some of the style issues in blkvsc_drv.c. We also get rid of most of the "dead code" in this file:
1) Get rid of most of the forward declarations in this file. The only remaining forward declarations are there to deal with circular dependencies.
2) Get rid of most of the dead code in the file. Some of the functions in this file are placeholders - they
2013 Jan 16
6
[PATCH V2] mm/slab: add a leak decoder callback
...cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
			unsigned long, void (*)(void *), struct kmem_cache *);
 void kmem_cache_destroy(struct kmem_cache *);
+void kmem_cache_destroy_decoder(struct kmem_cache *, void (*)(void *));
 int kmem_cache_shrink(struct kmem_cache *);
 void kmem_cache_free(struct kmem_cache *, void *);

diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 8bb6e0e..7ca8309 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -48,6 +48,7 @@ struct kmem_cache {
	/* constructor func */
	void (*ctor)(void *obj);
+	void (*decoder)(...
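Going by the signature added above, the caller hands kmem_cache_destroy_decoder() a callback that runs on each object still live at destroy time, so leaks can be reported with object-specific detail. A user-space sketch of that idea under those assumptions -- the fixed-size tracking array merely stands in for the slab's own bookkeeping:

#include <stdio.h>
#include <stdlib.h>

#define MAX_OBJS 16

struct kmem_cache {
	void *live[MAX_OBJS];            /* outstanding allocations */
	int   nr_live;
};

static void *cache_alloc(struct kmem_cache *c, size_t size)
{
	void *obj = malloc(size);
	if (obj && c->nr_live < MAX_OBJS)
		c->live[c->nr_live++] = obj;
	return obj;
}

static void cache_destroy_decoder(struct kmem_cache *c, void (*decoder)(void *))
{
	for (int i = 0; i < c->nr_live; i++) {
		decoder(c->live[i]);     /* let the owner describe the leak */
		free(c->live[i]);
	}
	c->nr_live = 0;
}

struct conn { int id; };

static void conn_decoder(void *obj)
{
	printf("leaked conn id=%d\n", ((struct conn *)obj)->id);
}

int main(void)
{
	struct kmem_cache cache = { .nr_live = 0 };
	struct conn *c = cache_alloc(&cache, sizeof(*c));
	if (c)
		c->id = 7;               /* never freed: simulate a leak */
	cache_destroy_decoder(&cache, conn_decoder);
	return 0;
}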
2011 Apr 06
20
[RESEND][PATCH 00/22] Staging: hv: Cleanup storage drivers - Phase IV
The latest upstream merge changed struct block_device_operations: it got rid of blkvsc_media_changed and introduced the function blkvsc_check_events. This broke all the patches that were sent after the tree was closed the last time, so this is a resend of the patch-set to account for that change in the kernel. More cleanup: in this patch-set we deal with the following issues: 1) While a
2017 Mar 01
2
[PATCH] drm: virtio: use kmem_cache
...vbuf(struct virtio_gpu_device *vgdev,
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
-	spin_lock(&vgdev->free_vbufs_lock);
-	list_add(&vbuf->list, &vgdev->free_vbufs);
-	spin_unlock(&vgdev->free_vbufs_lock);
+	kmem_cache_free(vgdev->vbufs, vbuf);
}

static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
--
1.8.3.1
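The patch above trades a hand-rolled free list (and the spinlock protecting it) for a dedicated slab cache, which already does per-CPU caching and locking internally. A kernel-style sketch of that pattern, compilable only in kernel context; the cache name and helpers here are illustrative, not the patch's exact code:

#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *vbufs;

static int vbufs_init(size_t size)
{
	/* One cache for all vbufs; SLAB_HWCACHE_ALIGN avoids false sharing. */
	vbufs = kmem_cache_create("virtio-gpu-vbufs", size, 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	return vbufs ? 0 : -ENOMEM;
}

static void *vbuf_get(void)
{
	return kmem_cache_zalloc(vbufs, GFP_KERNEL); /* alloc + zero */
}

static void vbuf_put(void *vbuf)
{
	kmem_cache_free(vbufs, vbuf);   /* replaces list_add + spinlock */
}

static void vbufs_fini(void)
{
	kmem_cache_destroy(vbufs);      /* all objects must be freed by now */
}

A side benefit: the slab allocator complains if objects are still live when the cache is destroyed, a leak signal the open-coded list never gave.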
2008 May 20
4
[PATCH O/4] BIO tracking take2
Hi all, with this series of patches you can determine the owner of any type of I/O. I ported the previous version to linux-2.6.26-rc2-mm1. This lets dm-ioband -- an I/O bandwidth controller -- control block I/O bandwidth even when it accepts delayed write requests: dm-ioband can find the owner cgroup of each request. It is also possible that the OpenVZ team and NEC Uchida-san's team