Displaying 18 results from an estimated 18 matches for "pa_section_shift".
2020 May 08
4
[PATCH 2/2] nouveau: fix dependencies for DEVICE_PRIVATE
...unmet direct dependencies detected for DEVICE_PRIVATE
Depends on [n]: ZONE_DEVICE [=n]
Selected by [y]:
- DRM_NOUVEAU_SVM [=y] && HAS_IOMEM [=y] && DRM_NOUVEAU [=y] && MMU [=y] && STAGING [=y]
kernel/resource.c:1653:28: error: use of undeclared identifier 'PA_SECTION_SHIFT'
size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
^
kernel/resource.c:1654:48: error: use of undeclared identifier 'MAX_PHYSMEM_BITS'
Add a dependency for Nouveau to avoid broken randconfig builds.
Fixes: d2c63df2242e ("mm/hmm: make CON...
2020 May 09
1
linux-next 20200508 - build failure in kernel/resource.c w/ SPARSEMEM=n
...,
from ./include/linux/mmdebug.h:5,
from ./include/linux/gfp.h:5,
from ./include/linux/slab.h:15,
from kernel/resource.c:17:
kernel/resource.c: In function '__request_free_mem_region':
kernel/resource.c:1653:28: error: 'PA_SECTION_SHIFT' undeclared (first use in this function); did you mean 'SECTION_SHIFT'?
size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
^~~~~~~~~~~~~~~~
./include/uapi/linux/kernel.h:11:47: note: in definition of macro '__ALIGN_KERNEL_MASK'
#define __ALIGN_KERN...
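The undeclared identifiers here are not a typo in resource.c itself: PA_SECTION_SHIFT (and, on most architectures, MAX_PHYSMEM_BITS) only exists when CONFIG_SPARSEMEM is enabled. A simplified sketch of the relevant include/linux/mmzone.h definitions, with the per-architecture constants hedged in comments, shows why a SPARSEMEM=n build cannot resolve them:

#ifdef CONFIG_SPARSEMEM
/*
 * SECTION_SIZE_BITS and MAX_PHYSMEM_BITS are supplied by the architecture's
 * sparsemem support (e.g. asm/sparsemem.h); 27 bits = 128 MiB sections on
 * x86-64.
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#endif /* CONFIG_SPARSEMEM */

With SPARSEMEM=n neither macro is generated, so __request_free_mem_region() in kernel/resource.c fails to compile unless the configurations that can reach it are restricted, which is what the dependency fix above is about.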
2020 May 10
1
[PATCH hmm v2 1/5] mm/hmm: make CONFIG_DEVICE_PRIVATE into a select
...VICE_PRIVATE in that driver, allowing users to avoid enabling this if
> they don't want the overhead.
>
I'm not too sure what's going on here, but i386 allmodconfig broke.
kernel/resource.c: In function '__request_free_mem_region':
kernel/resource.c:1653:28: error: 'PA_SECTION_SHIFT' undeclared (first use in this function); did you mean 'SECTIONS_PGSHIFT'?
size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
because in current mainline, allmodconfig produces
CONFIG_DEVICE_PRIVATE=n but in current linux-next, allmodconfig
produces CONFIG_DEVICE_PRIVATE=y. But CONF...
2019 Jun 13
0
[PATCH 06/22] mm: factor out a devm_request_free_mem_region helper
...t can later be hotplugged as ZONE_DEVICE
+ * memory, which in turn allocates struct pages.
+ */
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size)
+{
+ resource_size_t end, addr;
+ struct resource *res;
+
+ size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
+ end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
+ addr = end - size + 1UL;
+
+ for (; addr > size && addr >= base->start; addr -= size) {
+ if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
+ continue;
+
+ res = de...
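The helper's approach is a straightforward top-down scan: round the requested size up to a whole memory section, clamp the search end to the highest addressable physical address, then step downward one aligned block at a time until region_intersects() reports a range no existing resource touches. Below is a minimal user-space sketch of just that arithmetic; region_intersects() is replaced by a stub, and PA_SECTION_SHIFT = 27 / MAX_PHYSMEM_BITS = 46 are placeholder values roughly matching x86-64:

#include <stdio.h>
#include <stdbool.h>

#define PA_SECTION_SHIFT  27
#define MAX_PHYSMEM_BITS  46
#define ALIGN(x, a)       (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

/* Stand-in for region_intersects(): pretend everything below 4 GiB is
 * already claimed and everything above it is free. */
static bool range_is_free(unsigned long addr, unsigned long size)
{
	(void)size;	/* size unused in this toy stub */
	return addr >= (1UL << 32);
}

int main(void)
{
	unsigned long base_start = 0, base_end = ~0UL;	/* iomem_resource-like span */
	unsigned long size = 64UL << 20;		/* request 64 MiB */
	unsigned long end, addr;

	/* round up to a whole section and clamp to the addressable range */
	size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
	end = base_end < ((1UL << MAX_PHYSMEM_BITS) - 1) ?
			base_end : ((1UL << MAX_PHYSMEM_BITS) - 1);
	addr = end - size + 1UL;

	/* walk downward in size-sized, section-aligned steps */
	for (; addr > size && addr >= base_start; addr -= size) {
		if (!range_is_free(addr, size))
			continue;
		printf("found free section-aligned range at %#lx\n", addr);
		return 0;
	}
	printf("no free range found\n");
	return 1;
}

Running the sketch prints the highest section-aligned slot the stub considers free, mirroring how the kernel helper picks the highest unused block below MAX_PHYSMEM_BITS.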
2019 Jun 17
0
[PATCH 06/25] mm: factor out a devm_request_free_mem_region helper
...can later be hotplugged as ZONE_DEVICE
+ * memory, which in turn allocates struct pages.
+ */
+struct resource *devm_request_free_mem_region(struct device *dev,
+ struct resource *base, unsigned long size)
+{
+ resource_size_t end, addr;
+ struct resource *res;
+
+ size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
+ end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
+ addr = end - size + 1UL;
+
+ for (; addr > size && addr >= base->start; addr -= size) {
+ if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
+ REGION_DISJOINT)
+ continue;
+
+ res = de...
2019 Jun 15
1
[PATCH 06/22] mm: factor out a devm_request_free_mem_region helper
...+ * memory, which in turn allocates struct pages.
> + */
> +struct resource *devm_request_free_mem_region(struct device *dev,
> + struct resource *base, unsigned long size)
> +{
> + resource_size_t end, addr;
> + struct resource *res;
> +
> + size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
> + end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
> + addr = end - size + 1UL;
> +
> + for (; addr > size && addr >= base->start; addr -= size) {
> + if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
> + REGION_DISJOIN...
2019 Jun 13
0
[PATCH 09/22] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...e bool put_devmap_managed_page(struct page *page)
{
return false;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 94b830b6eca5..6a3183cac764 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -17,6 +17,37 @@ static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_enable;
+
+static void dev_pagemap_put_ops(void *data)
+{
+ if (atomic_dec_and_test(&devmap_enab...
2019 Jun 17
0
[PATCH 10/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...e bool put_devmap_managed_page(struct page *page)
{
return false;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index ba7156bd52d1..7272027fbdd7 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -17,6 +17,35 @@ static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_enable;
+
+static void dev_pagemap_put_ops(void *data)
+{
+ if (atomic_dec_and_test(&devmap_enab...
2019 Jun 26
0
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...e bool put_devmap_managed_page(struct page *page)
{
return false;
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 00c1ceb60c19..3219a4c91d07 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -17,6 +17,35 @@ static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
#define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
+#ifdef CONFIG_DEV_PAGEMAP_OPS
+DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
+static atomic_t devmap_managed_enable;
+
+static void devmap_managed_enable_put(void *data)
+{
+ if (atomic_dec_and_test(&a...
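The hunks above are truncated, but the pattern they introduce is a refcounted static key: the first ZONE_DEVICE user that needs managed pages enables devmap_managed_key, and the last one to go away disables it again. A sketch of that shape, with the function bodies reconstructed rather than copied from the patch (only the names visible in the snippets are taken as given), might look like:

/* Not the literal patch body; a hedged reconstruction of the pattern. */
static void devmap_managed_enable_put(void *data)
{
	/* last managed pgmap gone: turn the fast-path static branch off */
	if (atomic_dec_and_test(&devmap_managed_enable))
		static_branch_disable(&devmap_managed_key);
}

static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgmap)
{
	/* first managed pgmap: turn the static branch on */
	if (atomic_inc_return(&devmap_managed_enable) == 1)
		static_branch_enable(&devmap_managed_key);

	/* drop the reference automatically when the device is torn down */
	return devm_add_action_or_reset(dev, devmap_managed_enable_put, NULL);
}

Tying the put side to a devm action is what lets devm_memremap_pages() own the whole enable/disable dance instead of each caller, which is the point of the patch title.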
2019 Jun 26
1
[PATCH 11/25] memremap: lift the devmap_enable manipulation into devm_memremap_pages
...ge *page)
> {
> return false;
> diff --git a/kernel/memremap.c b/kernel/memremap.c
> index 00c1ceb60c19..3219a4c91d07 100644
> --- a/kernel/memremap.c
> +++ b/kernel/memremap.c
> @@ -17,6 +17,35 @@ static DEFINE_XARRAY(pgmap_array);
> #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
> #define SECTION_SIZE (1UL << PA_SECTION_SHIFT)
>
> +#ifdef CONFIG_DEV_PAGEMAP_OPS
> +DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
> +EXPORT_SYMBOL(devmap_managed_key);
> +static atomic_t devmap_managed_enable;
> +
> +static void devmap_managed_enable_put(voi...
2019 Jun 13
1
[PATCH 06/22] mm: factor out a devm_request_free_mem_region helper
...+ * memory, which in turn allocates struct pages.
> + */
> +struct resource *devm_request_free_mem_region(struct device *dev,
> + struct resource *base, unsigned long size)
> +{
> + resource_size_t end, addr;
> + struct resource *res;
> +
> + size = ALIGN(size, 1UL << PA_SECTION_SHIFT);
> + end = min_t(unsigned long, base->end, (1UL << MAX_PHYSMEM_BITS) - 1);
Even fixed it to use min_t
> + addr = end - size + 1UL;
> + for (; addr > size && addr >= base->start; addr -= size) {
> + if (region_intersects(addr, size, 0, IORES_DESC_NONE) !=
>...
2019 Jun 13
0
[PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
...cac764..7167e717647d 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
-#include <linux/hmm.h>
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -48,36 +47,6 @@ static inline int dev_pagemap_enable(struct device *dev)
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
- unsigned long addr,
- swp_entry_t entry,
-...
2019 Jun 26
0
[PATCH 12/25] memremap: add a migrate_to_ram method to struct dev_pagemap_ops
...c91d07..c06a5487dda7 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -11,7 +11,6 @@
#include <linux/types.h>
#include <linux/wait_bit.h>
#include <linux/xarray.h>
-#include <linux/hmm.h>
static DEFINE_XARRAY(pgmap_array);
#define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -46,36 +45,6 @@ static int devmap_managed_enable_get(struct device *dev, struct dev_pagemap *pgm
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
- unsigned long addr,
- swp_e...
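For a driver, the net effect of this change is that the old device_private_entry_fault() hook becomes a migrate_to_ram method on struct dev_pagemap_ops. A hedged sketch of what the driver-side hookup can look like; the my_drv_* names are placeholders, not taken from any patch in this thread:

/* Called on a CPU fault against device-private memory; vmf->page is the
 * device-private page that must be migrated back to system RAM. */
static vm_fault_t my_drv_migrate_to_ram(struct vm_fault *vmf)
{
	/* migrate the page, then let the fault be retried against the copy */
	return 0;	/* or VM_FAULT_SIGBUS on failure */
}

static void my_drv_page_free(struct page *page)
{
	/* reclaim the device memory backing this struct page */
}

static const struct dev_pagemap_ops my_drv_pagemap_ops = {
	.page_free	= my_drv_page_free,
	.migrate_to_ram	= my_drv_migrate_to_ram,
};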
2019 Jun 13
1
[PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
...> +++ b/kernel/memremap.c
> @@ -11,7 +11,6 @@
> #include <linux/types.h>
> #include <linux/wait_bit.h>
> #include <linux/xarray.h>
> -#include <linux/hmm.h>
>
> static DEFINE_XARRAY(pgmap_array);
> #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
> @@ -48,36 +47,6 @@ static inline int dev_pagemap_enable(struct device *dev)
> }
> #endif /* CONFIG_DEV_PAGEMAP_OPS */
>
> -#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
> -vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
> - unsigned long addr,
>...
2019 Jun 13
57
dev_pagemap related cleanups
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable, which removes the need to wrap it in hmm
and thus allows us to kill a lot of code
Diffstat:
22 files changed, 245 insertions(+), 802 deletions(-)
Git tree:
git://git.infradead.org/users/hch/misc.git hmm-devmem-cleanup
Gitweb:
2019 Jun 17
34
dev_pagemap related cleanups v2
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable, which removes the need to wrap it in hmm
and thus allows us to kill a lot of code
Note: this series is on top of the rdma/hmm branch + the dev_pagemap
release fix series from Dan that went into 5.2-rc5.
Git tree:
git://git.infradead.org/users/hch/misc.git
2019 Jun 26
41
dev_pagemap related cleanups v3
Hi Dan, Jérôme and Jason,
below is a series that cleans up the dev_pagemap interface so that
it is more easily usable, which removes the need to wrap it in hmm
and thus allows us to kill a lot of code
Note: this series is on top of Linux 5.2-rc5 and has some minor
conflicts with the hmm tree that are easy to resolve.
Diffstat summary:
32 files changed, 361 insertions(+), 1012 deletions(-)
Git
2020 May 01
13
[PATCH hmm v2 0/5] Adjust hmm_range_fault() API
From: Jason Gunthorpe <jgg at mellanox.com>
The API is a bit complicated for the uses we actually have, and
discussions about simplifying it have come up a number of times.
This small series removes the customizable pfn format and simplifies the
return code of hmm_range_fault()
All the drivers are adjusted to process in the simplified format.
I would appreciate tested-by's for the two
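To make the simplification concrete, here is a minimal sketch of a caller of the adjusted API. It assumes the mmu_interval_notifier setup and mm locking are handled elsewhere, and that the flag names match the series (HMM_PFN_REQ_FAULT as an input default flag, HMM_PFN_VALID/HMM_PFN_WRITE as output bits in each hmm_pfns[] entry):

/* Hedged sketch: fault in a single page through the simplified API. */
static int fault_one_page(struct hmm_range *range, unsigned long addr)
{
	unsigned long hmm_pfn;
	int ret;

	range->start = addr;
	range->end = addr + PAGE_SIZE;
	range->hmm_pfns = &hmm_pfn;		/* one unsigned long per page */
	range->default_flags = HMM_PFN_REQ_FAULT;

	ret = hmm_range_fault(range);		/* now returns 0 or -errno */
	if (ret)
		return ret;			/* e.g. -EBUSY means collide-and-retry */

	/* hmm_pfn holds the pfn plus HMM_PFN_VALID / HMM_PFN_WRITE bits */
	return 0;
}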