Displaying 20 results from an estimated 71 matches for "lookup_address".
2008 May 15
0
[PATCH] linux/x86: utilize lookup_address() for virt_to_ptep()
...==================================================
--- head-2008-05-08.orig/arch/x86_64/mm/pageattr-xen.c 2008-05-15 13:44:37.000000000 +0200
+++ head-2008-05-08/arch/x86_64/mm/pageattr-xen.c 2008-05-15 14:44:59.000000000 +0200
@@ -276,7 +276,7 @@
}
#endif /* CONFIG_XEN */
-static inline pte_t *lookup_address(unsigned long address)
+pte_t *lookup_address(unsigned long address)
{
pgd_t *pgd = pgd_offset_k(address);
pud_t *pud;
Index: head-2008-05-08/include/asm-i386/mach-xen/asm/pgtable.h
===================================================================
--- head-2008-05-08.orig/include/asm-i386...
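For context, the helper being un-inlined in this patch is a textbook page-table walk. Below is a minimal sketch of a lookup_address()-style walk, illustrative only and not the kernel's exact code: helper names (pgd_offset_k, pud_offset, pmd_offset, pte_offset_kernel, pmd_large) are the classic x86 ones, signatures and the number of levels differ between kernel versions, and this builds only in-kernel.

/*
 * Illustrative sketch: descend pgd -> pud -> pmd -> pte for a kernel
 * virtual address and return a pointer to the PTE, or NULL if any
 * level is not present.
 */
static pte_t *lookup_address_sketch(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);	/* kernel page-global dir entry */
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	/* For a 2M/1G mapping the "pte" is really the pmd/pud entry itself. */
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}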
2016 Mar 03
1
RFC: [PATCH] x86/kmmio: fix mmiotrace for hugepages
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}
/* You must be holding RCU r...
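The key change in this fix is that the hash bucket is derived from the start of whatever mapping covers the address, using the level reported by lookup_address() and page_level_mask(), rather than from the raw address. A small user-space illustration of why that matters, with hypothetical addresses and hand-expanded masks (the LEVEL_MASK_* names below are made-up stand-ins for page_level_mask(PG_LEVEL_4K/2M)):

/*
 * Illustrative, user-space only: a probe registered on a 2M hugepage
 * and a fault somewhere inside it must collapse to the same hash key.
 */
#include <stdint.h>
#include <stdio.h>

#define LEVEL_MASK_4K	(~(((uint64_t)1 << 12) - 1))
#define LEVEL_MASK_2M	(~(((uint64_t)1 << 21) - 1))

int main(void)
{
	uint64_t base  = 0xffffc90000200000ULL;	/* hypothetical 2M mapping start */
	uint64_t fault = base + 0x1234;		/* a fault inside that mapping   */

	/* Masking only at 4K granularity keeps the two apart... */
	printf("4K: %#llx vs %#llx\n",
	       (unsigned long long)(base & LEVEL_MASK_4K),
	       (unsigned long long)(fault & LEVEL_MASK_4K));
	/* ...masking with the mapping's own (2M) mask makes them match. */
	printf("2M: %#llx vs %#llx\n",
	       (unsigned long long)(base & LEVEL_MASK_2M),
	       (unsigned long long)(fault & LEVEL_MASK_2M));
	return 0;
}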
2016 May 03
0
[PATCH 4.5 160/200] x86/mm/kmmio: Fix mmiotrace for hugepages
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_pro
}
/* You must be holding RCU read lock. */
-static s...
2016 May 03
0
[PATCH 4.4 137/163] x86/mm/kmmio: Fix mmiotrace for hugepages
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_pro
}
/* You must be holding RCU read lock. */
-static s...
2016 Jul 12
0
[added to the 4.1 stable tree] x86/mm/kmmio: Fix mmiotrace for hugepages
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}
/* You must be holding RCU r...
2016 Jul 12
0
[added to the 3.18 stable tree] x86/mm/kmmio: Fix mmiotrace for hugepages
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+
+ if (!pte)
+ return NULL;
+ addr &= page_level_mask(l);
+
+ return &kmmio_page_table[hash_long(addr, KMMIO_PAGE_HASH_BITS)];
}
/* Accessed per-cpu */
@@ -98,15 +105,19 @@ static struct kmmio_probe *get_kmmio_probe(unsigned long addr)
}
/* You must be holding RCU r...
2016 Feb 09
0
mmiotrace fix
...age_table[KMMIO_PAGE_TABLE_SIZE];
static LIST_HEAD(kmmio_probes);
-static struct list_head *kmmio_page_list(unsigned long page)
+static struct list_head *kmmio_page_list(unsigned long addr)
{
- return &kmmio_page_table[hash_long(page, KMMIO_PAGE_HASH_BITS)];
+ unsigned int l;
+ pte_t *pte = lookup_address(addr, &l);
+ if (!pte)
+ return NULL;
+
+ switch (l) {
+ case PG_LEVEL_4K:
+ addr &= (~(u64)(0x0)) << 12;
+ break;
+ case PG_LEVEL_2M:
+ addr &= (~(u64)(0x0)) << 21;
+ break;
+ case PG_LEVEL_1G:
+ addr &= (~(u64)(0x0)) << 30;
+ break;
+ default:
+ return NU...
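This earliest version of the fix open-codes the per-level masks with a switch on the page level; the merged patch above expresses the same thing via page_level_mask(). A tiny user-space sanity check of that equivalence, assuming the usual 12/21/30 shift values for the 4K/2M/1G levels:

/* Illustrative, user-space only: the open-coded shifts equal the
 * corresponding "clear the low N bits" masks. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	assert(((~(uint64_t)0) << 12) == ~(((uint64_t)1 << 12) - 1));	/* 4K */
	assert(((~(uint64_t)0) << 21) == ~(((uint64_t)1 << 21) - 1));	/* 2M */
	assert(((~(uint64_t)0) << 30) == ~(((uint64_t)1 << 30) - 1));	/* 1G */
	return 0;
}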
2017 Nov 27
0
[PATCH] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page...
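Here the probe's page-aligned start (p->addr & PAGE_MASK) is computed once and used consistently for the probe lookup, the PTE lookup and the fault-page loop, while size_lim is grown by the in-page offset so the loop still spans the whole probed range. A minimal user-space walk-through with hypothetical values (PAGE_SIZE and PAGE_MASK are redefined locally just for the illustration):

/*
 * Illustrative, user-space only: align the probe start down to a page
 * boundary and extend the length by the in-page offset, so stepping
 * page by page still covers the whole probed range.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long p_addr = 0xd0000123UL;	/* hypothetical unaligned p->addr */
	unsigned long p_len  = 0x100UL;		/* hypothetical p->len            */

	unsigned long addr     = p_addr & PAGE_MASK;		/* 0xd0000000 */
	unsigned long size_lim = p_len + (p_addr & ~PAGE_MASK);	/* 0x223      */

	for (unsigned long size = 0; size < size_lim; size += PAGE_SIZE)
		printf("arm fault page at %#lx\n", addr + size);
	return 0;
}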
2018 Feb 23
0
[PATCH 4.14 148/159] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_pr
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page(p->...
2018 Feb 23
0
[PATCH 3.18 54/58] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_pr
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page(p->...
2018 Feb 23
0
[PATCH 4.4 059/193] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_pr
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page(p->...
2018 Feb 23
0
[PATCH 4.9 083/145] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_pr
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page(p->...
2018 Jan 28
0
[PATCH AUTOSEL for 4.14 095/100] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page...
2018 Jan 28
0
[PATCH AUTOSEL for 4.4 34/36] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page...
2018 Jan 28
0
[PATCH AUTOSEL for 4.9 46/49] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page...
2018 Jan 28
0
[PATCH AUTOSEL for 3.18 23/25] x86/mm/kmmio: Fix mmiotrace for page unaligned addresses
...r = p->addr & PAGE_MASK;
const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK);
unsigned int l;
pte_t *pte;
spin_lock_irqsave(&kmmio_lock, flags);
- if (get_kmmio_probe(p->addr)) {
+ if (get_kmmio_probe(addr)) {
ret = -EEXIST;
goto out;
}
- pte = lookup_address(p->addr, &l);
+ pte = lookup_address(addr, &l);
if (!pte) {
ret = -EINVAL;
goto out;
@@ -453,7 +454,7 @@ int register_kmmio_probe(struct kmmio_probe *p)
kmmio_count++;
list_add_rcu(&p->list, &kmmio_probes);
while (size < size_lim) {
- if (add_kmmio_fault_page...
2008 Jan 18
6
[PATCH 0/10] Tree fixes for PARAVIRT
Hi,
This small series provides some more fixes towards the goal
of making PARAVIRT selectable for x86_64. After that, just
some more small steps are needed.
The first fix is not even specific to PARAVIRT, and it's actually
preventing the whole tree from booting.
2007 Jul 24
1
lguest doesn't work on kernel 2.6.23-rc1
...ssor 2800+ stepping 02
BUG: unable to handle kernel NULL pointer dereference at virtual address 00000001
printing eip:
c010fc23
*pde = 00000000
Oops: 0002 [#1]
Modules linked in:
CPU: 0
EIP: 0061:[<c010fc23>] Not tainted VLI
EFLAGS: 00010086 (2.6.23-rc1 #3)
EIP is at lookup_address+0x23/0x70
eax: 00001067 ebx: c04c2c00 ecx: 00000001 edx: c04509a4
esi: c010fc23 edi: c010fc24 ebp: c010fc23 esp: c0491f70
ds: 0069 es: 0069 fs: 0000 gs: 0000 ss: 0069
Process swapper (pid: 0, ti=c0490000 task=c044c280 task.ti=c0490000)
Stack: 00000001 c04509a4 c0107ca2...