Displaying 20 results from an estimated 63 matches for "lockdep_assert_held".
2019 May 20
5
[PATCH 1/2] drm: Add drm_gem_vram_{pin/unpin}_reserved() and convert mgag200
...> + * 0 on success, or
> + * a negative error code otherwise.
> + */
> +int drm_gem_vram_pin_reserved(struct drm_gem_vram_object *gbo,
> + unsigned long pl_flag)
> +{
> + int i, ret;
> + struct ttm_operation_ctx ctx = { false, false };
I think it would be good to have a lockdep_assert_held here for the ww_mutex.
Also, a general thing: _reserved is kinda ttm lingo; for dma-buf reservations
we call the structure tracking the fences+lock the "reservation", but the
naming scheme used is _lock/_unlock.
I think it would be good to be consistent with that, and use _locked here.
Especi...
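A rough sketch of what the suggested assertion could look like; the resv/lock field names are assumptions mirroring the reservation_object of that era (compare the nouveau hunk further down), not the final API:

static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo,
				   unsigned long pl_flag)
{
	/* Caller must already hold the BO's reservation ww_mutex. */
	lockdep_assert_held(&gbo->bo.resv->lock.base);

	/* ... placement setup and ttm_bo_validate() as in the patch ... */
	return 0;
}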
2019 Nov 12
0
[PATCH v3 03/14] mm/hmm: allow hmm_range to be used with a mmu_interval_notifier or hmm_mirror
...(struct hmm_range *range, unsigned int flags)
const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
unsigned long start = range->start, end;
struct hmm_vma_walk hmm_vma_walk;
- struct hmm *hmm = range->hmm;
+ struct mm_struct *mm;
struct vm_area_struct *vma;
int ret;
- lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
+ if (range->notifier)
+ mm = range->notifier->mm;
+ else
+ mm = range->hmm->mmu_notifier.mm;
+
+ lockdep_assert_held(&mm->mmap_sem);
do {
/* If range is no longer valid force retry. */
- if (!range->valid)
+ if (needs_...
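The assertion encodes a caller contract; a conforming caller would look roughly like this (names per the kernel of that era, error handling elided):

	down_read(&mm->mmap_sem);	/* hmm_range_fault() asserts this */
	ret = hmm_range_fault(range, flags);
	up_read(&mm->mmap_sem);

Note that lockdep_assert_held() only checks that mmap_sem is held at all, not whether it is held for read or for write.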
2018 Jul 20
4
Memory Read Only Enforcement: VMM assisted kernel rootkit mitigation for KVM V4
Here is the changelog from V3 to V4:
- Fixing spelling/grammar mistakes suggested by Randy Dunlap
- Changing the hypercall interface to process multiple pages per
hypercall, also suggested by Randy Dunlap. It turns out that this will
save lots of vmexits/memory-slot flushes when protecting many
pages.
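A hypothetical guest-side wrapper to illustrate the batched interface; KVM_HC_ROE and the argument layout are assumptions for illustration, not necessarily the ABI the series defines:

#include <asm/kvm_para.h>

/* Protect npages pages starting at gpa with a single hypercall,
 * i.e. one vmexit instead of one per page. */
static long roe_protect_range(unsigned long gpa, unsigned long npages)
{
	return kvm_hypercall2(KVM_HC_ROE, gpa, npages);
}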
[PATCH RFC V4 1/3] KVM: X86: Memory ROE documentation
[PATCH RFC V4 2/3] KVM:
2018 Jul 19
0
[PATCH 3/3] [RFC V3] KVM: X86: Adding skeleton for Memory ROE
...kvm->slots_lock otherwise tlb flush would be missed.
*/
@@ -5301,7 +5351,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
false, NULL);
spin_unlock(&kvm->mmu_lock);
- /* see kvm_mmu_slot_remove_write_access */
+ /* see kvm_mmu_slot_apply_write_access */
lockdep_assert_held(&kvm->slots_lock);
if (flush)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0046aa70205a..9addc46d75be 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4177,7 +4177,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
/*
* All th...
2019 May 21
0
[PATCH 1/2] drm: Add drm_gem_vram_{pin/unpin}_reserved() and convert mgag200
Hi,
> I think it would be good to have a lockdep_assert_held here for the ww_mutex.
>
> Also, a general thing: _reserved is kinda ttm lingo; for dma-buf reservations
> we call the structure tracking the fences+lock the "reservation", but the
> naming scheme used is _lock/_unlock.
>
> I think it would be good to be consistent with that...
2020 Apr 08
0
[RFC PATCH 15/26] x86/alternatives: Non-emulated text poking
On Tue, Apr 07, 2020 at 10:03:12PM -0700, Ankur Arora wrote:
> +static int __maybe_unused text_poke_late(patch_worker_t worker, void *stage)
> +{
> + int ret;
> +
> + lockdep_assert_held(&text_mutex);
> +
> + if (system_state != SYSTEM_RUNNING)
> + return -EINVAL;
> +
> + text_poke_state.stage = stage;
> + text_poke_state.num_acks = cpumask_weight(cpu_online_mask);
> + text_poke_state.head = &alt_modules;
> +
> + text_poke_state.patch_worker = wo...
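The lockdep_assert_held(&text_mutex) encodes the contract that callers take the mutex themselves; a caller would look roughly like this (signature per the quoted RFC, not mainline):

	mutex_lock(&text_mutex);
	ret = text_poke_late(worker, stage);
	mutex_unlock(&text_mutex);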
2014 May 14
0
[RFC PATCH v1 04/16] drm/nouveau: require reservations for nouveau_fence_sync and nouveau_bo_fence
...4525a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1431,6 +1431,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
struct nouveau_fence *old_fence = NULL;
+ lockdep_assert_held(&nvbo->bo.resv->lock.base);
+
spin_lock(&nvbo->bo.bdev->fence_lock);
old_fence = nvbo->bo.sync_obj;
nvbo->bo.sync_obj = new_fence;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index da764a4ed958..61b8c3375135 1006...
2014 Jul 09
0
[PATCH 05/17] drm/ttm: call ttm_bo_wait while inside a reservation
...ret = ttm_bo_wait(bo, false, false, true);
+ WARN_ON(ret);
+ }
+ spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
__ttm_bo_unreserve(bo);
@@ -1539,6 +1536,8 @@ int ttm_bo_wait(struct ttm_buffer_object *bo,
void *sync_obj;
int ret = 0;
+ lockdep_assert_held(&bo->resv->lock.base);
+
if (likely(bo->sync_obj == NULL))
return 0;
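A sketch of the calling convention the new assertion enforces, using the TTM API of that era (the argument values here are placeholders):

	/* Reserve (lock) the BO before waiting on its fences. */
	ret = ttm_bo_reserve(bo, true, false, false, NULL);
	if (ret == 0) {
		ret = ttm_bo_wait(bo, false, true, false);
		ttm_bo_unreserve(bo);
	}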
2018 Jul 20
0
[PATCH RFC V4 3/3] KVM: X86: Adding skeleton for Memory ROE
...kvm->slots_lock otherwise tlb flush would be missed.
*/
@@ -5301,7 +5351,7 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
false, NULL);
spin_unlock(&kvm->mmu_lock);
- /* see kvm_mmu_slot_remove_write_access */
+ /* see kvm_mmu_slot_apply_write_access */
lockdep_assert_held(&kvm->slots_lock);
if (flush)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2b812b3c5088..de58ddaf5260 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4179,7 +4179,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
/*
* All th...
2019 May 20
1
[PATCH 1/2] drm: Add drm_gem_vram_{pin/unpin}_reserved() and convert mgag200
...> + */
>>> +int drm_gem_vram_pin_reserved(struct drm_gem_vram_object *gbo,
>>> + unsigned long pl_flag)
>>> +{
>>> + int i, ret;
>>> + struct ttm_operation_ctx ctx = { false, false };
>> I think it would be good to have a lockdep_assert_held here for the ww_mutex.
>>
>> Also, a general thing: _reserved is kinda ttm lingo; for dma-buf reservations
>> we call the structure tracking the fences+lock the "reservation", but the
>> naming scheme used is _lock/_unlock.
>>
>> I think it would be good to b...
2018 Jul 19
8
Memory Read Only Enforcement: VMM assisted kernel rootkit mitigation for KVM
Hi,
This is my first set of patches that works as I would expect, and the
third revision I have sent to the mailing lists.
Following up on my previous discussions about kernel rootkit mitigation
via placing R/O protection on critical data structures, static data, and
privileged registers with static content: these patches present the
first part, where it is only possible to place these protections on
memory
2014 Jan 14
1
[PATCH 1/2] drm/nouveau: hold mutex while syncing to kernel channel
Not holding the mutex potentially causes corruption of the kernel
channel when page flipping.
Cc: stable at vger.kernel.org #3.13
Signed-off-by: Maarten Lankhorst <maarten.lankhorst at canonical.com>
---
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efdfc7dd..76e3cf025c10 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
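A sketch of the shape of the fix, assuming the 3.13-era nouveau structures (chan->cli->mutex guarding submissions on the client's channels; exact call sites may differ):

	mutex_lock(&chan->cli->mutex);
	ret = nouveau_fence_sync(fence, chan);
	mutex_unlock(&chan->cli->mutex);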
2019 Jul 26
0
[PATCH v2 5/7] mm/hmm: make full use of walk_page_range()
...unsigned long start = range->start, end;
- struct hmm_vma_walk hmm_vma_walk;
+ unsigned long start = range->start;
+ struct hmm_vma_walk hmm_vma_walk = {};
struct hmm *hmm = range->hmm;
- struct vm_area_struct *vma;
- struct mm_walk mm_walk;
+ struct mm_walk mm_walk = {};
int ret;
lockdep_assert_held(&hmm->mm->mmap_sem);
- do {
- /* If range is no longer valid force retry. */
- if (!range->valid)
- return -EBUSY;
+ hmm_vma_walk.range = range;
+ hmm_vma_walk.last = start;
+ hmm_vma_walk.flags = flags;
+ mm_walk.private = &hmm_vma_walk;
- vma = find_vma(hmm->mm, start...
2019 Sep 11
0
[PATCH 1/4] mm/hmm: make full use of walk_page_range()
...nsigned long start = range->start, end;
- struct hmm_vma_walk hmm_vma_walk;
+ unsigned long start = range->start;
+ struct hmm_vma_walk hmm_vma_walk = {
+ .range = range,
+ .last = start,
+ .flags = flags,
+ };
struct hmm *hmm = range->hmm;
- struct vm_area_struct *vma;
int ret;
lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
do {
- /* If range is no longer valid force retry. */
- if (!range->valid)
- return -EBUSY;
-
- vma = find_vma(hmm->mmu_notifier.mm, start);
- if (vma == NULL || (vma->vm_flags & device_vma))
- return -EFAULT;
-
- if (!(vma->v...
2019 Apr 10
0
[PATCH v5 3/6] libnvdimm: add dax_dev sync flag
...x_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_SYNC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_synchronous);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
@@ -511,7 +519,7 @@ static void dax_add_host(struct dax_device *dax_dev, const char *host)
}
struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops)
+ const struct dax_operations *ops, bool sync)
{
struct dax_device *dax_dev;
co...
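The dax_srcu assertion documents that dax_alive() may only be called under the DAX SRCU read lock, which callers take via dax_read_lock():

	int id = dax_read_lock();

	if (dax_alive(dax_dev)) {
		/* dax_dev cannot be torn down while it is used here */
	}
	dax_read_unlock(id);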
2019 Apr 26
0
[PATCH v7 3/6] libnvdimm: add dax_dev sync flag
...x_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_SYNC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_synchronous);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
@@ -511,7 +519,7 @@ static void dax_add_host(struct dax_device *dax_dev, const char *host)
}
struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops)
+ const struct dax_operations *ops, bool sync)
{
struct dax_device *dax_dev;
co...
2019 May 10
0
[PATCH v8 3/6] libnvdimm: add dax_dev sync flag
...x_write_cache_enabled(struct dax_device *dax_dev)
}
EXPORT_SYMBOL_GPL(dax_write_cache_enabled);
+bool dax_synchronous(struct dax_device *dax_dev)
+{
+ return test_bit(DAXDEV_SYNC, &dax_dev->flags);
+}
+EXPORT_SYMBOL_GPL(dax_synchronous);
+
bool dax_alive(struct dax_device *dax_dev)
{
lockdep_assert_held(&dax_srcu);
@@ -508,7 +516,7 @@ static void dax_add_host(struct dax_device *dax_dev, const char *host)
}
struct dax_device *alloc_dax(void *private, const char *__host,
- const struct dax_operations *ops)
+ const struct dax_operations *ops, unsigned long flags)
{
struct dax_device *dax...
2020 Mar 20
0
[PATCH 3/4] mm: simplify device private page handling in hmm_range_fault
...understand.
> If you change tools/testing/selftests/vm/hmm-tests.c line 868 to
> ASSERT_EQ(ret, -1);
> then the test will abort, core dump, and cause two problems:
> 1) the migrated page will be faulted back to system memory in order to write
> it to the core dump. This triggers lockdep_assert_held(&walk.mm->mmap_sem)
> in walk_page_range().
Has the migration stuff become entangled with the xarray?
> [ 137.980718] Code: 80 2f 1a 83 c6 05 e9 8d 7b 01 01 e8 3e b1 b1 fe e9 05 ff ff ff 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 00 41 56 41 55 41 54 55 <48> 89 fd 53 4c 8d 6d 1...