search for: 016llx

Displaying 20 results from an estimated 94 matches for "016llx".

2014 Nov 22
1
Get rid of printf format warning: format ‘%llx’ expects type ‘long long unsigned int’, but argument 2 has type ‘uint64_t’
...i-memory.c +++ b/com32/hdt/hdt-cli-memory.c @@ -47,7 +47,7 @@ static void show_memory_e820(int argc __unused, char **argv __unused, more_printf("BIOS-provided physical RAM e820 map:\n"); for (int i = 0; i < count; i++) { get_type(map[i].type, type, 14); - more_printf("%016llx - %016llx %016llx (%s)\n", + more_printf("%016" PRIx64 " - %016" PRIx64 " %016" PRIx64 " (%s)\n", map[i].addr, map[i].size, map[i].addr + map[i].size, remove_spaces(type)); } @@ -60,7 +60,7 @@ static void show_memory_e820(int argc __un...
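The portable fix shown in this hunk is the <inttypes.h> format macros. A minimal stand-alone sketch of the same idiom (the values are invented; only the PRIx64 usage mirrors the patch):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Made-up memory range; the point is the format string. */
	uint64_t start = 0x100000, len = 0x9f000;

	/*
	 * PRIx64 expands to the correct length modifier for uint64_t on the
	 * target libc, so this stays warning-free under -Wformat on both
	 * 32-bit and 64-bit builds, unlike a hard-coded "%016llx".
	 */
	printf("%016" PRIx64 " - %016" PRIx64 "\n", start, start + len);
	return 0;
}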
2023 Jul 20
1
[PATCH drm-misc-next v8 02/12] drm: debugfs: provide infrastructure to dump a DRM GPU VA space
...uccess, -ENODEV if the &mgr is not initialized + */ +int drm_debugfs_gpuva_info(struct seq_file *m, + struct drm_gpuva_manager *mgr) +{ + struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node; + + if (!mgr->name) + return -ENODEV; + + seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n", + mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range); + seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n", + kva->va.addr, kva->va.addr + kva->va.range); + seq_puts(m, "\n"); + seq_puts(m, " VAs | start...
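A rough sketch of how a driver's debugfs show callback might feed its VA manager to this helper; the device structure, lock and lookup below are hypothetical, and only drm_debugfs_gpuva_info() comes from the patch:

#include <drm/drm_debugfs.h>

static int my_gpuva_debugfs_show(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct my_gpu_device *mydev = to_my_gpu_device(node->minor->dev);
	int ret;

	/* Hold whatever lock protects the VA space while it is dumped. */
	mutex_lock(&mydev->va_lock);
	ret = drm_debugfs_gpuva_info(m, &mydev->va_mgr);
	mutex_unlock(&mydev->va_lock);

	return ret;
}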
2007 Apr 18
0
[RFC/PATCH LGUEST X86_64 04/13] Useful debugging
...*fmt, ...) +{ + va_list ap; + + if (!lguest_debug) + return; + + /* irq save? */ + va_start(ap, fmt); + lgdebug_vprint(fmt, ap); + va_end(ap); +} + +void lguest_dump_vcpu_regs(struct lguest_vcpu *vcpu) +{ + struct lguest_regs *regs = &vcpu->regs; + + printk("Printing VCPU %d regs cr3: %016llx\n", vcpu->id, regs->cr3); + printk("RIP: %04llx: ", regs->cs & 0xffff); + lguest_print_address(vcpu, regs->rip); + printk("RSP: %04llx:%016llx EFLAGS: %08llx\n", regs->ss, regs->rsp, + regs->rflags); + printk("RAX: %016llx RBX: %016llx RCX: %...
2007 Apr 18
0
[RFC/PATCH LGUEST X86_64 04/13] Useful debugging
...*fmt, ...) +{ + va_list ap; + + if (!lguest_debug) + return; + + /* irq save? */ + va_start(ap, fmt); + lgdebug_vprint(fmt, ap); + va_end(ap); +} + +void lguest_dump_vcpu_regs(struct lguest_vcpu *vcpu) +{ + struct lguest_regs *regs = &vcpu->regs; + + printk("Printing VCPU %d regs cr3: %016llx\n", vcpu->id, regs->cr3); + printk("RIP: %04llx: ", regs->cs & 0xffff); + lguest_print_address(vcpu, regs->rip); + printk("RSP: %04llx:%016llx EFLAGS: %08llx\n", regs->ss, regs->rsp, + regs->rflags); + printk("RAX: %016llx RBX: %016llx RCX: %...
2020 Jun 19
0
[PATCH 08/16] nouveau/hmm: fault one page at a time
...dow into a single update. */ start = buffer->fault[fi]->addr; - limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); + limit = start + PAGE_SIZE; if (start < svmm->unmanaged.limit) limit = min_t(u64, limit, svmm->unmanaged.start); - SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); - mm = svmm->notifier.mm; - if (!mmget_not_zero(mm)) { - nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); - continue; - } - - /* Intersect fault window with the CPU VMA, cancelling - * the fault if the address is invalid. + /* + * Prepare th...
2020 Jul 01
0
[PATCH v3 1/5] nouveau/hmm: fault one page at a time
...dow into a single update. */ start = buffer->fault[fi]->addr; - limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT); + limit = start + PAGE_SIZE; if (start < svmm->unmanaged.limit) limit = min_t(u64, limit, svmm->unmanaged.start); - SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit); - mm = svmm->notifier.mm; - if (!mmget_not_zero(mm)) { - nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); - continue; - } - - /* Intersect fault window with the CPU VMA, cancelling - * the fault if the address is invalid. + /* + * Prepare th...
2019 Oct 15
0
[PATCH hmm 11/15] nouveau: use mmu_range_notifier instead of hmm_mirror
...= 0; for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { + struct svm_notifier notifier; struct mm_struct *mm; /* Cancel any faults from non-SVM channels. */ @@ -623,7 +648,6 @@ nouveau_svm_fault(struct nvif_notify *notify) continue; } SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); - mm = svmm->notifier.mm; /* We try and group handling of faults within a small * window into a single update. @@ -637,6 +661,12 @@ nouveau_svm_fault(struct nvif_notify *notify) start = max_t(u64, start, svmm->unmanaged.limit); SVMM_DBG(...
2020 Jan 13
0
[PATCH v6 5/6] nouveau: use new mmu interval notifiers
...", (s), ##a) #define SVMM_ERR(s,f,a...) \ @@ -236,6 +243,8 @@ nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst) static void nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) { + SVMM_DBG(svmm, "invalidate %016llx-%016llx", start, limit); + if (limit > start) { bool super = svmm->vmm->vmm.object.client->super; svmm->vmm->vmm.object.client->super = true; @@ -248,58 +257,25 @@ nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit) } } -static int -nouvea...
2017 Nov 28
2
[PATCH] drm/nouveau/mmu: fix odd_ptr_err.cocci warnings (fwd)
...ons(+), 2 deletions(-) --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c @@ -107,8 +107,9 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvm return ret; if (IS_ERR((memory = nvkm_umem_search(client, handle)))) { - VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); - return PTR_ERR(memory); + VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, + PTR_ERR((memory = nvkm_umem_search(client, handle)))); + return PTR_ERR((memory = nvkm_umem_search(client, handle))); } mutex_lock(&vmm->mutex);
2008 Mar 14
4
[PATCH] vmx: fix debugctl handling
...-02-18 10:07:45.000000000 +0100 +++ 2008-03-05/xen/arch/x86/hvm/vmx/vmcs.c 2008-03-14 14:08:11.000000000 +0100 @@ -870,7 +870,7 @@ void vmcs_dump_vcpu(struct vcpu *v) x = (unsigned long long)vmr(TSC_OFFSET_HIGH) << 32; x |= (uint32_t)vmr(TSC_OFFSET); printk("TSC Offset = %016llx\n", x); - x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL) << 32; + x = (unsigned long long)vmr(GUEST_IA32_DEBUGCTL_HIGH) << 32; x |= (uint32_t)vmr(GUEST_IA32_DEBUGCTL); printk("DebugCtl=%016llx DebugExceptions=%016llx\n", x, (unsigned long l...
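The underlying pattern is reassembling a 64-bit value from a pair of 32-bit VMCS fields; a stand-alone sketch of that reassembly, with invented constants standing in for the vmr() reads:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for vmr(GUEST_IA32_DEBUGCTL_HIGH) and vmr(GUEST_IA32_DEBUGCTL). */
	uint32_t high = 0x00000000, low = 0x00000041;

	/* The bug fixed above: bits 63:32 must come from the _HIGH field,
	 * not from a second read of the low word shifted up by 32. */
	unsigned long long x = (unsigned long long)high << 32;
	x |= low;

	printf("DebugCtl=%016llx\n", x);
	return 0;
}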
2019 Oct 15
0
[PATCH hmm 10/15] nouveau: use mmu_notifier directly for invalidate_range_start
...fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) { + struct mm_struct *mm; + /* Cancel any faults from non-SVM channels. */ if (!(svmm = buffer->fault[fi]->svmm)) { nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]); continue; } SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr); + mm = svmm->notifier.mm; /* We try and group handling of faults within a small * window into a single update. @@ -609,11 +640,11 @@ nouveau_svm_fault(struct nvif_notify *notify) /* Intersect fault window with the CPU VMA, cancelling * the...
2019 Sep 17
1
[PATCH 1/6] drm/nouveau: fault: Store aperture in fault information
...nouveau/nvkm/engine/fifo/gk104.c > @@ -519,9 +519,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) > chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst); > > nvkm_error(subdev, > - "fault %02x [%s] at %016llx engine %02x [%s] client %02x " > + "fault %02x [%s] at %016llx aperture %02x engine %02x [%s] client %02x " > "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n", > info->access, ea ? ea->name : "...
2014 Jul 31
0
[PATCH 09/19] drm/radeon: handle lockup in delayed work, v2
...v[0]; + + if (__radeon_fence_process(rdev, iring)) + wake_up_all(&rdev->fence_queue); + else if (radeon_ring_is_lockup(rdev, iring, &rdev->ring[iring])) { + /* good news we believe it's a lockup */ + dev_warn(rdev->dev, "GPU lockup (current fence id " + "0x%016llx last fence id 0x%016llx on ring %ld)\n", + (uint64_t)atomic64_read(&fence_drv->last_seq), + fence_drv->sync_seq[iring], iring); + + /* remember that we need an reset */ + rdev->needs_reset = true; + wake_up_all(&rdev->fence_queue); + } +} + +/** + * radeon_fence_pr...
2020 Jun 30
6
[PATCH v2 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_range_fault() output array flags HMM_PFN_PMD and HMM_PFN_PUD. This allows a device driver to know that a given 4K PFN is actually mapped by the CPU using either a PMD sized or PUD sized CPU page table entry and therefore the device driver can safely map system memory using larger device MMU PTEs. The series is based on 5.8.0-rc3 and is intended for
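As a rough illustration of what the proposed flags would let a driver decide, a hypothetical helper mapping them to a page-table order; the helper itself is invented, only HMM_PFN_PMD and HMM_PFN_PUD come from this series:

/* Hypothetical driver helper, not code from the series. */
static unsigned int cpu_map_order(unsigned long hmm_pfn)
{
	if (hmm_pfn & HMM_PFN_PUD)
		return PUD_SHIFT - PAGE_SHIFT;	/* 18 on x86-64: a 1G mapping */
	if (hmm_pfn & HMM_PFN_PMD)
		return PMD_SHIFT - PAGE_SHIFT;	/* 9 on x86-64: a 2M mapping */
	return 0;				/* plain 4K PTE */
}

A driver would apply something like this to each entry of the hmm_range_fault() output array and size its own device PTEs accordingly.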
2019 Nov 08
1
[PATCH] RFC: drm/nouveau: Make BAR1 support optional
..."> %s(object=%px, argv=%px, argc=%u, type=%px, addr=%px, size=%px)\n", __func__, object, argv, argc, type, addr, size); + + *type = NVKM_OBJECT_MAP_VA; + *addr = (u64)nvkm_kmap(chan->mem); + *size = chan->size; + + pr_info(" type: %d\n", *type); + pr_info(" addr: %016llx\n", *addr); + pr_info(" size: %016llx\n", *size); + pr_info("< %s()\n", __func__); + return 0; +} + +static int +nvkm_fifo_chan_mem_unmap(struct nvkm_object *object) +{ + struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object); + + pr_info("> %s(object=%px)\n"...
2014 Aug 01
2
[PATCH 09/19] drm/radeon: handle lockup in delayed work, v2
...nce_process(rdev, iring)) > + wake_up_all(&rdev->fence_queue); > + else if (radeon_ring_is_lockup(rdev, iring, &rdev->ring[iring])) { > + /* good news we believe it's a lockup */ > + dev_warn(rdev->dev, "GPU lockup (current fence id " > + "0x%016llx last fence id 0x%016llx on ring %ld)\n", > + (uint64_t)atomic64_read(&fence_drv->last_seq), > + fence_drv->sync_seq[iring], iring); > + > + /* remember that we need an reset */ > + rdev->needs_reset = true; > + wake_up_all(&rdev->fence_queue); >...
2020 Jul 01
8
[PATCH v3 0/5] mm/hmm/nouveau: add PMD system memory mapping
The goal for this series is to introduce the hmm_pfn_to_map_order() function. This allows a device driver to know that a given 4K PFN is actually mapped by the CPU using a larger sized CPU page table entry and therefore the device driver can safely map system memory using larger device MMU PTEs. The series is based on 5.8.0-rc3 and is intended for Jason Gunthorpe's hmm tree. These were
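A rough driver-side sketch of consuming the map order on the hmm_range_fault() output; the walk and the pr_debug reporting are invented, while hmm_pfn_to_map_order(), HMM_PFN_VALID and the struct hmm_range fields are the hmm API this cover letter builds on:

#include <linux/hmm.h>

/* Hypothetical walk over a faulted range; only the hmm_* names are real. */
static void report_cpu_map_orders(struct hmm_range *range)
{
	unsigned long npages = (range->end - range->start) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		unsigned long hmm_pfn = range->hmm_pfns[i];

		if (!(hmm_pfn & HMM_PFN_VALID))
			continue;

		/* 0 means a 4K PTE; 9 a PMD (2M on x86-64); 18 a PUD (1G). */
		pr_debug("pfn index %lu mapped with order %u\n",
			 i, hmm_pfn_to_map_order(hmm_pfn));
	}
}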
2017 Nov 30
1
[PATCH] drm/nouveau/mmu: fix odd_ptr_err.cocci warnings
...eau/nvkm/subdev/mmu/uvmm.c @@ -106,7 +106,8 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvmm, void *argv, u32 argc) } else return ret; - if (IS_ERR((memory = nvkm_umem_search(client, handle)))) { + memory = nvkm_umem_search(client, handle); + if (IS_ERR(memory)) { VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); return PTR_ERR(memory); } -- 2.13.6
2017 Nov 28
0
[PATCH] drm/nouveau/mmu: fix odd_ptr_err.cocci warnings (fwd)
...bdev/mmu/uvmm.c > +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/uvmm.c > @@ -107,8 +107,9 @@ nvkm_uvmm_mthd_map(struct nvkm_uvmm *uvm > return ret; > > if (IS_ERR((memory = nvkm_umem_search(client, handle)))) { > - VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, PTR_ERR(memory)); > - return PTR_ERR(memory); > + VMM_DEBUG(vmm, "memory %016llx %ld\n", handle, > + PTR_ERR((memory = nvkm_umem_search(client, handle)))); > + return PTR_ERR((memory = nvkm_...
2019 Sep 16
0
[PATCH 1/6] drm/nouveau: fault: Store aperture in fault information
.../nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -519,9 +519,10 @@ gk104_fifo_fault(struct nvkm_fifo *base, struct nvkm_fault_data *info) chan = nvkm_fifo_chan_inst_locked(&fifo->base, info->inst); nvkm_error(subdev, - "fault %02x [%s] at %016llx engine %02x [%s] client %02x " + "fault %02x [%s] at %016llx aperture %02x engine %02x [%s] client %02x " "[%s%s] reason %02x [%s] on channel %d [%010llx %s]\n", info->access, ea ? ea->name : "", info->addr, + info->aperture,...