search for: last_pfn

Displaying 20 results from an estimated 219 matches for "last_pfn".

2012 Jul 04
3
[PATCH] xen: populate correct number of pages when across mem boundary
..._pfn = PFN_UP(entry->addr);
 	/* If the E820 falls within the nr_pages, we want to start
 	 * at the nr_pages PFN.
 	 * If that would mean going past the E820 entry, skip it
 	 */
+again:
 	if (s_pfn <= max_pfn) {
 		capacity = e_pfn - max_pfn;
 		dest_pfn = max_pfn;
 	} else {
-		/* last_pfn MUST be within E820_RAM regions */
-		if (*last_pfn && e_pfn >= *last_pfn)
-			s_pfn = *last_pfn;
 		capacity = e_pfn - s_pfn;
 		dest_pfn = s_pfn;
 	}
-	/* If we had filled this E820_RAM entry, go to the next one. */
-	if (capacity <= 0)
-		continue;
-	if (credits > cap...
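For context, the pattern being reworked above is a walk over E820-style RAM regions with a running budget of pages to populate. A minimal userspace sketch of that capacity/credits bookkeeping (region values and structure invented for illustration; this is not the kernel code):

#include <stdio.h>

/* Made-up stand-in for an E820_RAM entry. */
struct region { unsigned long start_pfn, end_pfn; };

/* Populate up to `credits` pages, starting at max_pfn (the nr_pages
 * boundary); regions below the boundary are skipped, and a region
 * that straddles it is entered at max_pfn. */
static unsigned long populate(const struct region *map, int n,
                              unsigned long max_pfn,
                              unsigned long credits)
{
    unsigned long done = 0;

    for (int i = 0; i < n && credits; i++) {
        unsigned long s_pfn = map[i].start_pfn;
        unsigned long e_pfn = map[i].end_pfn;
        unsigned long capacity, dest_pfn;

        if (e_pfn <= max_pfn)
            continue;                     /* entirely below the boundary */

        if (s_pfn <= max_pfn) {
            capacity = e_pfn - max_pfn;   /* straddles the boundary */
            dest_pfn = max_pfn;
        } else {
            capacity = e_pfn - s_pfn;
            dest_pfn = s_pfn;
        }

        unsigned long chunk = credits < capacity ? credits : capacity;
        printf("populate %#lx pages at pfn %#lx\n", chunk, dest_pfn);
        credits -= chunk;
        done += chunk;
    }
    return done;
}

int main(void)
{
    struct region map[] = { { 0x000, 0x0a0 }, { 0x100, 0x200 } };
    populate(map, 2, 0x080, 0x150);       /* boundary inside region 0 */
    return 0;
}

The patch title refers to exactly this bookkeeping going wrong when a region crosses the boundary, so fewer pages than requested were populated; the diff drops the last_pfn clamp and reworks the loop so the remaining credits carry over correctly.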
2020 Nov 03
0
[PATCH 1/2] Revert "vhost-vdpa: fix page pinning leakage in error path"
...host_dev *dev = &v->vdev;
>  	struct vhost_iotlb *iotlb = dev->iotlb;
>  	struct page **page_list;
> -	struct vm_area_struct **vmas;
> +	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
>  	unsigned int gup_flags = FOLL_LONGTERM;
> -	unsigned long map_pfn, last_pfn = 0;
> -	unsigned long npages, lock_limit;
> -	unsigned long i, nmap = 0;
> +	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
> +	unsigned long locked, lock_limit, pinned, i;
>  	u64 iova = msg->iova;
> -	long pinned;
>  	int ret = 0;
>
>  	if (vhost_iotlb...
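The map_pfn/last_pfn pair that recurs throughout these vhost-vdpa diffs implements PFN-run coalescing: after pinning, physically contiguous pages are merged into a single IOTLB mapping instead of one mapping per page. A minimal standalone sketch of that pattern (helpers and page size invented; not the vhost-vdpa code):

#include <stdio.h>

/* Hypothetical stand-in for the real IOTLB map call. */
static void iotlb_map(unsigned long iova, unsigned long pfn,
                      unsigned long npages)
{
    printf("map iova %#lx -> pfn %#lx (%lu pages)\n", iova, pfn, npages);
}

/* Walk pinned PFNs (npages >= 1 assumed), extending a run while they
 * stay contiguous and emitting one mapping per run. */
static void map_pinned(const unsigned long *pfns, unsigned long npages,
                       unsigned long iova)
{
    unsigned long map_pfn = pfns[0], last_pfn = pfns[0];

    for (unsigned long i = 1; i < npages; i++) {
        if (pfns[i] == last_pfn + 1) {
            last_pfn = pfns[i];              /* still contiguous: grow run */
            continue;
        }
        /* run broken: emit it as one mapping, start a new run */
        unsigned long run = last_pfn - map_pfn + 1;
        iotlb_map(iova, map_pfn, run);
        iova += run * 4096;
        map_pfn = last_pfn = pfns[i];
    }
    iotlb_map(iova, map_pfn, last_pfn - map_pfn + 1);  /* final run */
}

int main(void)
{
    unsigned long pfns[] = { 0x100, 0x101, 0x102, 0x200, 0x201 };
    map_pinned(pfns, 5, 0x10000);            /* expect two mappings */
    return 0;
}

What the fix and its revert below disagree about is how the surrounding error path unpins pages when a mapping fails partway through, not this coalescing loop itself.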
2020 Oct 01
0
[PATCH] vhost-vdpa: fix page pinning leakage in error path
...*v,
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
-	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+	struct vm_area_struct **vmas;
 	unsigned int gup_flags = FOLL_LONGTERM;
-	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
-	unsigned long locked, lock_limit, pinned, i;
+	unsigned long map_pfn, last_pfn = 0;
+	unsigned long npages, lock_limit;
+	unsigned long i, nmap = 0;
 	u64 iova = msg->iova;
+	long pinned;
 	int ret = 0;

 	if (vhost_iotlb_itree_first(iotlb, msg->iova, msg->iova + msg->s...
2020 Oct 01
0
[PATCH v2] vhost-vdpa: fix page pinning leakage in error path
...*v,
 	struct vhost_dev *dev = &v->vdev;
 	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct page **page_list;
-	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
+	struct vm_area_struct **vmas;
 	unsigned int gup_flags = FOLL_LONGTERM;
-	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
-	unsigned long locked, lock_limit, pinned, i;
+	unsigned long map_pfn, last_pfn = 0;
+	unsigned long npages, lock_limit;
+	unsigned long i, nmap = 0;
 	u64 iova = msg->iova;
+	long pinned;
 	int ret = 0;

 	if (vhost_iotlb_itree_first(iotlb, msg->iova, msg->iova + msg->s...
2020 Aug 18
3
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...u/intel/iommu.c
@@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
    pages can only be freed after the IOTLB flush has been done. */
 static struct page *domain_unmap(struct dmar_domain *domain,
 				 unsigned long start_pfn,
-				 unsigned long last_pfn)
+				 unsigned long last_pfn,
+				 struct page *freelist)
 {
-	struct page *freelist;
-
 	BUG_ON(!domain_pfn_supported(domain, start_pfn));
 	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);

 	/* we don't need lock here; nobody else touches the iova range...
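The signature change shown here threads a freelist through domain_unmap(): rather than each call allocating and returning its own list, the caller passes one in and gets it back extended, so page-table pages from several unmaps can all be freed after a single IOTLB flush. A standalone sketch of that list-threading pattern (types and helpers invented; not the intel-iommu code):

#include <stdio.h>
#include <stdlib.h>

struct page { struct page *next; };

/* Pretend each unmapped range frees one page-table page and push it
 * onto the caller-supplied freelist, returning the grown list. */
static struct page *domain_unmap_range(struct page *freelist,
                                       unsigned long start_pfn,
                                       unsigned long last_pfn)
{
    (void)start_pfn; (void)last_pfn;
    struct page *pg = malloc(sizeof(*pg));
    pg->next = freelist;
    return pg;
}

int main(void)
{
    struct page *freelist = NULL;

    freelist = domain_unmap_range(freelist, 0x000, 0x0ff);
    freelist = domain_unmap_range(freelist, 0x100, 0x1ff);

    /* ...one IOTLB flush covering both ranges would happen here... */

    while (freelist) {                       /* only now safe to free */
        struct page *next = freelist->next;
        free(freelist);
        freelist = next;
    }
    puts("flushed once, freed everything after");
    return 0;
}

The comment preserved in the diff ("pages can only be freed after the IOTLB flush has been done") is the invariant this restructuring exists to maintain.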
2013 Jun 24
3
[konrad.wilk@oracle.com: [PATCH] drm/i915: make compact dma scatter lists creation work with SWIOTLB backend.]
...gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
-
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
 		if (!i || page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
@@ -1812,8 +1819,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 		}
 		last_pfn = page_to_pfn(page);
 	}
-
-	sg_mark_end(sg);
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
+		sg_mark_end(sg);
 	obj->pages = st;
 	if (i91...
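This hunk turns off scatterlist coalescing when SWIOTLB is active: normally pages with consecutive PFNs are merged into one sg entry, but with bounce buffering in play each page gets its own single-page entry. A simplified sketch of the two behaviors (simplified structures, not the kernel's struct scatterlist):

#include <stdbool.h>
#include <stdio.h>

struct seg { unsigned long pfn, npages; };

/* Build segments from PFNs: merge consecutive PFNs unless swiotlb
 * forces one page per segment. Returns the number of segments. */
static int build_segments(struct seg *st, const unsigned long *pfns,
                          int n, bool swiotlb)
{
    int nents = 0;
    unsigned long last_pfn = 0;

    for (int i = 0; i < n; i++) {
        if (swiotlb || i == 0 || pfns[i] != last_pfn + 1) {
            st[nents].pfn = pfns[i];        /* start a new segment */
            st[nents].npages = 1;
            nents++;
        } else {
            st[nents - 1].npages++;         /* extend current segment */
        }
        last_pfn = pfns[i];
    }
    return nents;
}

int main(void)
{
    unsigned long pfns[] = { 0x10, 0x11, 0x12, 0x40 };
    struct seg st[4];

    printf("compact: %d entries\n", build_segments(st, pfns, 4, false)); /* 2 */
    printf("swiotlb: %d entries\n", build_segments(st, pfns, 4, true));  /* 4 */
    return 0;
}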
2020 Aug 17
1
[PATCH 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...u/intel/iommu.c
@@ -1160,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
    pages can only be freed after the IOTLB flush has been done. */
 static struct page *domain_unmap(struct dmar_domain *domain,
 				 unsigned long start_pfn,
-				 unsigned long last_pfn)
+				 unsigned long last_pfn,
+				 struct page *freelist)
 {
-	struct page *freelist;
-
 	BUG_ON(!domain_pfn_supported(domain, start_pfn));
 	BUG_ON(!domain_pfn_supported(domain, last_pfn));
 	BUG_ON(start_pfn > last_pfn);

 	/* we don't need lock here; nobody else touches the iova range...
2020 Aug 18
0
[PATCH V2 1/2] Add new flush_iotlb_range and handle freelists when using iommu_unmap_fast
...0,17 +1160,17 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
>     pages can only be freed after the IOTLB flush has been done. */
>  static struct page *domain_unmap(struct dmar_domain *domain,
>  				 unsigned long start_pfn,
> -				 unsigned long last_pfn)
> +				 unsigned long last_pfn,
> +				 struct page *freelist)
>  {
> -	struct page *freelist;
> -
>  	BUG_ON(!domain_pfn_supported(domain, start_pfn));
>  	BUG_ON(!domain_pfn_supported(domain, last_pfn));
>  	BUG_ON(start_pfn > last_pfn);
>
>  	/* we don...
2019 Dec 21
0
[PATCH 4/8] iommu: Handle freelists when using deferred flushing in iommu drivers
...mmu/intel-iommu.c
@@ -1145,9 +1145,9 @@ static struct page *dma_pte_clear_level(struct dmar_domain *domain, int level,
    pages can only be freed after the IOTLB flush has been done. */
 static struct page *domain_unmap(struct dmar_domain *domain,
 				 unsigned long start_pfn,
-				 unsigned long last_pfn)
+				 unsigned long last_pfn,
+				 struct page *freelist)
 {
-	struct page *freelist;
 	BUG_ON(!domain_pfn_supported(domain, start_pfn));
 	BUG_ON(!domain_pfn_supported(domain, last_pfn));
@@ -1155,7 +1155,8 @@ static struct page *domain_unmap(struct dmar_domain *domain,
 	/* we don't nee...
2012 Jul 18
0
[Xen-devel] [PATCH -v2] xen: populate correct number of pages when across mem boundary
...ddr);
 	/* If the E820 falls within the nr_pages, we want to start
 	 * at the nr_pages PFN.
 	 * If that would mean going past the E820 entry, skip it
@@ -184,23 +183,19 @@ static unsigned long __init xen_populate_chunk(
 		capacity = e_pfn - max_pfn;
 		dest_pfn = max_pfn;
 	} else {
-		/* last_pfn MUST be within E820_RAM regions */
-		if (*last_pfn && e_pfn >= *last_pfn)
-			s_pfn = *last_pfn;
 		capacity = e_pfn - s_pfn;
 		dest_pfn = s_pfn;
 	}
-	/* If we had filled this E820_RAM entry, go to the next one. */
-	if (capacity <= 0)
-		continue;
-	if (credits > cap...
2013 Jun 17
1
Kernel panic in pin_pagetable_pfn
...00a0000 (usable)
[ 0.000000] Xen: 00000000000a0000 - 0000000000100000 (reserved)
[ 0.000000] Xen: 0000000000100000 - 0000000008cc2000 (usable)
[ 0.000000] Xen: 0000000008cc2000 - 0000000009bc5000 (reserved)
[ 0.000000] Xen: 0000000009bc5000 - 00000001e0000000 (usable)
[ 0.000000] last_pfn = 0x1e0000 max_arch_pfn = 0x3ffffffff
[ 0.000000] last_pfn = 0x100000 max_arch_pfn = 0x3ffffffff
[ 0.000000] init_memory_mapping
[ 0.000000] last_map_addr: 100000000 end: 100000000
[ 0.000000] init_memory_mapping
[ 0.000000] last_map_addr: 1e0000000 end: 1e0000000
[ 0.000000] RAMD...
2016 Nov 04
2
[PATCH kernel v4 7/7] virtio-balloon: tell host vm's unused page info
Please squish this and patch 5 together. It makes no sense to separate them.

> +static void send_unused_pages_info(struct virtio_balloon *vb,
> +				unsigned long req_id)
> +{
> +	struct scatterlist sg_in;
> +	unsigned long pfn = 0, bmap_len, pfn_limit, last_pfn, nr_pfn;
> +	struct virtqueue *vq = vb->req_vq;
> +	struct virtio_balloon_resp_hdr *hdr = vb->resp_hdr;
> +	int ret = 1, used_nr_bmap = 0, i;
> +
> +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_PAGE_BITMAP) &&
> +		vb->nr_page_bmap == 1)
> +		extend_pa...
2016 Nov 02
0
[PATCH kernel v4 7/7] virtio-balloon: tell host vm's unused page info
...pages list.
@@ -552,6 +554,63 @@ static void update_balloon_stats(struct virtio_balloon *vb)
 		pages_to_bytes(available));
 }

+static void send_unused_pages_info(struct virtio_balloon *vb,
+				unsigned long req_id)
+{
+	struct scatterlist sg_in;
+	unsigned long pfn = 0, bmap_len, pfn_limit, last_pfn, nr_pfn;
+	struct virtqueue *vq = vb->req_vq;
+	struct virtio_balloon_resp_hdr *hdr = vb->resp_hdr;
+	int ret = 1, used_nr_bmap = 0, i;
+
+	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_PAGE_BITMAP) &&
+		vb->nr_page_bmap == 1)
+		extend_page_bitmap(vb);
+
+	pfn_limit = PFNS...
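What survives of the truncated body suggests the reporting is chunked: unused pages go to the host as (start PFN, bitmap) pieces, each capped at a PFN limit derived from the bitmap buffer size. A standalone sketch of that chunking idea (buffer size, helpers, and the all-unused range are invented assumptions):

#include <stdio.h>
#include <string.h>

#define BMAP_BYTES 4096u                    /* assumed buffer size */
#define PFN_LIMIT  (BMAP_BYTES * 8u)        /* PFNs one bitmap covers */

/* Hypothetical stand-in for queueing one chunk to the host. */
static void send_chunk(unsigned long start_pfn, const unsigned char *bmap,
                       unsigned long nr_pfn)
{
    (void)bmap;
    printf("chunk: start_pfn=%#lx covers %lu pfns\n", start_pfn, nr_pfn);
}

/* Report [first, last] in bitmap-sized chunks. For the sketch every
 * page in the range is marked unused; a real guest would set only the
 * bits for pages that are actually free. */
static void report_range(unsigned long first, unsigned long last)
{
    unsigned char bmap[BMAP_BYTES];

    for (unsigned long pfn = first; pfn <= last; pfn += PFN_LIMIT) {
        unsigned long nr = last - pfn + 1;
        if (nr > PFN_LIMIT)
            nr = PFN_LIMIT;
        memset(bmap, 0xff, sizeof(bmap));
        send_chunk(pfn, bmap, nr);
    }
}

int main(void)
{
    report_range(0x1000, 0x1000 + PFN_LIMIT + PFN_LIMIT / 2); /* two chunks */
    return 0;
}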
2007 Jan 18
13
[PATCH 0/5] dump-core take 2:
The following dump-core patches change the format to ELF, add a PFN-GMFN table, add HVM support, and add experimental IA64 support.
- ELF format: a program header and note section are adopted.
- HVM domain support: to know which memory areas to dump, XENMEM_set_memory_map is added. The existing XENMEM_memory_map hypercall only works on the current domain, so a new one is created, and the HVM domain builder tells Xen its...
2016 Nov 07
0
[PATCH kernel v4 7/7] virtio-balloon: tell host vm's unused page info
...squish this and patch 5 together. It makes no sense to separate them.
> OK.
> > +static void send_unused_pages_info(struct virtio_balloon *vb,
> > +				unsigned long req_id)
> > +{
> > +	struct scatterlist sg_in;
> > +	unsigned long pfn = 0, bmap_len, pfn_limit, last_pfn, nr_pfn;
> > +	struct virtqueue *vq = vb->req_vq;
> > +	struct virtio_balloon_resp_hdr *hdr = vb->resp_hdr;
> > +	int ret = 1, used_nr_bmap = 0, i;
> > +
> > +	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_PAGE_BITMAP) &&
> > +		vb->n...
2013 Dec 14
1
Can't boot converted SLES11SP2/OES11 domU
...4 UTC 2013 (36fae46)
[ 0.000000] Command line: root=/dev/xvda2 resume=/dev/xvda1 splash=silent showopts vga=0x317
[ 0.000000] Xen-provided physical RAM map:
[ 0.000000] Xen: 0000000000000000 - 0000000100800000 (usable)
[ 0.000000] NX (Execute Disable) protection: active
[ 0.000000] last_pfn = 0x100800 max_arch_pfn = 0x80000000
[ 0.000000] last_pfn = 0x100000 max_arch_pfn = 0x80000000
[ 0.000000] init_memory_mapping: 0000000000000000-0000000100000000
[ 0.000000] init_memory_mapping: 0000000100000000-0000000100800000
[ 0.000000] nmi ring buffer: 262144
[ 0.000000] RAMDISK...