Displaying 20 results from an estimated 47 matches for "end_addr".
2020 Aug 05
1
[PATCH v3 35/38] virtio_mem: convert to LE accessors
...drivers/virtio/virtio_mem.c b/drivers/virtio/virtio_mem.c
index f26f5f64ae82..c08512fcea90 100644
--- a/drivers/virtio/virtio_mem.c
+++ b/drivers/virtio/virtio_mem.c
@@ -1530,21 +1530,21 @@ static void virtio_mem_refresh_config(struct virtio_mem *vm)
uint64_t new_plugged_size, usable_region_size, end_addr;
/* the plugged_size is just a reflection of what _we_ did previously */
- virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
- &new_plugged_size);
+ virtio_cread_le(vm->vdev, struct virtio_mem_config, plugged_size,
+ &new_plugged_size);
if (WARN_ON_ONCE(new_...
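For context: virtio 1.0 devices keep their config space little-endian regardless of guest endianness, which is why the plain virtio_cread() calls are being replaced with their _le variants. A minimal userspace sketch of the conversion such an accessor performs (illustrative only, not the kernel macro; the field value is an assumed example):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* plugged_size as a device stores it: little-endian bytes (1 GiB) */
	uint8_t config_bytes[8] = { 0x00, 0x00, 0x00, 0x40, 0, 0, 0, 0 };
	uint64_t raw, plugged_size;

	memcpy(&raw, config_bytes, sizeof(raw));
	/* what a _le accessor conceptually does after the raw read */
	plugged_size = le64toh(raw);

	printf("plugged_size = %#llx\n", (unsigned long long)plugged_size);
	return 0;
}

On a big-endian guest, skipping the le64toh() step would yield a byteswapped size; on little-endian it is a no-op.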
2016 Nov 02
0
[PATCH v3 05/15] secboot: remove fixup_hs_desc hook
...cation of LS blob is
+ */
+struct hsflcn_acr_desc {
+	union {
+		u8 reserved_dmem[0x200];
+		u32 signatures[4];
+	} ucode_reserved_space;
+	u32 wpr_region_id;
+	u32 wpr_offset;
+	u32 mmu_mem_range;
+#define FLCN_ACR_MAX_REGIONS 2
+	struct {
+		u32 no_regions;
+		struct {
+			u32 start_addr;
+			u32 end_addr;
+			u32 region_id;
+			u32 read_mask;
+			u32 write_mask;
+			u32 client_mask;
+		} region_props[FLCN_ACR_MAX_REGIONS];
+	} regions;
+	u32 ucode_blob_size;
+	u64 ucode_blob_base __aligned(8);
+	struct {
+		u32 vpr_enabled;
+		u32 vpr_start;
+		u32 vpr_end;
+		u32 hdcp_policies;
+	} vpr_desc;
+};
+...
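Because this descriptor is consumed directly by the HS falcon firmware, its layout is effectively ABI: the 0x200-byte reserved union and the 8-byte alignment of ucode_blob_base must hold. A hypothetical standalone check of those invariants (sketch only; the struct is trimmed and the names are reused from the patch, not taken from nouveau):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define __aligned(x) __attribute__((aligned(x)))

struct hsflcn_acr_desc_sketch {
	union {
		uint8_t reserved_dmem[0x200];
		uint32_t signatures[4];
	} ucode_reserved_space;
	uint32_t wpr_region_id;
	uint32_t wpr_offset;
	uint32_t mmu_mem_range;
	/* regions block omitted for brevity */
	uint32_t ucode_blob_size;
	uint64_t ucode_blob_base __aligned(8);
};

/* the firmware expects the WPR fields right after the reserved DMEM */
static_assert(offsetof(struct hsflcn_acr_desc_sketch, wpr_region_id) == 0x200,
	      "reserved DMEM must span exactly 0x200 bytes");
static_assert(offsetof(struct hsflcn_acr_desc_sketch, ucode_blob_base) % 8 == 0,
	      "ucode blob base must be 8-byte aligned");

int main(void) { return 0; }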
2019 Oct 04
0
[PATCH 07/11] vhost: convert vhost_umem_interval_tree to half closed intervals
..._virtqueue *vq,
>> {
>> const struct vhost_umem_node *node;
>> struct vhost_umem *umem = vq->iotlb;
>> - u64 s = 0, size, orig_addr = addr, last = addr + len - 1;
>> + u64 s = 0, size, orig_addr = addr, last = addr + len;
>
>maybe "end" or "end_addr" instead of "last".
>
>> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
>> index e9ed2722b633..bb36cb9ed5ec 100644
>> --- a/drivers/vhost/vhost.h
>> +++ b/drivers/vhost/vhost.h
>> @@ -53,13 +53,13 @@ struct vhost_log {
>> };
>>...
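The naming discussion matters because the two interval conventions pair with different comparison operators: a closed interval ends at addr + len - 1 and overlap tests use <=, while a half-open one ends at addr + len and tests use <. A small standalone illustration (hypothetical helpers, not vhost code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* closed [start, last], where last = start + len - 1 */
static bool overlaps_closed(uint64_t a, uint64_t a_last,
			    uint64_t b, uint64_t b_last)
{
	return a <= b_last && b <= a_last;
}

/* half-open [start, end), where end = start + len */
static bool overlaps_half_open(uint64_t a, uint64_t a_end,
			       uint64_t b, uint64_t b_end)
{
	return a < b_end && b < a_end;
}

int main(void)
{
	/* two adjacent 4 KiB regions must not overlap in either convention */
	printf("closed: %d, half-open: %d\n",
	       overlaps_closed(0x1000, 0x1fff, 0x2000, 0x2fff),
	       overlaps_half_open(0x1000, 0x2000, 0x2000, 0x3000));
	return 0;
}

Mixing the conventions (e.g. a half-open end with a <= test) is exactly the kind of off-by-one the rename to "end" or "end_addr" is meant to guard against.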
2020 Mar 02
0
[PATCH v1 02/11] virtio-mem: Paravirtualized memory hotplug
...id, VIRTIO_MEM_MB_STATE_UNUSED);
+ }
+
+ return 0;
+}
+
+/*
+ * Update all parts of the config that could have changed.
+ */
+static void virtio_mem_refresh_config(struct virtio_mem *vm)
+{
+ const uint64_t phys_limit = 1UL << MAX_PHYSMEM_BITS;
+ uint64_t new_plugged_size, usable_region_size, end_addr;
+
+ /* the plugged_size is just a reflection of what _we_ did previously */
+ virtio_cread(vm->vdev, struct virtio_mem_config, plugged_size,
+ &new_plugged_size);
+ if (WARN_ON_ONCE(new_plugged_size != vm->plugged_size))
+ vm->plugged_size = new_plugged_size;
+
+ /* calculate t...
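The truncated tail of this function derives the usable region; the shape of that calculation is clamping the device-provided size against what the platform can address. A hedged sketch of the arithmetic using the declarations visible above (the start address and size are assumed example values, not values from the driver):

#include <stdint.h>
#include <stdio.h>

#define MAX_PHYSMEM_BITS 46	/* example; the real value is arch-specific */

int main(void)
{
	const uint64_t phys_limit = 1ULL << MAX_PHYSMEM_BITS;
	uint64_t region_addr = 0x100000000ULL;		/* assumed region start */
	uint64_t usable_region_size = 0x40000000ULL;	/* assumed usable size  */

	/* never let the device region extend past addressable memory */
	uint64_t end_addr = region_addr + usable_region_size;
	if (end_addr > phys_limit)
		end_addr = phys_limit;

	printf("end_addr = %#llx\n", (unsigned long long)end_addr);
	return 0;
}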
2019 May 10
3
[PATCH 05/10] s390/cio: introduce DMA pools to cio
On Fri, 10 May 2019 00:11:12 +0200
Halil Pasic <pasic at linux.ibm.com> wrote:
> On Thu, 9 May 2019 12:11:06 +0200
> Cornelia Huck <cohuck at redhat.com> wrote:
>
> > On Wed, 8 May 2019 23:22:10 +0200
> > Halil Pasic <pasic at linux.ibm.com> wrote:
> >
> > > On Wed, 8 May 2019 15:18:10 +0200 (CEST)
> > > Sebastian Ott <sebott
2019 May 13
2
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...truct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
> static void __gp_dma_free_dma(struct gen_pool *pool,
> struct gen_pool_chunk *chunk, void *data)
> {
> - dma_free_coherent((struct device *) data, PAGE_SIZE,
> +
> + size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
> +
> + dma_free_coherent((struct device *) data, chunk_size,
> (void *) chunk->start_addr,
> (dma_addr_t) chunk->phys_addr);
> }
> @@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
> {...
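The fix above hinges on gen_pool chunks storing an inclusive end_addr, so the byte count is end - start + 1; unconditionally freeing PAGE_SIZE would under-free any chunk larger than one page. A standalone sketch of the size computation (struct reduced to the two fields in play):

#include <stddef.h>
#include <stdio.h>

/* mirrors just the bounds fields of struct gen_pool_chunk */
struct chunk_sketch {
	unsigned long start_addr;	/* first byte of the chunk */
	unsigned long end_addr;		/* last byte, inclusive */
};

int main(void)
{
	/* a two-page chunk at 0x1000, with 4 KiB pages */
	struct chunk_sketch chunk = { .start_addr = 0x1000, .end_addr = 0x2fff };

	/* inclusive bounds, hence the +1 */
	size_t chunk_size = chunk.end_addr - chunk.start_addr + 1;

	printf("chunk_size = %zu\n", chunk_size);	/* prints 8192 */
	return 0;
}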
2019 May 23
0
[PATCH v2 2/8] s390/cio: introduce DMA pools to cio
...A_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but no...
2019 May 29
0
[PATCH v3 2/8] s390/cio: introduce DMA pools to cio
...A_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but no...
2019 May 12
0
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...css.c
@@ -1063,7 +1063,10 @@ struct gen_pool *cio_gp_dma_create(struct device *dma_dev, int nr_pages)
static void __gp_dma_free_dma(struct gen_pool *pool,
struct gen_pool_chunk *chunk, void *data)
{
- dma_free_coherent((struct device *) data, PAGE_SIZE,
+
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
(void *) chunk->start_addr,
(dma_addr_t) chunk->phys_addr);
}
@@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
{
dma_addr_t dma_addr;
unsigned...
2019 Jun 06
0
[PATCH v4 2/8] s390/cio: introduce DMA pools to cio
...A_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but no...
2019 Jun 12
0
[PATCH v5 2/8] s390/cio: introduce DMA pools to cio
...A_GFP);
+ if (!cpu_addr)
+ return gp_dma;
+ gen_pool_add_virt(gp_dma, (unsigned long) cpu_addr,
+ dma_addr, PAGE_SIZE, -1);
+ }
+ return gp_dma;
+}
+
+static void __gp_dma_free_dma(struct gen_pool *pool,
+ struct gen_pool_chunk *chunk, void *data)
+{
+ size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
+ (void *) chunk->start_addr,
+ (dma_addr_t) chunk->phys_addr);
+}
+
+void cio_gp_dma_destroy(struct gen_pool *gp_dma, struct device *dma_dev)
+{
+ if (!gp_dma)
+ return;
+ /* this is quite ugly but n...
2012 Nov 20
12
[PATCH v2 00/11] xen: Initial kexec/kdump implementation
Hi,
This set of patches contains the initial kexec/kdump implementation for Xen, v2
(the previous version was posted to a few people by mistake; sorry for that).
Currently only dom0 is supported; however, almost all infrastructure
required for domU support is ready.
Jan Beulich suggested merging the Xen x86 assembler code with the bare-metal x86 code.
This could simplify the code and slightly reduce the kernel size.
2019 May 15
0
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...ate(struct device *dma_dev, int nr_pages)
> > static void __gp_dma_free_dma(struct gen_pool *pool,
> > struct gen_pool_chunk *chunk, void *data)
> > {
> > - dma_free_coherent((struct device *) data, PAGE_SIZE,
> > +
> > + size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;
> > +
> > + dma_free_coherent((struct device *) data, chunk_size,
> > (void *) chunk->start_addr,
> > (dma_addr_t) chunk->phys_addr);
> > }
> > @@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, s...
2011 Sep 23
0
[xen-unstable test] 9061: regressions - FAIL
...<lasse.collin@tukaani.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
changeset: 23868:28147fd781af
user: Jan Beulich <jbeulich@suse.com>
date: Thu Sep 22 18:32:34 2011 +0100
VT-d: fix off-by-one error in RMRR validation
(base_addr,end_addr) is an inclusive range, and hence there shouldn't
be a subtraction of 1 in the second invocation of page_is_ram_type().
For RMRRs covering a single page that actually resulted in the
immediately preceding page to get checked (which could have resulted
in a false warning)....
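The underlying arithmetic: with an inclusive (base_addr, end_addr) pair the last page frame is simply end_addr >> PAGE_SHIFT; subtracting 1 from that frame number (as the pre-fix code apparently did) lands on the page before the range whenever the RMRR covers a single page. A small illustration with assumed example addresses:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	/* an RMRR covering exactly one 4 KiB page, bounds inclusive */
	uint64_t base_addr = 0x7f000000;
	uint64_t end_addr  = 0x7f000fff;

	uint64_t first = base_addr >> PAGE_SHIFT;	    /* 0x7f000 */
	uint64_t last_fixed = end_addr >> PAGE_SHIFT;	    /* 0x7f000: correct */
	uint64_t last_buggy = (end_addr >> PAGE_SHIFT) - 1; /* 0x7efff: wrong page */

	printf("first=%#llx fixed=%#llx buggy=%#llx\n",
	       (unsigned long long)first,
	       (unsigned long long)last_fixed,
	       (unsigned long long)last_buggy);
	return 0;
}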
2016 Dec 14
18
[PATCH v5 0/18] Secure Boot refactoring
Sending things in smaller chunks since it makes reviewing easier.
This is part 2/3 of the secboot refactoring/PMU command support
patch series. Part 1 was the new falcon library, which should be
merged soon.
This series is mainly a refactoring/sanitization of the existing
secure boot code. It does not add new features (part 3 will).
Secure boot handling is now separated by NVIDIA
2003 Aug 22
3
PAE removal patch for testing
...ap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
+pmap_change_wiring(pmap, va, wired)
+ register pmap_t pmap;
+ vm_offset_t va;
+ boolean_t wired;
{
- pt_entry_t *pte;
+ register unsigned *pte;
if (pmap == NULL)
return;
@@ -2405,21 +2602,32 @@
vm_offset_t addr;
vm_offset_t end_addr = src_addr + len;
vm_offset_t pdnxt;
- vm_paddr_t src_frame;
+ unsigned src_frame, dst_frame;
vm_page_t m;
if (dst_addr != src_addr)
return;
- src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
- if (src_frame != (PTDpde[0] & PG_FRAME)) {
+ src_frame = ((unsigned) src_pmap-&...