Displaying 20 results from an estimated 28 matches for "gen_pool_alloc".
2019 May 27
3
[PATCH v2 2/8] s390/cio: introduce DMA pools to cio
...on't
think that should happen unless things were really bad already?
> +}
> +
> +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
> + size_t size)
> +{
> + dma_addr_t dma_addr;
> + unsigned long addr;
> + size_t chunk_size;
> +
> + addr = gen_pool_alloc(gp_dma, size);
> + while (!addr) {
> + chunk_size = round_up(size, PAGE_SIZE);
> + addr = (unsigned long) dma_alloc_coherent(dma_dev,
> + chunk_size, &dma_addr, CIO_DMA_GFP);
> + if (!addr)
> + return NULL;
> + gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size...
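The quoted function is cut off above. As a minimal, self-contained sketch of the grow-on-demand pattern under review (try the pool, fall back to a fresh page-aligned dma_alloc_coherent() chunk, add it to the pool, retry), assuming a CIO_DMA_GFP definition and a final zeroing step that are not visible in the truncated snippet:

	#include <linux/genalloc.h>
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Assumed flags; the snippet only references CIO_DMA_GFP by name. */
	#define CIO_DMA_GFP	(GFP_KERNEL | __GFP_ZERO)

	static void *gp_dma_zalloc_sketch(struct gen_pool *gp_dma,
					  struct device *dma_dev, size_t size)
	{
		dma_addr_t dma_addr;
		unsigned long addr;
		size_t chunk_size;

		/* First try to satisfy the request from chunks already in the pool. */
		addr = gen_pool_alloc(gp_dma, size);
		while (!addr) {
			/* Grow the pool by one page-aligned coherent DMA chunk ... */
			chunk_size = round_up(size, PAGE_SIZE);
			addr = (unsigned long) dma_alloc_coherent(dma_dev, chunk_size,
								  &dma_addr, CIO_DMA_GFP);
			if (!addr)
				return NULL;
			gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
			/* ... and retry the allocation from the now larger pool. */
			addr = gen_pool_alloc(gp_dma, size);
		}
		memset((void *) addr, 0, size);	/* assumed: zalloc semantics */
		return (void *) addr;
	}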
2019 May 25
1
[PATCH v2 3/8] s390/cio: add basic protected virtualization support
...GFP_KERNEL | GFP_DMA);
> + if (!cdev->private)
> + goto err_priv;
> + cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
> + cdev->dev.dma_mask = &cdev->dev.coherent_dma_mask;
> + dma_pool = cio_gp_dma_create(&cdev->dev, 1);
This can return NULL. gen_pool_alloc will panic in this case.
[...]
> +err_dma_area:
> + kfree(io_priv);
Indentation.
> +err_priv:
> + put_device(&sch->dev);
> + return ERR_PTR(-ENOMEM);
> }
[...]
> void ccw_device_update_sense_data(struct ccw_device *cdev)
> {
> memset(&cdev->id, 0, s...
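The review comment above asks for error handling: cio_gp_dma_create() can return NULL, and passing a NULL pool to gen_pool_alloc() later will crash. A hedged sketch of the kind of check being requested, assuming the cio_gp_dma_create() declaration provided by the patch series; the wrapper name and error path below are illustrative, not from the patch:

	#include <linux/genalloc.h>
	#include <linux/err.h>

	/*
	 * Hypothetical wrapper: report a failed pool creation to the caller
	 * instead of crashing on the first gen_pool_alloc() with a NULL pool.
	 */
	static struct gen_pool *create_cdev_dma_pool(struct device *dev)
	{
		struct gen_pool *pool;

		pool = cio_gp_dma_create(dev, 1);	/* second argument as in the quoted call */
		if (!pool)
			return ERR_PTR(-ENOMEM);	/* propagate instead of oopsing later */
		return pool;
	}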
2019 May 27
0
[PATCH v2 2/8] s390/cio: introduce DMA pools to cio
...e simply call panic()?
> > +}
> > +
> > +void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
> > + size_t size)
> > +{
> > + dma_addr_t dma_addr;
> > + unsigned long addr;
> > + size_t chunk_size;
> > +
> > + addr = gen_pool_alloc(gp_dma, size);
> > + while (!addr) {
> > + chunk_size = round_up(size, PAGE_SIZE);
> > + addr = (unsigned long) dma_alloc_coherent(dma_dev,
> > + chunk_size, &dma_addr, CIO_DMA_GFP);
> > + if (!addr)
> > + return NULL;
> > + gen_pool_add_virt...
2019 May 10
3
[PATCH 05/10] s390/cio: introduce DMA pools to cio
On Fri, 10 May 2019 00:11:12 +0200
Halil Pasic <pasic at linux.ibm.com> wrote:
> On Thu, 9 May 2019 12:11:06 +0200
> Cornelia Huck <cohuck at redhat.com> wrote:
>
> > On Wed, 8 May 2019 23:22:10 +0200
> > Halil Pasic <pasic at linux.ibm.com> wrote:
> >
> > > On Wed, 8 May 2019 15:18:10 +0200 (CEST)
> > > Sebastian Ott <sebott
2019 May 13
2
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...ent((struct device *) data, chunk_size,
> (void *) chunk->start_addr,
> (dma_addr_t) chunk->phys_addr);
> }
> @@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
> {
> dma_addr_t dma_addr;
> unsigned long addr = gen_pool_alloc(gp_dma, size);
> + size_t chunk_size;
>
> if (!addr) {
> + chunk_size = round_up(size, PAGE_SIZE);
Doesn't that mean that we still go up to chunks of at least PAGE_SIZE?
Or can vectors now share the same chunk?
> addr = (unsigned long) dma_alloc_coherent(dma_dev,
> -...
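The question above is about granularity: chunks are added to the pool a page at a time, but gen_pool hands out allocations at the pool's min_alloc_order, so several small buffers can share one PAGE_SIZE chunk. A small illustration of that behaviour (not from the patch; it uses an ordinary page rather than coherent DMA memory):

	#include <linux/genalloc.h>
	#include <linux/gfp.h>
	#include <linux/mm.h>

	static int genpool_granularity_demo(void)
	{
		struct gen_pool *pool;
		unsigned long page, a, b;

		pool = gen_pool_create(3, -1);		/* 8-byte allocation units */
		if (!pool)
			return -ENOMEM;

		page = __get_free_page(GFP_KERNEL);
		if (!page) {
			gen_pool_destroy(pool);
			return -ENOMEM;
		}
		gen_pool_add(pool, page, PAGE_SIZE, -1);	/* one page-sized chunk */

		a = gen_pool_alloc(pool, 64);		/* both allocations are served */
		b = gen_pool_alloc(pool, 64);		/* from the same chunk */

		gen_pool_free(pool, a, 64);
		gen_pool_free(pool, b, 64);
		gen_pool_destroy(pool);
		free_page(page);
		return 0;
	}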
2019 Apr 09
0
[RFC PATCH 04/12] s390/cio: introduce cio DMA pool
...cio_dma_flags);
> + if (!cpu_addr)
> + return;
> + gen_pool_add_virt(cio_dma_pool, (unsigned long) cpu_addr,
> + dma_addr, PAGE_SIZE, -1);
> + }
> +
> +}
> +
> +void *cio_dma_zalloc(size_t size)
> +{
> + dma_addr_t dma_addr;
> + unsigned long addr = gen_pool_alloc(cio_dma_pool, size);
> +
> + if (!addr) {
> + addr = (unsigned long) dma_alloc_coherent(cio_dma_css,
> + PAGE_SIZE, &dma_addr, cio_dma_flags);
> + if (!addr)
> + return NULL;
> + gen_pool_add_virt(cio_dma_pool, addr, dma_addr, PAGE_SIZE, -1);
> + addr = gen_poo...
2019 May 23
0
[PATCH v2 2/8] s390/cio: introduce DMA pools to cio
...oid)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_...
2019 Apr 09
0
[RFC PATCH 04/12] s390/cio: introduce cio DMA pool
..._dma_pool, (unsigned long) cpu_addr,
> > > + dma_addr, PAGE_SIZE, -1);
> > > + }
> > > +
> > > +}
> > > +
> > > +void *cio_dma_zalloc(size_t size)
> > > +{
> > > + dma_addr_t dma_addr;
> > > + unsigned long addr = gen_pool_alloc(cio_dma_pool, size);
> > > +
> > > + if (!addr) {
> > > + addr = (unsigned long) dma_alloc_coherent(cio_dma_css,
> > > + PAGE_SIZE, &dma_addr, cio_dma_flags);
> > > + if (!addr)
> > > + return NULL;
> > > + gen_pool_add_vir...
2019 Apr 26
0
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...void __init cio_dma_pool_init(void)
+{
+ /* No need to free up the resources: compiled in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr = gen_pool_alloc(gp_dma, size);
+
+ if (!addr) {
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ PAGE_SIZE, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, PAGE_SIZE, -1);
+ addr = gen_pool_alloc(gp_dma, size);
+ }
+ return (void *) addr;
+}
+
+v...
2019 May 29
0
[PATCH v3 2/8] s390/cio: introduce DMA pools to cio
...led in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_...
2019 May 12
0
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...;start_addr + 1;
+
+ dma_free_coherent((struct device *) data, chunk_size,
(void *) chunk->start_addr,
(dma_addr_t) chunk->phys_addr);
}
@@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
{
dma_addr_t dma_addr;
unsigned long addr = gen_pool_alloc(gp_dma, size);
+ size_t chunk_size;
if (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
addr = (unsigned long) dma_alloc_coherent(dma_dev,
- PAGE_SIZE, &dma_addr, CIO_DMA_GFP);
+ chunk_size, &dma_addr, CIO_DMA_GFP);
if (!addr)
return NULL;
- gen_pool_add_virt(gp_...
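The top of this diff shows the teardown side: every chunk that was added with gen_pool_add_virt() is walked and handed back to dma_free_coherent(). A self-contained sketch of that pattern using gen_pool_for_each_chunk(); the function names here are placeholders, since the snippet truncates the surrounding code:

	#include <linux/genalloc.h>
	#include <linux/dma-mapping.h>

	static void gp_dma_free_chunk(struct gen_pool *gp_dma,
				      struct gen_pool_chunk *chunk, void *data)
	{
		size_t chunk_size = chunk->end_addr - chunk->start_addr + 1;

		/* Release the coherent backing that was recorded for this chunk. */
		dma_free_coherent((struct device *) data, chunk_size,
				  (void *) chunk->start_addr,
				  (dma_addr_t) chunk->phys_addr);
	}

	static void gp_dma_destroy_sketch(struct gen_pool *gp_dma,
					  struct device *dma_dev)
	{
		/* Walk every chunk, free its DMA memory, then drop the pool itself. */
		gen_pool_for_each_chunk(gp_dma, gp_dma_free_chunk, dma_dev);
		gen_pool_destroy(gp_dma);
	}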
2019 Jun 06
0
[PATCH v4 2/8] s390/cio: introduce DMA pools to cio
...led in */
+ cio_dma_pool = cio_gp_dma_create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_...
2019 Jun 12
0
[PATCH v5 2/8] s390/cio: introduce DMA pools to cio
..._create(cio_get_dma_css_dev(), 1);
+ if (!cio_dma_pool)
+ return -ENOMEM;
+ return 0;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ dma_addr_t dma_addr;
+ unsigned long addr;
+ size_t chunk_size;
+
+ if (!cio_dma_pool)
+ return NULL;
+ addr = gen_pool_alloc(gp_dma, size);
+ while (!addr) {
+ chunk_size = round_up(size, PAGE_SIZE);
+ addr = (unsigned long) dma_alloc_coherent(dma_dev,
+ chunk_size, &dma_addr, CIO_DMA_GFP);
+ if (!addr)
+ return NULL;
+ gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_alloc(gp_...
2019 May 15
0
[PATCH 05/10] s390/cio: introduce DMA pools to cio
...ize,
> > (void *) chunk->start_addr,
> > (dma_addr_t) chunk->phys_addr);
> > }
> > @@ -1088,13 +1091,15 @@ void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
> > {
> > dma_addr_t dma_addr;
> > unsigned long addr = gen_pool_alloc(gp_dma, size);
> > + size_t chunk_size;
> >
> > if (!addr) {
> > + chunk_size = round_up(size, PAGE_SIZE);
>
> Doesn't that mean that we still go up to chunks of at least PAGE_SIZE?
> Or can vectors now share the same chunk?
Exactly! We put the allocated...
2013 Mar 15
4
[PATCHv3 vringh] caif_virtio: Introduce caif over virtio
...slots in the virtio ring,
+ * re-establish memory reserved and open up tx queues.
+ */
+ if (cfv->vq_tx->num_free <= cfv->watermark_tx)
+ continue;
+
+ /* Re-establish memory reserve */
+ if (cfv->reserved_mem == 0 && cfv->genpool)
+ cfv->reserved_mem =
+ gen_pool_alloc(cfv->genpool,
+ cfv->reserved_size);
+
+ /* Open up the tx queues */
+ if (cfv->reserved_mem) {
+ cfv->watermark_tx =
+ virtqueue_get_vring_size(cfv->vq_tx);
+ netif_tx_wake_all_queues(cfv->ndev);
+ /* Buffers are recycled in cfv_netdev_tx, so
+ * disable...
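The caif_virtio snippet uses gen_pool_alloc() for a different purpose: holding back a fixed-size reserve from the pool and re-establishing it once the virtio ring has enough free slots again. A hedged sketch of that reservation idiom; the struct and field names below are assumed from the quoted context, not taken from the driver:

	#include <linux/genalloc.h>

	struct reserve_demo {
		struct gen_pool *genpool;
		unsigned long reserved_mem;	/* 0 when no reserve is held */
		size_t reserved_size;
	};

	static void demo_reclaim_reserve(struct reserve_demo *d)
	{
		/* Re-establish the reserve once, if it was given up earlier. */
		if (d->reserved_mem == 0 && d->genpool)
			d->reserved_mem = gen_pool_alloc(d->genpool,
							 d->reserved_size);
	}

	static void demo_release_reserve(struct reserve_demo *d)
	{
		/* Hand the reserve back to the pool, e.g. under memory pressure. */
		if (d->reserved_mem) {
			gen_pool_free(d->genpool, d->reserved_mem,
				      d->reserved_size);
			d->reserved_mem = 0;
		}
	}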
2019 May 23
18
[PATCH v2 0/8] s390: virtio: support protected virtualization
Enhanced virtualization protection technology may require the use of
bounce buffers for I/O. While support for this was built into the virtio
core, virtio-ccw wasn't changed accordingly.
Some background on the technology (not part of this series) and the
terminology used.
* Protected Virtualization (PV):
Protected Virtualization guarantees that non-shared memory of a guest
that operates in PV