Displaying 20 results from an estimated 21 matches for "1741,8".
2019 Sep 05 · 1 · [PATCH 1/8] drm/ttm: turn ttm_bo_device.vma_manager into a pointer
...ss_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32)
{
struct ttm_bo_global *glob = &ttm_bo_glob;
int ret;
+ if (!vma_manager)
+ vma_manager = &bdev->_vma_manager;
+
ret = ttm_bo_global_init();
if (ret)
return ret;
@@ -1737,7 +1741,8 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- drm_vma_offset_manager_init(&bdev->vma_manager,
+ bdev->vma_manager = vma_manager;
+ drm_vma_offset_manager_init(&bdev->_vma_manager,
DRM_FILE_PAGE_OFFSET_START,...
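
The core of this patch is a common kernel pattern: the device struct keeps an embedded default object plus a pointer that callers may redirect to a shared external instance. A minimal standalone sketch of the pattern, with simplified stand-in types mirroring the diff's names (this is not the actual TTM code):

/* Sketch of the "optional external manager, embedded fallback"
 * pattern used by the patch above. Types are simplified stand-ins. */
struct vma_offset_manager {
        unsigned long start;
        unsigned long size;
};

struct bo_device {
        struct vma_offset_manager *vma_manager;  /* manager in use */
        struct vma_offset_manager _vma_manager;  /* embedded default */
};

static int bo_device_init(struct bo_device *bdev,
                          struct vma_offset_manager *vma_manager)
{
        /* A caller that shares one manager across several devices
         * passes it in; everyone else gets the per-device instance. */
        if (!vma_manager)
                vma_manager = &bdev->_vma_manager;

        bdev->vma_manager = vma_manager;
        return 0;
}
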
2013 Dec 17 · 0 · [PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...et_info *vi)
{
void *buf;
@@ -1706,8 +1715,7 @@ free_recv_bufs:
free_vqs:
cancel_delayed_work_sync(&vi->refill);
virtnet_del_vqs(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
+ free_receive_page_frags(vi);
free_stats:
free_percpu(vi->stats);
free:
@@ -1741,8 +1749,7 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
+ free_receive_page_frags(vi);
flush_work(&vi->config_work);
--
1.8.5.1
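
The helper this patch introduces is not shown in the snippet; going by the "per-receive queue page frag" description, it presumably walks each receive queue and drops its frag page. A hedged sketch in kernel style (max_queue_pairs and rq[i].alloc_frag are assumed field names, not confirmed by the excerpt):

/* Hedged sketch of free_receive_page_frags(): release the
 * allocation-frag page held by each receive queue. Field names
 * are assumptions based on the patch description. Assumes the
 * virtio-net driver context (struct virtnet_info, put_page()). */
static void free_receive_page_frags(struct virtnet_info *vi)
{
        int i;

        for (i = 0; i < vi->max_queue_pairs; i++)
                if (vi->rq[i].alloc_frag.page)
                        put_page(vi->rq[i].alloc_frag.page);
}

Factoring the cleanup into one helper pays off in the v4 entry below, where the call moves into remove_vq_common() so the probe-failure and device-remove paths share a single cleanup.
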
2014 Jan 16 · 0 · [PATCH net-next v4 2/6] virtio-net: use per-receive queue page frag alloc for mergeable bufs
..._del_vqs(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
free_stats:
free_percpu(vi->stats);
free:
@@ -1724,6 +1725,8 @@ static void remove_vq_common(struct virtnet_info *vi)
free_receive_bufs(vi);
+ free_receive_page_frags(vi);
+
virtnet_del_vqs(vi);
}
@@ -1741,8 +1744,6 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
remove_vq_common(vi);
- if (vi->alloc_frag.page)
- put_page(vi->alloc_frag.page);
flush_work(&vi->config_work);
--
1.8.5.2
2013 Dec 23 · 2 · [PATCH net-next 2/3] virtio-net: use per-receive queue page frag alloc for mergeable bufs
...free_recv_bufs:
> free_vqs:
> cancel_delayed_work_sync(&vi->refill);
> virtnet_del_vqs(vi);
> - if (vi->alloc_frag.page)
> - put_page(vi->alloc_frag.page);
> + free_receive_page_frags(vi);
> free_stats:
> free_percpu(vi->stats);
> free:
> @@ -1741,8 +1749,7 @@ static void virtnet_remove(struct virtio_device *vdev)
> unregister_netdev(vi->dev);
>
> remove_vq_common(vi);
> - if (vi->alloc_frag.page)
> - put_page(vi->alloc_frag.page);
> + free_receive_page_frags(vi);
>
> flush_work(&vi->config_...
2013 Dec 17 · 15 · [PATCH net-next 1/3] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
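
The fallback this description outlines, attempting a high-order allocation opportunistically and retreating order by order to a plain page, can be sketched as below. The order cap and exact flag combination are assumptions for illustration, not the precise commit:

#include <linux/gfp.h>

/* Hedged sketch of the described strategy: try higher-order pages
 * first without warning or retrying hard on failure, then fall back
 * order by order until an ordinary order-0 allocation. */
#define FRAG_PAGE_MAX_ORDER 3   /* illustrative cap */

static struct page *frag_refill_alloc(gfp_t gfp)
{
        unsigned int order;
        struct page *page;

        for (order = FRAG_PAGE_MAX_ORDER; order > 0; order--) {
                /* Opportunistic attempt: suppress warnings/retries. */
                page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN |
                                   __GFP_NORETRY, order);
                if (page)
                        return page;
        }
        /* Last resort: a plain order-0 allocation. */
        return alloc_pages(gfp, 0);
}
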
2014 Jan 16 · 13 · [PATCH net-next v4 1/6] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
2014 Jan 17 · 7 · [PATCH net-next v5 0/6] virtio-net: mergeable rx buffer size auto-tuning
The virtio-net device currently uses aligned MTU-sized mergeable receive
packet buffers. Network throughput for workloads with large average
packet size can be improved by posting larger receive packet buffers.
However, due to SKB truesize effects, posting large (e.g., PAGE_SIZE)
buffers reduces the throughput of workloads that do not benefit from GRO
and have no large inbound packets.
This
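
The auto-tuning the cover letter describes, sizing receive buffers between an MTU-sized minimum and PAGE_SIZE based on observed traffic, can be sketched as an EWMA of recent packet lengths. All names and constants below are illustrative assumptions, not the series' actual code:

/* Hedged sketch of receive-buffer auto-tuning via a packet-length
 * EWMA, clamped between an MTU-sized minimum and PAGE_SIZE.
 * Names, constants, and the 1/8 weight are illustrative only. */
#define RX_BUF_MIN   1536U          /* roughly MTU-sized */
#define RX_BUF_MAX   4096U          /* PAGE_SIZE on most configs */

static unsigned int rx_len_avg = RX_BUF_MIN;  /* EWMA state */

static void record_pkt_len(unsigned int len)
{
        /* New sample weighted 1/8: avg += (len - avg) / 8 */
        rx_len_avg += ((int)len - (int)rx_len_avg) / 8;
}

static unsigned int mergeable_buf_len(void)
{
        unsigned int len = rx_len_avg;

        if (len < RX_BUF_MIN)
                len = RX_BUF_MIN;
        if (len > RX_BUF_MAX)
                len = RX_BUF_MAX;
        return len;
}
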
2014 Jan 16 · 6 · [PATCH net-next v3 1/5] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
2018 Mar 13 · 32 · [PATCH v2 00/27] x86: PIE support and option to extend KASLR randomization
Changes:
- patch v2:
- Adapt patch to work post KPTI and compiler changes
- Redo all performance testing with latest configs and compilers
- Simplify mov macro on PIE (MOVABS now)
- Reduce GOT footprint
- patch v1:
- Simplify ftrace implementation.
- Use gcc mstack-protector-guard-reg=%gs with PIE when possible.
- rfc v3:
- Use --emit-relocs instead of -pie to reduce
2014 Jan 07 · 10 · [PATCH net-next v2 1/4] net: allow > 0 order atomic page alloc in skb_page_frag_refill
skb_page_frag_refill currently permits only order-0 page allocs
unless GFP_WAIT is used. Change skb_page_frag_refill to attempt
higher-order page allocations whether or not GFP_WAIT is used. If
memory cannot be allocated, the allocator will fall back to
successively smaller page allocs (down to order-0 page allocs).
This change brings skb_page_frag_refill in line with the existing
page allocation
2020 Jul 22 · 34 · [RFC PATCH v1 00/34] VM introspection - EPT Views and Virtualization Exceptions
This patch series is based on the VM introspection patches
(https://lore.kernel.org/kvm/20200721210922.7646-1-alazar at bitdefender.com/),
extending the introspection API with EPT Views and Virtualization
Exceptions (#VE) support.
The purpose of this series is to get an initial feedback and to see if
we are on the right track, especially because the changes made to add
the EPT views are not small