Displaying 15 results from an estimated 15 matches for "687,13".
2006 Nov 25
3
[PATCH] HTTP accept filter support for FreeBSD
...ude/sys/socket.h as accept_filter_arg
+ unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
+ $tcp_defer_accept_opts = [Socket::SOL_SOCKET, Socket::SO_ACCEPTFILTER, ['httpready', nil].pack('a16a240')]
+ end
end
end
@@ -680,13 +687,13 @@
configure_socket_options
- @socket.setsockopt(Socket::SOL_TCP, $tcp_defer_accept_opts[0], $tcp_defer_accept_opts[1]) if $tcp_defer_accept_opts
+ @socket.setsockopt(*$tcp_defer_accept_opts) if $tcp_defer_accept_opts
@acceptor = Thread.new do
while true...
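The hunks above switch Mongrel's listener from Linux's TCP_DEFER_ACCEPT to FreeBSD's "httpready" accept filter; both keep accept() from waking until a request has actually arrived. Below is a minimal C sketch of the same two socket options (the helper name defer_accept and the bare-bones error handling are this sketch's inventions; the accf_http(9) module must be loaded for the FreeBSD branch to work, which is what the sysctl probe above checks). Note that struct accept_filter_arg is exactly the a16a240 layout the Ruby pack() call mirrors.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void defer_accept(int listen_fd)
{
#ifdef SO_ACCEPTFILTER
	/* FreeBSD: install the httpready accept filter; this only works
	 * on a socket that is already listening. */
	struct accept_filter_arg afa;

	memset(&afa, 0, sizeof(afa));
	strcpy(afa.af_name, "httpready");
	if (setsockopt(listen_fd, SOL_SOCKET, SO_ACCEPTFILTER,
		       &afa, sizeof(afa)) < 0)
		perror("SO_ACCEPTFILTER");
#elif defined(TCP_DEFER_ACCEPT)
	/* Linux: wake accept() only once data is queued on the connection. */
	int secs = 1;

	if (setsockopt(listen_fd, IPPROTO_TCP, TCP_DEFER_ACCEPT,
		       &secs, sizeof(secs)) < 0)
		perror("TCP_DEFER_ACCEPT");
#endif
}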
2016 Mar 11
0
[PATCH v1 06/19] zsmalloc: clean up many BUG_ON
...;objects;
@@ -654,7 +655,7 @@ static void insert_zspage(struct page *first_page, struct size_class *class,
{
struct page **head;
- BUG_ON(!is_first_page(first_page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
if (fullness >= _ZS_NR_FULLNESS_GROUPS)
return;
@@ -686,13 +687,13 @@ static void remove_zspage(struct page *first_page, struct size_class *class,
{
struct page **head;
- BUG_ON(!is_first_page(first_page));
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
if (fullness >= _ZS_NR_FULLNESS_GROUPS)
return;
head = &class->fullness_...
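The conversion above is mechanical but has a real payoff: VM_BUG_ON_PAGE() compiles away unless CONFIG_DEBUG_VM is set, and when it does trip it dumps the offending struct page rather than just oopsing. A rough userspace analogue of that pattern (DEBUG_VM, dump_page() and the pared-down struct page below are stand-ins, not the kernel's definitions):

#include <stdio.h>
#include <stdlib.h>

struct page { unsigned long flags; void *first_page; };

static void dump_page(const struct page *page)
{
	fprintf(stderr, "page %p: flags=%#lx first_page=%p\n",
		(const void *)page, page->flags, page->first_page);
}

#ifdef DEBUG_VM
/* Debug build: check the invariant, dump the object on failure. */
#define VM_BUG_ON_PAGE(cond, page)		\
	do {					\
		if (cond) {			\
			dump_page(page);	\
			abort();		\
		}				\
	} while (0)
#else
/* Production build: the check costs nothing. */
#define VM_BUG_ON_PAGE(cond, page) do { } while (0)
#endif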
2017 Feb 14
3
[PATCH v2 0/2] cifs.upcall: allow cifs.upcall to grab $KRB5CCNAME from initiating process
Small respin of the patches that I posted a few days ago. The main
difference is the reordering of the series to make it do the group
and grouplist manipulation first, and then the patch that makes
it grab the KRB5CCNAME from the initiating process.
I think the code is sound; my main question is whether we really
need the command-line switch for this. Should this just be the
default mode of
2020 Aug 19
0
[PATCH 23/28] lib82596: convert from dma_cache_sync to dma_sync_single_for_device
...Issuing RX_START\n", dev->name));
dma->scb.command = SWAP16(RX_START);
dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
@@ -659,13 +687,13 @@ static inline int i596_rx(struct net_device *dev)
rfd = lp->rfd_head; /* Ref next frame to check */
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
if (...
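DMA_WBACK/DMA_INV were driver-private cache macros; the patch routes them through the generic streaming-DMA ownership calls instead. A hedged sketch of what the two wrappers in this hunk plausibly look like (the exact bodies are an assumption; virt_to_dma() and struct i596_private are the driver's own helpers, visible in the excerpt above):

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>

static inline void dma_sync_dev(struct net_device *dev, volatile void *addr,
				size_t len)
{
	struct i596_private *lp = netdev_priv(dev);

	/* CPU finished writing this descriptor: flush it out to the device. */
	dma_sync_single_for_device(dev->dev.parent, virt_to_dma(lp, addr),
				   len, DMA_BIDIRECTIONAL);
}

static inline void dma_sync_cpu(struct net_device *dev, volatile void *addr,
				size_t len)
{
	struct i596_private *lp = netdev_priv(dev);

	/* Device may have written here: discard stale CPU cache lines. */
	dma_sync_single_for_cpu(dev->dev.parent, virt_to_dma(lp, addr),
				len, DMA_BIDIRECTIONAL);
}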
2020 Sep 15
0
[PATCH 11/18] lib82596: convert to dma_alloc_noncoherent
...Issuing RX_START\n", dev->name));
dma->scb.command = SWAP16(RX_START);
dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
@@ -659,13 +687,13 @@ static inline int i596_rx(struct net_device *dev)
rfd = lp->rfd_head; /* Ref next frame to check */
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
if (...
2020 Sep 14
2
[PATCH 11/17] sgiseeq: convert to dma_alloc_noncoherent
...Issuing RX_START\n", dev->name));
dma->scb.command = SWAP16(RX_START);
dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
- DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
+ dma_sync_dev(dev, &(dma->scb), sizeof(struct i596_scb));
ca(dev);
@@ -659,13 +687,13 @@ static inline int i596_rx(struct net_device *dev)
rfd = lp->rfd_head; /* Ref next frame to check */
- DMA_INV(dev, rfd, sizeof(struct i596_rfd));
+ dma_sync_cpu(dev, rfd, sizeof(struct i596_rfd));
while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
if (...
2014 Sep 01
6
[PATCH v4 0/4] virtio: Clean up scatterlists and use the DMA API
This fixes virtio on Xen guests as well as on any other platform
that uses virtio_pci on which physical addresses don't match bus
addresses.
This can be tested with:
virtme-run --xen xen --kimg arch/x86/boot/bzImage --console
using virtme from here:
https://git.kernel.org/cgit/utils/kernel/virtme/virtme.git
Without these patches, the guest hangs forever. With these patches,
2014 Aug 28
6
[PATCH v3 0/5] virtio: Clean up scatterlists and use the DMA API
This fixes virtio on Xen guests as well as on any other platform
that uses virtio_pci on which physical addresses don't match bus
addresses.
This can be tested with:
virtme-run --xen xen --kimg arch/x86/boot/bzImage --console
using virtme from here:
https://git.kernel.org/cgit/utils/kernel/virtme/virtme.git
Without these patches, the guest hangs forever. With these patches,
2020 Sep 14
20
a saner API for allocating DMA addressable pages v2
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
I'm still a
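In practical terms the series trades an allocation attribute for a dedicated entry point. A hedged before/after sketch using the API shape that eventually landed upstream (the in-flight series may differ in detail; rx_ring_alloc() is a hypothetical caller):

#include <linux/dma-mapping.h>

/* Old style: coherent allocator, opted out of coherency via an attr:
 *   buf = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
 *                         DMA_ATTR_NON_CONSISTENT);
 * New style: explicit non-coherent page allocation; the caller owns
 * cache maintenance via dma_sync_single_for_{cpu,device}(). */
static struct page *rx_ring_alloc(struct device *dev, size_t size,
				  dma_addr_t *dma)
{
	return dma_alloc_pages(dev, size, dma, DMA_FROM_DEVICE, GFP_KERNEL);
}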
2020 Sep 15
32
a saner API for allocating DMA addressable pages v3
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
As a follow up I
2016 Mar 11
31
[PATCH v1 00/19] Support non-lru page migration
Recently, I got many reports about performance degradation
in embedded systems (Android mobile phones, webOS TVs and so on)
and of processes failing to fork easily.
The problem was fragmentation caused by zram and GPU driver
pages. Their pages cannot be migrated, so compaction cannot
work well either, and the reclaimer ends up shrinking all of the
working set pages. That made the system very slow and even caused
fork to fail.
2020 Aug 19
39
a saner API for allocating DMA addressable pages
Hi all,
this series replaces the DMA_ATTR_NON_CONSISTENT flag to dma_alloc_attrs
with a separate new dma_alloc_pages API, which is available on all
platforms. In addition to cleaning up the convoluted code path, this
ensures that other drivers that have asked for better support for
non-coherent DMA to pages without incurring bounce buffering can finally
be properly supported.
I'm still a