Displaying 17 results from an estimated 17 matches for "bytes_to_copy".
2023 Mar 06
0
[RFC PATCH v2 1/4] virtio/vsock: fix 'rx_bytes'/'fwd_cnt' calculation
...s->rx_queue);
if (!skb)
break;
hdr = virtio_vsock_hdr(skb);
+ pkt_len = (size_t)le32_to_cpu(hdr->len);
if (dequeued_len >= 0) {
- size_t pkt_len;
size_t bytes_to_copy;
- pkt_len = (size_t)le32_to_cpu(hdr->len);
bytes_to_copy = min(user_buf_len, pkt_len);
if (bytes_to_copy) {
@@ -484,7 +481,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,...
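For context, this hunk moves the `pkt_len` read out of the `if (dequeued_len >= 0)` branch, so the length announced in the packet header is known on every pass through the dequeue loop, not only when a copy actually happens. A minimal sketch of the loop body after the change (the copy step and the surrounding queueing/locking are paraphrased assumptions, not quoted from the upstream function):

	hdr = virtio_vsock_hdr(skb);
	pkt_len = (size_t)le32_to_cpu(hdr->len);	/* now read unconditionally */

	if (dequeued_len >= 0) {
		size_t bytes_to_copy = min(user_buf_len, pkt_len);

		if (bytes_to_copy) {
			/* copy bytes_to_copy bytes of payload to the caller;
			 * a copy failure is recorded in dequeued_len
			 */
		}
	}

	/* the 'rx_bytes'/'fwd_cnt' accounting can then be based on pkt_len,
	 * which, unlike skb->len, is not changed by skb_*() helpers
	 */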
2023 Mar 10
0
[RFC PATCH v4 2/4] virtio/vsock: remove redundant 'skb_pull()' call
...0fd9906..9a411475e201 100644
>--- a/net/vmw_vsock/virtio_transport_common.c
>+++ b/net/vmw_vsock/virtio_transport_common.c
>@@ -465,7 +465,6 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk,
> dequeued_len = err;
> } else {
> user_buf_len -= bytes_to_copy;
>- skb_pull(skb, bytes_to_copy);
> }
>
> spin_lock_bh(&vvs->rx_lock);
>--
>2.25.1
>
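For reference, the call removed here advances the skb's data pointer and shrinks its length by the same amount; a simplified model of skb_pull() is sketched below (illustration only, the real helpers in include/linux/skbuff.h also validate the length against skb->len/skb->data_len). Dropping the call therefore leaves skb->len at the full payload length for the rest of the loop, which is what the discussion at the bottom of this page is about.

	/* simplified model of skb_pull(), for illustration only */
	static inline void *skb_pull_model(struct sk_buff *skb, unsigned int len)
	{
		skb->len  -= len;	/* less payload counted as "remaining" */
		skb->data += len;	/* data pointer moves past the consumed bytes */
		return skb->data;
	}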
2013 Mar 27
0
[PATCH 04/22] block: Convert bio_for_each_segment() to bvec_iter
...e_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -2103,19 +2104,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+...
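All of the block-layer hits on this page are the same mechanical conversion: bio_for_each_segment() stops taking a `struct bio_vec *` plus an integer segment index and instead fills in a `struct bio_vec` by value, with the loop position carried in a separate `struct bvec_iter`. A minimal before/after sketch, using a generic byte-counting loop rather than the SAS transport handler quoted above:

	/* old interface: pointer into the bio's bvec array + integer index */
	struct bio_vec *bvecp;
	unsigned int total = 0;
	int i;

	bio_for_each_segment(bvecp, bio, i)
		total += bvecp->bv_len;

	/* new interface: bio_vec filled in by value, position kept in a
	 * bvec_iter, so fields are accessed as bvec.bv_len, bvec.bv_page, ...
	 */
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter)
		total += bvec.bv_len;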
2013 Aug 07
0
[PATCH 07/22] block: Convert bio_for_each_segment() to bvec_iter
...e_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -2103,19 +2104,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+...
2013 Oct 29
0
[PATCH 07/23] block: Convert bio_for_each_segment() to bvec_iter
...e_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -2106,19 +2107,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+...
2013 Jun 09
0
[PATCH 06/26] block: Convert bio_for_each_segment() to bvec_iter
...e_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -2103,19 +2104,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+...
2013 Nov 27
0
[PATCH 07/25] block: Convert bio_for_each_segment() to bvec_iter
...e_address(bvec.bv_page) + bvec.bv_offset,
+ bvec.bv_len);
+ offset += bvec.bv_len;
}
} else {
dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -2106,19 +2107,19 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
u32 offset = 0;
u32 bytes_to_copy =
le16_to_cpu(mpi_reply->ResponseDataLength);
- bio_for_each_segment(bvec, rsp->bio, i) {
- if (bytes_to_copy <= bvec->bv_len) {
- memcpy(page_address(bvec->bv_page) +
- bvec->bv_offset, pci_addr_in +
+ bio_for_each_segment(bvec, rsp->bio, iter) {
+...
2023 Mar 06
0
[RFC PATCH v2 2/4] virtio/vsock: remove all data from sk_buff
...it to `pkt_len` anyway, as in the proposal I sent for patch 1),
otherwise we have to check every time whether skb_* functions touch
skb->len.
E.g. skb_pull() decreases skb->len, so I'm not sure we can call
virtio_transport_dec_rx_pkt(skb->len) if we don't remove `skb_pull(skb,
bytes_to_copy);` inside the loop.
Thanks,
Stefano
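To make the concern concrete with invented numbers: for a 100-byte packet read into a 60-byte user buffer, skb_pull(skb, 60) leaves skb->len == 40, so crediting skb->len back to the forward counter would account for 40 bytes instead of the full packet. Capturing the header length up front, as patch 1 in the series does, keeps the accounting independent of whichever skb_*() helpers run in between (sketch below; the exact argument list of virtio_transport_dec_rx_pkt() in the series may differ):

	pkt_len = (size_t)le32_to_cpu(hdr->len);	/* taken before any skb_*() call */

	/* ... copy min(user_buf_len, pkt_len) bytes, with or without skb_pull() ... */

	virtio_transport_dec_rx_pkt(vvs, pkt_len);	/* credit the announced length,
							 * regardless of what skb->len
							 * has become by now
							 */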