Jorgen Hansen
2021-Jan-11 12:18 UTC
[PATCH] VMCI: Enforce queuepair max size for IOCTL_VMCI_QUEUEPAIR_ALLOC
When creating the VMCI queue pair tracking data structures on the host side, the IOCTL for creating the VMCI queue pair didn't validate the queue pair size parameters. This change adds checks for this.

This avoids a memory allocation issue in qp_host_alloc_queue, as reported by nslusarek at gmx.net. The check in qp_host_alloc_queue has also been updated to enforce the maximum queue pair size as defined by VMCI_MAX_GUEST_QP_MEMORY.

The fix has been verified using sample code supplied by nslusarek at gmx.net.

Reported-by: nslusarek at gmx.net
Reviewed-by: Vishnu Dasa <vdasa at vmware.com>
Signed-off-by: Jorgen Hansen <jhansen at vmware.com>
---
 drivers/misc/vmw_vmci/vmci_queue_pair.c | 12 ++++++++----
 include/linux/vmw_vmci_defs.h           |  4 ++--
 2 files changed, 10 insertions(+), 6 deletions(-)

diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index 525ef96..39d2f191 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -237,7 +237,9 @@ static struct qp_list qp_guest_endpoints = {
 #define QPE_NUM_PAGES(_QPE) ((u32) \
 	(DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
 	 DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
-
+#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \
+	((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \
+	 (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY)

 /*
  * Frees kernel VA space for a given queue and its queue header, and
@@ -528,7 +530,7 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size)
 	u64 num_pages;
 	const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

-	if (size > SIZE_MAX - PAGE_SIZE)
+	if (size > min(VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
 		return NULL;
 	num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
 	if (num_pages > (SIZE_MAX - queue_size) /
@@ -1929,6 +1931,9 @@ int vmci_qp_broker_alloc(struct vmci_handle handle,
 			 struct vmci_qp_page_store *page_store,
 			 struct vmci_ctx *context)
 {
+	if (!QP_SIZES_ARE_VALID(produce_size, consume_size))
+		return VMCI_ERROR_NO_RESOURCES;
+
 	return qp_broker_alloc(handle, peer, flags, priv_flags,
 			       produce_size, consume_size,
 			       page_store, context, NULL, NULL, NULL, NULL);
@@ -2685,8 +2690,7 @@ int vmci_qpair_alloc(struct vmci_qp **qpair,
 	 * used by the device is NO_RESOURCES, so use that here too.
 	 */

-	if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
-	    produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
+	if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize))
 		return VMCI_ERROR_NO_RESOURCES;

 	retval = vmci_route(&src, &dst, false, &route);
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index be0afe6..e36cb11 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -66,7 +66,7 @@ enum {
  * consists of at least two pages, the memory limit also dictates the
  * number of queue pairs a guest can create.
  */
-#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024)
+#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024))
 #define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2)

 /*
@@ -80,7 +80,7 @@ enum {
  * too much kernel memory (especially on vmkernel). We limit a queuepair to
  * 32 KB, or 16 KB per queue for symmetrical pairs.
  */
-#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024)
+#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))

 /*
  * We have a fixed set of resource IDs available in the VMX.
--
2.6.2
Jorgen Hansen
2021-Jan-11 12:18 UTC
[PATCH] VMCI: Stop log spew when qp allocation isn't possible
VMCI queue pair allocation is disabled, if a VM is in FT mode. In these cases, VMware Tools may still once in a while attempt to create a vSocket stream connection, resulting in multiple warnings in the kernel logs. Therefore downgrade the error log to a debug log. Reviewed-by: Vishnu Dasa <vdasa at vmware.com> Signed-off-by: Jorgen Hansen <jhansen at vmware.com> --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index c490658..a3691c1 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -1207,7 +1207,7 @@ static int qp_alloc_guest_work(struct vmci_handle *handle, } else { result = qp_alloc_hypercall(queue_pair_entry); if (result < VMCI_SUCCESS) { - pr_warn("qp_alloc_hypercall result = %d\n", result); + pr_devel("qp_alloc_hypercall result = %d\n", result); goto error; } } -- 2.6.2
Jorgen Hansen
2021-Jan-11 12:18 UTC
[PATCH] VMCI: Use set_page_dirty_lock() when unregistering guest memory
When the VMCI host support releases guest memory in the case where the VM was killed, the pinned guest pages aren't locked. Use set_page_dirty_lock() instead of set_page_dirty(). Testing done: Killed VM while having an active VMCI based vSocket connection and observed warning from ext4. With this fix, no warning was observed. Ran various vSocket tests without issues. Fixes: 06164d2b72aa ("VMCI: queue pairs implementation.") Reviewed-by: Vishnu Dasa <vdasa at vmware.com> Signed-off-by: Jorgen Hansen <jhansen at vmware.com> --- drivers/misc/vmw_vmci/vmci_queue_pair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index a3691c1..525ef96 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -630,7 +630,7 @@ static void qp_release_pages(struct page **pages, for (i = 0; i < num_pages; i++) { if (dirty) - set_page_dirty(pages[i]); + set_page_dirty_lock(pages[i]); put_page(pages[i]); pages[i] = NULL; -- 2.6.2
Greg KH
2021-Jan-11 12:46 UTC
[PATCH] VMCI: Enforce queuepair max size for IOCTL_VMCI_QUEUEPAIR_ALLOC
On Mon, Jan 11, 2021 at 04:18:53AM -0800, Jorgen Hansen wrote:> When create the VMCI queue pair tracking data structures on the host > side, the IOCTL for creating the VMCI queue pair didn't validate > the queue pair size parameters. This change adds checks for this. > > This avoids a memory allocation issue in qp_host_alloc_queue, as > reported by nslusarek at gmx.net. The check in qp_host_alloc_queue > has also been updated to enforce the maximum queue pair size > as defined by VMCI_MAX_GUEST_QP_MEMORY. > > The fix has been verified using sample code supplied by > nslusarek at gmx.net. > > Reported-by: nslusarek at gmx.net > Reviewed-by: Vishnu Dasa <vdasa at vmware.com> > Signed-off-by: Jorgen Hansen <jhansen at vmware.com> > --- > drivers/misc/vmw_vmci/vmci_queue_pair.c | 12 ++++++++---- > include/linux/vmw_vmci_defs.h | 4 ++-- > 2 files changed, 10 insertions(+), 6 deletions(-)Hi, This is the friendly patch-bot of Greg Kroah-Hartman. You have sent him a patch that has triggered this response. He used to manually respond to these common problems, but in order to save his sanity (he kept writing the same thing over and over, yet to different people), I was created. Hopefully you will not take offence and will fix the problem in your patch and resubmit it so that it can be accepted into the Linux kernel tree. You are receiving this message because of the following common error(s) as indicated below: - You sent multiple patches, yet no indication of which ones should be applied in which order. Greg could just guess, but if you are receiving this email, he guessed wrong and the patches didn't apply. Please read the section entitled "The canonical patch format" in the kernel file, Documentation/SubmittingPatches for a description of how to do this so that Greg has a chance to apply these correctly. 
If you wish to discuss this problem further, or you have questions about how to resolve this issue, please feel free to respond to this email and Greg will reply once he has dug out from the pending patches received from other developers. thanks, greg k-h's patch email bot
kernel test robot
2021-Jan-11 15:45 UTC
[PATCH] VMCI: Enforce queuepair max size for IOCTL_VMCI_QUEUEPAIR_ALLOC
Hi Jorgen, Thank you for the patch! Perhaps something to improve: [auto build test WARNING on linux/master] [also build test WARNING on char-misc/char-misc-testing linus/master v5.11-rc3 next-20210111] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Jorgen-Hansen/VMCI-Enforce-queuepair-max-size-for-IOCTL_VMCI_QUEUEPAIR_ALLOC/20210111-204259 base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 09162bc32c880a791c6c0668ce0745cf7958f576 config: i386-randconfig-s001-20210111 (attached as .config) compiler: gcc-9 (Debian 9.3.0-15) 9.3.0 reproduce: # apt-get install sparse # sparse version: v0.6.3-208-g46a52ca4-dirty # https://github.com/0day-ci/linux/commit/0923aeac7af9635dd6bf0141e8188f4815e573d2 git remote add linux-review https://github.com/0day-ci/linux git fetch --no-tags linux-review Jorgen-Hansen/VMCI-Enforce-queuepair-max-size-for-IOCTL_VMCI_QUEUEPAIR_ALLOC/20210111-204259 git checkout 0923aeac7af9635dd6bf0141e8188f4815e573d2 # save the attached .config to linux build tree make W=1 C=1 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__' ARCH=i386 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot <lkp at intel.com> "sparse warnings: (new ones prefixed by >>)">> drivers/misc/vmw_vmci/vmci_queue_pair.c:533:20: sparse: sparse: incompatible types in comparison expression (different type sizes): >> drivers/misc/vmw_vmci/vmci_queue_pair.c:533:20: sparse: unsigned int * >> drivers/misc/vmw_vmci/vmci_queue_pair.c:533:20: sparse: unsigned long *vim +533 drivers/misc/vmw_vmci/vmci_queue_pair.c 519 520 /* 521 * Allocates kernel VA space of specified size plus space for the queue 522 * and kernel interface. 
This is different from the guest queue allocator,
   523	 * because we do not allocate our own queue header/data pages here but
   524	 * share those of the guest.
   525	 */
   526	static struct vmci_queue *qp_host_alloc_queue(u64 size)
   527	{
   528		struct vmci_queue *queue;
   529		size_t queue_page_size;
   530		u64 num_pages;
   531		const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));
   532	
 > 533		if (size > min(VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE))
   534			return NULL;
   535		num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
   536		if (num_pages > (SIZE_MAX - queue_size) /
   537			 sizeof(*queue->kernel_if->u.h.page))
   538			return NULL;
   539	
   540		queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);
   541	
   542		queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
   543		if (queue) {
   544			queue->q_header = NULL;
   545			queue->saved_header = NULL;
   546			queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
   547			queue->kernel_if->host = true;
   548			queue->kernel_if->mutex = NULL;
   549			queue->kernel_if->num_pages = num_pages;
   550			queue->kernel_if->u.h.header_page =
   551			    (struct page **)((u8 *)queue + queue_size);
   552			queue->kernel_if->u.h.page =
   553			    &queue->kernel_if->u.h.header_page[1];
   554		}
   555	
   556		return queue;
   557	}
   558	

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all at lists.01.org
-------------- next part --------------
A non-text attachment was scrubbed...
Name: .config.gz
Type: application/gzip
Size: 35562 bytes
Desc: not available
URL: <http://lists.linuxfoundation.org/pipermail/virtualization/attachments/20210111/800d2a51/attachment-0001.gz>
Stefano Garzarella
2021-Jan-18 11:27 UTC
[PATCH] VMCI: Enforce queuepair max size for IOCTL_VMCI_QUEUEPAIR_ALLOC
+Cc: Norbert Slusarek <nslusarek at gmx.net> On Mon, Jan 11, 2021 at 04:18:53AM -0800, Jorgen Hansen wrote:>When create the VMCI queue pair tracking data structures on the host >side, the IOCTL for creating the VMCI queue pair didn't validate >the queue pair size parameters. This change adds checks for this. > >This avoids a memory allocation issue in qp_host_alloc_queue, as >reported by nslusarek at gmx.net. The check in qp_host_alloc_queue >has also been updated to enforce the maximum queue pair size >as defined by VMCI_MAX_GUEST_QP_MEMORY. > >The fix has been verified using sample code supplied by >nslusarek at gmx.net. > >Reported-by: nslusarek at gmx.net >Reviewed-by: Vishnu Dasa <vdasa at vmware.com> >Signed-off-by: Jorgen Hansen <jhansen at vmware.com> >--- > drivers/misc/vmw_vmci/vmci_queue_pair.c | 12 ++++++++---- > include/linux/vmw_vmci_defs.h | 4 ++-- > 2 files changed, 10 insertions(+), 6 deletions(-) > >diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c >index 525ef96..39d2f191 100644 >--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c >+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c >@@ -237,7 +237,9 @@ static struct qp_list qp_guest_endpoints = { > #define QPE_NUM_PAGES(_QPE) ((u32) \ > (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \ > DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2)) >- >+#define QP_SIZES_ARE_VALID(_prod_qsize, _cons_qsize) \ >+ ((_prod_qsize) + (_cons_qsize) >= max(_prod_qsize, _cons_qsize) && \ >+ (_prod_qsize) + (_cons_qsize) <= VMCI_MAX_GUEST_QP_MEMORY) > > /* > * Frees kernel VA space for a given queue and its queue header, and >@@ -528,7 +530,7 @@ static struct vmci_queue *qp_host_alloc_queue(u64 size) > u64 num_pages; > const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if)); > >- if (size > SIZE_MAX - PAGE_SIZE) >+ if (size > min(VMCI_MAX_GUEST_QP_MEMORY, SIZE_MAX - PAGE_SIZE)) > return NULL; > num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1; > if (num_pages > (SIZE_MAX - 
queue_size) / >@@ -1929,6 +1931,9 @@ int vmci_qp_broker_alloc(struct vmci_handle handle, > struct vmci_qp_page_store *page_store, > struct vmci_ctx *context) > { >+ if (!QP_SIZES_ARE_VALID(produce_size, consume_size)) >+ return VMCI_ERROR_NO_RESOURCES; >+ > return qp_broker_alloc(handle, peer, flags, priv_flags, > produce_size, consume_size, > page_store, context, NULL, NULL, NULL, NULL); >@@ -2685,8 +2690,7 @@ int vmci_qpair_alloc(struct vmci_qp **qpair, > * used by the device is NO_RESOURCES, so use that here too. > */ > >- if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) || >- produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY) >+ if (!QP_SIZES_ARE_VALID(produce_qsize, consume_qsize)) > return VMCI_ERROR_NO_RESOURCES; > > retval = vmci_route(&src, &dst, false, &route); >diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h >index be0afe6..e36cb11 100644 >--- a/include/linux/vmw_vmci_defs.h >+++ b/include/linux/vmw_vmci_defs.h >@@ -66,7 +66,7 @@ enum { > * consists of at least two pages, the memory limit also dictates the > * number of queue pairs a guest can create. > */ >-#define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) >+#define VMCI_MAX_GUEST_QP_MEMORY ((size_t)(128 * 1024 * 1024)) > #define VMCI_MAX_GUEST_QP_COUNT (VMCI_MAX_GUEST_QP_MEMORY / PAGE_SIZE / 2) > > /* >@@ -80,7 +80,7 @@ enum { > * too much kernel memory (especially on vmkernel). We limit a queuepair to > * 32 KB, or 16 KB per queue for symmetrical pairs. > */ >-#define VMCI_MAX_PINNED_QP_MEMORY (32 * 1024) >+#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024)) > > /* > * We have a fixed set of resource IDs available in the VMX. >-- >2.6.2 > >_______________________________________________ >Virtualization mailing list >Virtualization at lists.linux-foundation.org >https://lists.linuxfoundation.org/mailman/listinfo/virtualization >