The following series is the current state of my attempt to use wait queues for xenpaging and mem_event handling. I am posting it here for review and comments.

Olaf

 xen/arch/x86/hvm/hvm.c          |    6 +-
 xen/arch/x86/mm/mem_event.c     |   82 +++++++++++++++++++++++++++++++++------
 xen/arch/x86/mm/mem_sharing.c   |   31 +++++----------
 xen/arch/x86/mm/p2m-ept.c       |   46 +++++++++++++++++++---
 xen/arch/x86/mm/p2m.c           |   56 ++++++++++++++-------------
 xen/common/domain.c             |    5 ++
 xen/include/asm-x86/mem_event.h |    4 -
 xen/include/asm-x86/p2m.h       |    1
 xen/include/xen/sched.h         |   32 +++++++++++----
 9 files changed, 183 insertions(+), 80 deletions(-)
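For readers who have not used the hypervisor wait queues before, the pattern the whole series builds on is small: a producer that finds no room goes to sleep with wait_event() on a per-ring waitqueue, and the consumer calls wake_up() once it has made room. Below is a minimal, illustrative sketch of that pattern; init_waitqueue_head(), wait_event() and wake_up() are the xen/wait.h interfaces actually used by the patches, while everything else (the demo_* names, the ring details) is made up for the example.

```c
#include <xen/wait.h>

/* Illustrative only: a ring whose producers may have to wait for room. */
struct demo_ring {
    struct waitqueue_head wq;   /* vcpus sleeping until the ring drains */
    /* ... ring state, lock, indexes ... */
};

static void demo_ring_init(struct demo_ring *r)
{
    init_waitqueue_head(&r->wq);
}

/* Condition helper: returns 1 once the request could be placed, 0 if the
 * ring is still full.  It is re-evaluated every time the vcpu is woken. */
static int demo_try_put(struct demo_ring *r)
{
    /* take the ring lock, check for a free slot, copy the request in ... */
    return 1;
}

static void demo_put(struct demo_ring *r)
{
    /* Puts the current vcpu to sleep until demo_try_put() succeeds. */
    wait_event(r->wq, demo_try_put(r));
}

/* Consumer side (e.g. a *_resume function) after making room in the ring. */
static void demo_room_available(struct demo_ring *r)
{
    wake_up(&r->wq);
}
```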
Olaf Hering
2011-Nov-22 21:13 UTC
[PATCH 1 of 3] mem_event: move mem_event_domain out of struct domain
# HG changeset patch # User Olaf Hering <olaf@aepfle.de> # Date 1321996135 -3600 # Node ID d347a8a36d2e7951f98a3d22866dce004484d95f # Parent d3859e348951cde6b211c5afb610ac1f12a909ec mem_event: move mem_event_domain out of struct domain An upcoming change increases the size of mem_event_domain. The result is a build failure because struct domain gets larger than a page. Allocate the room for the three mem_event_domain members at runtime. Signed-off-by: Olaf Hering <olaf@aepfle.de> diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4098,7 +4098,7 @@ static int hvm_memory_event_traps(long p if ( (p & HVMPME_onchangeonly) && (value == old) ) return 1; - rc = mem_event_check_ring(d, &d->mem_access); + rc = mem_event_check_ring(d, &d->mem_event->mem_access); if ( rc ) return rc; @@ -4121,7 +4121,7 @@ static int hvm_memory_event_traps(long p req.gla_valid = 1; } - mem_event_put_request(d, &d->mem_access, &req); + mem_event_put_request(d, &d->mem_event->mem_access, &req); return 1; } diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_event.c --- a/xen/arch/x86/mm/mem_event.c +++ b/xen/arch/x86/mm/mem_event.c @@ -265,7 +265,7 @@ int mem_event_domctl(struct domain *d, x { case XEN_DOMCTL_MEM_EVENT_OP_PAGING: { - struct mem_event_domain *med = &d->mem_paging; + struct mem_event_domain *med = &d->mem_event->mem_paging; rc = -EINVAL; switch( mec->op ) @@ -310,7 +310,7 @@ int mem_event_domctl(struct domain *d, x case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: { - struct mem_event_domain *med = &d->mem_access; + struct mem_event_domain *med = &d->mem_event->mem_access; rc = -EINVAL; switch( mec->op ) @@ -333,7 +333,7 @@ int mem_event_domctl(struct domain *d, x case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE: { if ( med->ring_page ) - rc = mem_event_disable(&d->mem_access); + rc = mem_event_disable(med); } break; diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_sharing.c --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -281,12 +281,12 @@ static struct page_info* mem_sharing_all vcpu_pause_nosync(v); req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; - if(mem_event_check_ring(d, &d->mem_share)) return page; + if(mem_event_check_ring(d, &d->mem_event->mem_share)) return page; req.gfn = gfn; req.p2mt = p2m_ram_shared; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_share, &req); + mem_event_put_request(d, &d->mem_event->mem_share, &req); return page; } @@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct do mem_event_response_t rsp; /* Get request off the ring */ - mem_event_get_response(&d->mem_share, &rsp); + mem_event_get_response(&d->mem_event->mem_share, &rsp); /* Unpause domain/vcpu */ if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -885,7 +885,7 @@ void p2m_mem_paging_drop_page(struct dom mem_event_request_t req; /* Check that there''s space on the ring for this request */ - if ( mem_event_check_ring(d, &d->mem_paging) == 0) + if ( mem_event_check_ring(d, &d->mem_event->mem_paging) == 0) { /* Send release notification to pager */ memset(&req, 0, sizeof(req)); @@ -893,7 +893,7 @@ void p2m_mem_paging_drop_page(struct dom req.gfn = gfn; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_paging, &req); + mem_event_put_request(d, &d->mem_event->mem_paging, &req); } } @@ -928,7 +928,7 @@ void p2m_mem_paging_populate(struct doma struct p2m_domain *p2m = p2m_get_hostp2m(d); /* Check 
that there''s space on the ring for this request */ - if ( mem_event_check_ring(d, &d->mem_paging) ) + if ( mem_event_check_ring(d, &d->mem_event->mem_paging) ) return; memset(&req, 0, sizeof(req)); @@ -959,7 +959,7 @@ void p2m_mem_paging_populate(struct doma else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) { /* gfn is already on its way back and vcpu is not paused */ - mem_event_put_req_producers(&d->mem_paging); + mem_event_put_req_producers(&d->mem_event->mem_paging); return; } @@ -968,7 +968,7 @@ void p2m_mem_paging_populate(struct doma req.p2mt = p2mt; req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_paging, &req); + mem_event_put_request(d, &d->mem_event->mem_paging, &req); } /** @@ -1048,7 +1048,7 @@ void p2m_mem_paging_resume(struct domain mfn_t mfn; /* Pull the response off the ring */ - mem_event_get_response(&d->mem_paging, &rsp); + mem_event_get_response(&d->mem_event->mem_paging, &rsp); /* Fix p2m entry if the page was not dropped */ if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) @@ -1101,7 +1101,7 @@ void p2m_mem_access_check(unsigned long p2m_unlock(p2m); /* Otherwise, check if there is a memory event listener, and send the message along */ - res = mem_event_check_ring(d, &d->mem_access); + res = mem_event_check_ring(d, &d->mem_event->mem_access); if ( res < 0 ) { /* No listener */ @@ -1145,7 +1145,7 @@ void p2m_mem_access_check(unsigned long req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_access, &req); + mem_event_put_request(d, &d->mem_event->mem_access, &req); /* VCPU paused, mem event request sent */ } @@ -1155,7 +1155,7 @@ void p2m_mem_access_resume(struct p2m_do struct domain *d = p2m->domain; mem_event_response_t rsp; - mem_event_get_response(&d->mem_access, &rsp); + mem_event_get_response(&d->mem_event->mem_access, &rsp); /* Unpause domain */ if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) diff -r d3859e348951 -r d347a8a36d2e xen/common/domain.c --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -304,6 +304,10 @@ struct domain *domain_create( init_status |= INIT_gnttab; poolid = 0; + + d->mem_event = xzalloc(struct mem_event_per_domain); + if ( !d->mem_event ) + goto fail; } if ( arch_domain_create(d, domcr_flags) != 0 ) @@ -335,6 +339,7 @@ struct domain *domain_create( fail: d->is_dying = DOMDYING_dead; atomic_set(&d->refcnt, DOMAIN_DESTROYED); + xfree(d->mem_event); if ( init_status & INIT_arch ) arch_domain_destroy(d); if ( init_status & INIT_gnttab ) diff -r d3859e348951 -r d347a8a36d2e xen/include/xen/sched.h --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -194,6 +194,16 @@ struct mem_event_domain int xen_port; }; +struct mem_event_per_domain +{ + /* Memory sharing support */ + struct mem_event_domain mem_share; + /* Memory paging support */ + struct mem_event_domain mem_paging; + /* Memory access support */ + struct mem_event_domain mem_access; +}; + struct domain { domid_t domain_id; @@ -318,12 +328,8 @@ struct domain /* Non-migratable and non-restoreable? */ bool_t disable_migrate; - /* Memory sharing support */ - struct mem_event_domain mem_share; - /* Memory paging support */ - struct mem_event_domain mem_paging; - /* Memory access support */ - struct mem_event_domain mem_access; + /* Various mem_events */ + struct mem_event_per_domain *mem_event; /* Currently computed from union of all vcpu cpu-affinity masks. */ nodemask_t node_affinity;
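Condensed, the change in patch 1 is mechanical: the three rings move behind a single pointer that is allocated when the domain is created and freed on the creation error path, and call sites change from &d->mem_paging to &d->mem_event->mem_paging and so on. The sketch below only restates that pattern for readability; mem_event_alloc()/mem_event_free() are not functions in the patch, the calls are open-coded in domain_create().

```c
/* Container added by the patch (xen/include/xen/sched.h). */
struct mem_event_per_domain
{
    /* Memory sharing support */
    struct mem_event_domain mem_share;
    /* Memory paging support */
    struct mem_event_domain mem_paging;
    /* Memory access support */
    struct mem_event_domain mem_access;
};

/* Allocation as done in domain_create() (xen/common/domain.c) ... */
static int mem_event_alloc(struct domain *d)
{
    d->mem_event = xzalloc(struct mem_event_per_domain);
    return d->mem_event ? 0 : -ENOMEM;
}

/* ... and the matching release on the 'fail:' path. */
static void mem_event_free(struct domain *d)
{
    xfree(d->mem_event);
    d->mem_event = NULL;
}
```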
Olaf Hering
2011-Nov-22 21:13 UTC
[PATCH 2 of 3] RFC: mem_event: use wait queue when ring is full
# HG changeset patch # User Olaf Hering <olaf@aepfle.de> # Date 1321996145 -3600 # Node ID de6860cb9205b68d1287482288d1b7b9d0255609 # Parent d347a8a36d2e7951f98a3d22866dce004484d95f RFC: mem_event: use wait queue when ring is full CAUTION: while the patch by itself is supposed to be complete, the added usage of waitqueues will lead to sudden host reboots! This change is based on an idea/patch from Adin Scannell. If the ring is full, put the current vcpu to sleep if it belongs to the target domain. The wakeup happens in the p2m_*_resume functions. A request from another domain will use the existing ->req_producers functionality because sleeping is not possible if the vcpu does not belong to the target domain. This change fixes also a bug in p2m_mem_paging_drop_page(). Up to now a full ring will lead to harmless inconsistency in the pager, v2: - p2m_mem_paging_populate: move vcpu_pause after put_request, otherwise the vcpu will not wake_up after a wait_event because the pause_count was increased twice. Fixes guest hangs. - update free space check in _mem_event_put_request() - simplify mem_event_put_request() Signed-off-by: Olaf Hering <olaf@aepfle.de> diff -r d347a8a36d2e -r de6860cb9205 xen/arch/x86/hvm/hvm.c --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -4099,7 +4099,7 @@ static int hvm_memory_event_traps(long p return 1; rc = mem_event_check_ring(d, &d->mem_event->mem_access); - if ( rc ) + if ( rc < 0 ) return rc; memset(&req, 0, sizeof(req)); diff -r d347a8a36d2e -r de6860cb9205 xen/arch/x86/mm/mem_event.c --- a/xen/arch/x86/mm/mem_event.c +++ b/xen/arch/x86/mm/mem_event.c @@ -23,6 +23,7 @@ #include <asm/domain.h> #include <xen/event.h> +#include <xen/wait.h> #include <asm/p2m.h> #include <asm/mem_event.h> #include <asm/mem_paging.h> @@ -39,6 +40,7 @@ static int mem_event_enable(struct domain *d, xen_domctl_mem_event_op_t *mec, + int mem_event_bit, struct mem_event_domain *med) { int rc; @@ -107,8 +109,12 @@ static int mem_event_enable(struct domai mem_event_ring_lock_init(med); + med->mem_event_bit = mem_event_bit; + + init_waitqueue_head(&med->wq); + /* Wake any VCPUs paused for memory events */ - mem_event_unpause_vcpus(d); + mem_event_unpause_vcpus(d, med); return 0; @@ -124,6 +130,9 @@ static int mem_event_enable(struct domai static int mem_event_disable(struct mem_event_domain *med) { + if (!list_empty(&med->wq.list)) + return -EBUSY; + unmap_domain_page(med->ring_page); med->ring_page = NULL; @@ -133,13 +142,21 @@ static int mem_event_disable(struct mem_ return 0; } -void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req) +static int _mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req) { mem_event_front_ring_t *front_ring; + int free_requests; RING_IDX req_prod; mem_event_ring_lock(med); + free_requests = RING_FREE_REQUESTS(&med->front_ring); + /* A request was eventually claimed in mem_event_check_ring() */ + if ((current->domain == d && free_requests <= med->req_producers) || !free_requests) { + mem_event_ring_unlock(med); + return 0; + } + front_ring = &med->front_ring; req_prod = front_ring->req_prod_pvt; @@ -155,6 +172,20 @@ void mem_event_put_request(struct domain mem_event_ring_unlock(med); notify_via_xen_event_channel(d, med->xen_port); + + return 1; +} + +void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req) +{ + /* Go to sleep if request came from guest */ + if (current->domain == d) { + wait_event(med->wq, 
_mem_event_put_request(d, med, req)); + return; + } + /* Ring was full anyway, unable to sleep in non-guest context */ + if (!_mem_event_put_request(d, med, req)) + printk("Failed to put memreq: d %u t %x f %x gfn %lx\n", d->domain_id, req->type, req->flags, (unsigned long)req->gfn); } void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp) @@ -178,21 +209,27 @@ void mem_event_get_response(struct mem_e mem_event_ring_unlock(med); } -void mem_event_unpause_vcpus(struct domain *d) +void mem_event_unpause_vcpus(struct domain *d, struct mem_event_domain *med) { struct vcpu *v; for_each_vcpu ( d, v ) - if ( test_and_clear_bit(_VPF_mem_event, &v->pause_flags) ) + if ( test_and_clear_bit(med->mem_event_bit, &v->pause_flags) ) vcpu_wake(v); } -void mem_event_mark_and_pause(struct vcpu *v) +void mem_event_mark_and_pause(struct vcpu *v, struct mem_event_domain *med) { - set_bit(_VPF_mem_event, &v->pause_flags); + set_bit(med->mem_event_bit, &v->pause_flags); vcpu_sleep_nosync(v); } +/** + * mem_event_put_req_producers - Release a claimed slot + * @med: mem_event ring + * + * mem_event_put_req_producers() releases a claimed slot in the mem_event ring. + */ void mem_event_put_req_producers(struct mem_event_domain *med) { mem_event_ring_lock(med); @@ -200,9 +237,26 @@ void mem_event_put_req_producers(struct mem_event_ring_unlock(med); } +/** + * mem_event_check_ring - Check state of a mem_event ring + * @d: guest domain + * @med: mem_event ring + * + * Return codes: < 0: the ring is not yet configured + * 0: the ring has some room + * > 0: the ring is full + * + * mem_event_check_ring() checks the state of the given mem_event ring. + * If the current vcpu belongs to the guest domain, the function assumes that + * mem_event_put_request() will sleep until the ring has room again. + * + * If the current vcpu does not belong to the target domain the caller must try + * again until there is room. A slot is claimed and the caller can place a + * request. If the caller does not need to send a request, the claimed slot has + * to be released with mem_event_put_req_producers(). 
+ */ int mem_event_check_ring(struct domain *d, struct mem_event_domain *med) { - struct vcpu *curr = current; int free_requests; int ring_full = 1; @@ -218,8 +272,8 @@ int mem_event_check_ring(struct domain * ring_full = 0; } - if ( ring_full && (curr->domain == d) ) - mem_event_mark_and_pause(curr); + if ( current->domain == d ) + ring_full = 0; mem_event_ring_unlock(med); @@ -287,7 +341,7 @@ int mem_event_domctl(struct domain *d, x if ( p2m->pod.entry_count ) break; - rc = mem_event_enable(d, mec, med); + rc = mem_event_enable(d, mec, _VPF_me_mem_paging, med); } break; @@ -326,7 +380,7 @@ int mem_event_domctl(struct domain *d, x if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ) break; - rc = mem_event_enable(d, mec, med); + rc = mem_event_enable(d, mec, _VPF_me_mem_access, med); } break; diff -r d347a8a36d2e -r de6860cb9205 xen/arch/x86/mm/mem_sharing.c --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -253,19 +253,11 @@ static void mem_sharing_audit(void) #endif -static struct page_info* mem_sharing_alloc_page(struct domain *d, - unsigned long gfn) +static void mem_sharing_notify_helper(struct domain *d, unsigned long gfn) { - struct page_info* page; struct vcpu *v = current; mem_event_request_t req; - page = alloc_domheap_page(d, 0); - if(page != NULL) return page; - - memset(&req, 0, sizeof(req)); - req.type = MEM_EVENT_TYPE_SHARED; - if ( v->domain != d ) { /* XXX This path needs some attention. For now, just fail foreign @@ -275,20 +267,20 @@ static struct page_info* mem_sharing_all gdprintk(XENLOG_ERR, "Failed alloc on unshare path for foreign (%d) lookup\n", d->domain_id); - return page; + return; } - vcpu_pause_nosync(v); - req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; + if (mem_event_check_ring(d, &d->mem_event->mem_share) < 0) + return; - if(mem_event_check_ring(d, &d->mem_event->mem_share)) return page; - + memset(&req, 0, sizeof(req)); + req.type = MEM_EVENT_TYPE_SHARED; + req.flags = MEM_EVENT_FLAG_VCPU_PAUSED; req.gfn = gfn; req.p2mt = p2m_ram_shared; req.vcpu_id = v->vcpu_id; mem_event_put_request(d, &d->mem_event->mem_share, &req); - - return page; + vcpu_pause_nosync(v); } unsigned int mem_sharing_get_nr_saved_mfns(void) @@ -653,7 +645,7 @@ gfn_found: if(ret == 0) goto private_page_found; old_page = page; - page = mem_sharing_alloc_page(d, gfn); + page = alloc_domheap_page(d, 0); if(!page) { /* We''ve failed to obtain memory for private page. 
Need to re-add the @@ -661,6 +653,7 @@ gfn_found: list_add(&gfn_info->list, &hash_entry->gfns); put_gfn(d, gfn); shr_unlock(); + mem_sharing_notify_helper(d, gfn); return -ENOMEM; } diff -r d347a8a36d2e -r de6860cb9205 xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -884,17 +884,13 @@ void p2m_mem_paging_drop_page(struct dom struct vcpu *v = current; mem_event_request_t req; - /* Check that there''s space on the ring for this request */ - if ( mem_event_check_ring(d, &d->mem_event->mem_paging) == 0) - { - /* Send release notification to pager */ - memset(&req, 0, sizeof(req)); - req.flags |= MEM_EVENT_FLAG_DROP_PAGE; - req.gfn = gfn; - req.vcpu_id = v->vcpu_id; + /* Send release notification to pager */ + memset(&req, 0, sizeof(req)); + req.flags = MEM_EVENT_FLAG_DROP_PAGE; + req.gfn = gfn; + req.vcpu_id = v->vcpu_id; - mem_event_put_request(d, &d->mem_event->mem_paging, &req); - } + mem_event_put_request(d, &d->mem_event->mem_paging, &req); } /** @@ -952,7 +948,6 @@ void p2m_mem_paging_populate(struct doma /* Pause domain if request came from guest and gfn has paging type */ if ( p2m_is_paging(p2mt) && v->domain == d ) { - vcpu_pause_nosync(v); req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; } /* No need to inform pager if the gfn is not in the page-out path */ @@ -969,6 +964,10 @@ void p2m_mem_paging_populate(struct doma req.vcpu_id = v->vcpu_id; mem_event_put_request(d, &d->mem_event->mem_paging, &req); + + /* Pause guest after put_request to allow wake_up after wait_event */ + if ( req.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) + vcpu_pause_nosync(v); } /** @@ -1071,8 +1070,8 @@ void p2m_mem_paging_resume(struct domain if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) vcpu_unpause(d->vcpu[rsp.vcpu_id]); - /* Unpause any domains that were paused because the ring was full */ - mem_event_unpause_vcpus(d); + /* Unpause all vcpus that were paused because the ring was full */ + wake_up(&d->mem_event->mem_paging.wq); } void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla, @@ -1111,7 +1110,7 @@ void p2m_mem_access_check(unsigned long "Memory access permissions failure, no mem_event listener: pausing VCPU %d, dom %d\n", v->vcpu_id, d->domain_id); - mem_event_mark_and_pause(v); + mem_event_mark_and_pause(v, &d->mem_event->mem_access); } else { @@ -1131,8 +1130,7 @@ void p2m_mem_access_check(unsigned long req.reason = MEM_EVENT_REASON_VIOLATION; /* Pause the current VCPU unconditionally */ - vcpu_pause_nosync(v); - req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; + req.flags = MEM_EVENT_FLAG_VCPU_PAUSED; /* Send request to mem event */ req.gfn = gfn; @@ -1148,6 +1146,7 @@ void p2m_mem_access_check(unsigned long mem_event_put_request(d, &d->mem_event->mem_access, &req); /* VCPU paused, mem event request sent */ + vcpu_pause_nosync(v); } void p2m_mem_access_resume(struct p2m_domain *p2m) @@ -1161,9 +1160,11 @@ void p2m_mem_access_resume(struct p2m_do if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) vcpu_unpause(d->vcpu[rsp.vcpu_id]); - /* Unpause any domains that were paused because the ring was full or no listener - * was available */ - mem_event_unpause_vcpus(d); + /* Wakeup all vcpus waiting because the ring was full */ + wake_up(&d->mem_event->mem_access.wq); + + /* Unpause all vcpus that were paused because no listener was available */ + mem_event_unpause_vcpus(d, &d->mem_event->mem_access); } diff -r d347a8a36d2e -r de6860cb9205 xen/include/asm-x86/mem_event.h --- a/xen/include/asm-x86/mem_event.h +++ b/xen/include/asm-x86/mem_event.h @@ -25,12 +25,12 @@ 
#define __MEM_EVENT_H__ /* Pauses VCPU while marking pause flag for mem event */ -void mem_event_mark_and_pause(struct vcpu *v); +void mem_event_mark_and_pause(struct vcpu *v, struct mem_event_domain *med); int mem_event_check_ring(struct domain *d, struct mem_event_domain *med); void mem_event_put_req_producers(struct mem_event_domain *med); void mem_event_put_request(struct domain *d, struct mem_event_domain *med, mem_event_request_t *req); void mem_event_get_response(struct mem_event_domain *med, mem_event_response_t *rsp); -void mem_event_unpause_vcpus(struct domain *d); +void mem_event_unpause_vcpus(struct domain *d, struct mem_event_domain *med); int mem_event_domctl(struct domain *d, xen_domctl_mem_event_op_t *mec, XEN_GUEST_HANDLE(void) u_domctl); diff -r d347a8a36d2e -r de6860cb9205 xen/include/xen/sched.h --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -14,6 +14,7 @@ #include <xen/nodemask.h> #include <xen/radix-tree.h> #include <xen/multicall.h> +#include <xen/wait.h> #include <public/xen.h> #include <public/domctl.h> #include <public/sysctl.h> @@ -192,6 +193,10 @@ struct mem_event_domain mem_event_front_ring_t front_ring; /* event channel port (vcpu0 only) */ int xen_port; + /* mem_event bit for vcpu->pause_flags */ + int mem_event_bit; + /* list of vcpus waiting for room in the ring */ + struct waitqueue_head wq; }; struct mem_event_per_domain @@ -615,9 +620,12 @@ static inline struct domain *next_domain /* VCPU affinity has changed: migrating to a new CPU. */ #define _VPF_migrating 3 #define VPF_migrating (1UL<<_VPF_migrating) - /* VCPU is blocked on memory-event ring. */ -#define _VPF_mem_event 4 -#define VPF_mem_event (1UL<<_VPF_mem_event) + /* VCPU is blocked on mem_paging ring. */ +#define _VPF_me_mem_paging 4 +#define VPF_me_mem_paging (1UL<<_VPF_me_mem_paging) + /* VCPU is blocked on mem_access ring. */ +#define _VPF_me_mem_access 5 +#define VPF_me_mem_access (1UL<<_VPF_me_mem_access) static inline int vcpu_runnable(struct vcpu *v) {
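To make the new request path in patch 2 easier to follow, here is a condensed restatement of the producer side. It is illustrative only; the hunks above are authoritative, the structure and comments are paraphrased, and the warning message differs from the printk in the patch.

```c
/* Returns 1 if the request was placed, 0 if the ring is (still) full. */
static int _mem_event_put_request(struct domain *d,
                                  struct mem_event_domain *med,
                                  mem_event_request_t *req)
{
    int placed = 0;

    mem_event_ring_lock(med);

    /* Foreign vcpus have claimed a slot via ->req_producers in
     * mem_event_check_ring(); a guest vcpu must not steal it. */
    if ( RING_FREE_REQUESTS(&med->front_ring) >
         ((current->domain == d) ? med->req_producers : 0) )
    {
        /* ... copy *req into the ring, advance req_prod_pvt, push ... */
        placed = 1;
    }

    mem_event_ring_unlock(med);

    if ( placed )
        notify_via_xen_event_channel(d, med->xen_port);

    return placed;
}

void mem_event_put_request(struct domain *d, struct mem_event_domain *med,
                           mem_event_request_t *req)
{
    if ( current->domain == d )
        /* Guest context: safe to sleep until the consumer makes room;
         * the matching wake_up() is in the p2m_*_resume functions. */
        wait_event(med->wq, _mem_event_put_request(d, med, req));
    else if ( !_mem_event_put_request(d, med, req) )
        /* Foreign context: cannot sleep.  The slot claimed in
         * mem_event_check_ring() should make this unreachable. */
        gdprintk(XENLOG_WARNING, "failed to put mem_event request\n");
}
```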
# HG changeset patch # User Olaf Hering <olaf@aepfle.de> # Date 1321996199 -3600 # Node ID 9d63ecd3969bb7a2e39841f6c859b4c23f750642 # Parent de6860cb9205b68d1287482288d1b7b9d0255609 RFC: xenpaging: use waitqueue in ept_get Signed-off-by: Olaf Hering <olaf@aepfle.de> diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m-ept.c --- a/xen/arch/x86/mm/p2m-ept.c +++ b/xen/arch/x86/mm/p2m-ept.c @@ -505,7 +505,7 @@ out: } /* Read ept p2m entries */ -static mfn_t ept_get_entry(struct p2m_domain *p2m, +static unsigned long _ept_get_entry(struct p2m_domain *p2m, unsigned long gfn, p2m_type_t *t, p2m_access_t* a, p2m_query_t q, unsigned int *page_order) { @@ -516,7 +516,7 @@ static mfn_t ept_get_entry(struct p2m_do u32 index; int i; int ret = 0; - mfn_t mfn = _mfn(INVALID_MFN); + unsigned long mfn = INVALID_MFN; *t = p2m_mmio_dm; *a = p2m_access_n; @@ -582,17 +582,14 @@ static mfn_t ept_get_entry(struct p2m_do *t = ept_entry->sa_p2mt; *a = ept_entry->access; - mfn = _mfn(ept_entry->mfn); + mfn = ept_entry->mfn; if ( i ) { /* * We may meet super pages, and to split into 4k pages * to emulate p2m table */ - unsigned long split_mfn = mfn_x(mfn) + - (gfn_remainder & - ((1 << (i * EPT_TABLE_ORDER)) - 1)); - mfn = _mfn(split_mfn); + mfn += (gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1)); } if ( page_order ) @@ -604,6 +601,41 @@ out: return mfn; } +static int +ept_get_entry_wait(unsigned long *mfn, int *populated, + struct p2m_domain *p2m, unsigned long gfn, + p2m_type_t *t, p2m_access_t *a, p2m_query_t q, + unsigned int *page_order) +{ + int ret = 1; + *mfn = _ept_get_entry(p2m, gfn, t, a, q, page_order); + + /* No further action in case of query */ + if ( q == p2m_query ) + goto done; + + /* Populate the page once in case of guest access, then go to sleep */ + if ( p2m_is_paging(*t) && current->domain == p2m->domain ) { + if ( *populated == 0 ) { + *populated = 1; + p2m_mem_paging_populate(p2m->domain, gfn); + } + ret = 0; + } +done: + return ret; +} +static mfn_t +ept_get_entry(struct p2m_domain *p2m, unsigned long gfn, + p2m_type_t *t, p2m_access_t *a, p2m_query_t q, + unsigned int *page_order) +{ + unsigned long mfn; + int populated = 0; + wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn, t, a, q, page_order)); + return _mfn(mfn); +} + /* WARNING: Only caller doesn''t care about PoD pages. So this function will * always return 0 for PoD pages, not populate them. If that becomes necessary, * pass a p2m_query_t type along to distinguish. */ diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -76,6 +76,7 @@ static void p2m_initialise(struct domain INIT_PAGE_LIST_HEAD(&p2m->pages); INIT_PAGE_LIST_HEAD(&p2m->pod.super); INIT_PAGE_LIST_HEAD(&p2m->pod.single); + init_waitqueue_head(&p2m->wq); p2m->domain = d; p2m->default_access = p2m_access_rwx; @@ -1072,6 +1073,8 @@ void p2m_mem_paging_resume(struct domain /* Unpause all vcpus that were paused because the ring was full */ wake_up(&d->mem_event->mem_paging.wq); + /* Unpause all vcpus that were paused because the gfn was paged */ + wake_up(&p2m->wq); } void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned long gla, diff -r de6860cb9205 -r 9d63ecd3969b xen/include/asm-x86/p2m.h --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -286,6 +286,7 @@ struct p2m_domain { unsigned reclaim_single; /* Last gpfn of a scan */ unsigned max_guest; /* gpfn of max guest demand-populate */ } pod; + struct waitqueue_head wq; }; /* get host p2m table */
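The idea of patch 3, restated: the EPT lookup itself becomes the wait condition. A guest vcpu that hits a paged-out gfn asks the pager to populate it once, then sleeps on a per-p2m waitqueue; every time the pager completes a page-in, p2m_mem_paging_resume() wakes the queue and the lookup is retried. A condensed, illustrative view of the wrapper (the diff above has the full version):

```c
static mfn_t ept_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                           p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                           unsigned int *page_order)
{
    unsigned long mfn;
    int populated = 0;

    /*
     * ept_get_entry_wait() re-reads the entry on every wake-up.  It returns
     * 1 (stop waiting) for p2m_query lookups, for foreign vcpus, and once
     * the type is no longer a paging type; it returns 0 (keep sleeping)
     * while the gfn is paged out, calling p2m_mem_paging_populate() only on
     * the first pass.
     */
    wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn,
                                           t, a, q, page_order));
    return _mfn(mfn);
}

/* Waker side, in p2m_mem_paging_resume(), after the p2m entry was fixed:
 *     wake_up(&p2m->wq);
 */
```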
Jan Beulich
2011-Nov-23 08:41 UTC
Re: [PATCH 1 of 3] mem_event: move mem_event_domain out of struct domain
>>> On 22.11.11 at 22:13, Olaf Hering <olaf@aepfle.de> wrote: > # HG changeset patch > # User Olaf Hering <olaf@aepfle.de> > # Date 1321996135 -3600 > # Node ID d347a8a36d2e7951f98a3d22866dce004484d95f > # Parent d3859e348951cde6b211c5afb610ac1f12a909ec > mem_event: move mem_event_domain out of struct domain > > An upcoming change increases the size of mem_event_domain. The result is a > build failure because struct domain gets larger than a page. Allocate the > room > for the three mem_event_domain members at runtime.This looks like a good general cleanup thing to me. One comment below.> Signed-off-by: Olaf Hering <olaf@aepfle.de> > > diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/hvm/hvm.c > --- a/xen/arch/x86/hvm/hvm.c > +++ b/xen/arch/x86/hvm/hvm.c > @@ -4098,7 +4098,7 @@ static int hvm_memory_event_traps(long p > if ( (p & HVMPME_onchangeonly) && (value == old) ) > return 1; > > - rc = mem_event_check_ring(d, &d->mem_access); > + rc = mem_event_check_ring(d, &d->mem_event->mem_access); > if ( rc ) > return rc; > > @@ -4121,7 +4121,7 @@ static int hvm_memory_event_traps(long p > req.gla_valid = 1; > } > > - mem_event_put_request(d, &d->mem_access, &req); > + mem_event_put_request(d, &d->mem_event->mem_access, &req); > > return 1; > } > diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_event.c > --- a/xen/arch/x86/mm/mem_event.c > +++ b/xen/arch/x86/mm/mem_event.c > @@ -265,7 +265,7 @@ int mem_event_domctl(struct domain *d, x > { > case XEN_DOMCTL_MEM_EVENT_OP_PAGING: > { > - struct mem_event_domain *med = &d->mem_paging; > + struct mem_event_domain *med = &d->mem_event->mem_paging; > rc = -EINVAL; > > switch( mec->op ) > @@ -310,7 +310,7 @@ int mem_event_domctl(struct domain *d, x > > case XEN_DOMCTL_MEM_EVENT_OP_ACCESS: > { > - struct mem_event_domain *med = &d->mem_access; > + struct mem_event_domain *med = &d->mem_event->mem_access; > rc = -EINVAL; > > switch( mec->op ) > @@ -333,7 +333,7 @@ int mem_event_domctl(struct domain *d, x > case XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE: > { > if ( med->ring_page ) > - rc = mem_event_disable(&d->mem_access); > + rc = mem_event_disable(med); > } > break; > > diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/mem_sharing.c > --- a/xen/arch/x86/mm/mem_sharing.c > +++ b/xen/arch/x86/mm/mem_sharing.c > @@ -281,12 +281,12 @@ static struct page_info* mem_sharing_all > vcpu_pause_nosync(v); > req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; > > - if(mem_event_check_ring(d, &d->mem_share)) return page; > + if(mem_event_check_ring(d, &d->mem_event->mem_share)) return page; > > req.gfn = gfn; > req.p2mt = p2m_ram_shared; > req.vcpu_id = v->vcpu_id; > - mem_event_put_request(d, &d->mem_share, &req); > + mem_event_put_request(d, &d->mem_event->mem_share, &req); > > return page; > } > @@ -301,7 +301,7 @@ int mem_sharing_sharing_resume(struct do > mem_event_response_t rsp; > > /* Get request off the ring */ > - mem_event_get_response(&d->mem_share, &rsp); > + mem_event_get_response(&d->mem_event->mem_share, &rsp); > > /* Unpause domain/vcpu */ > if( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) > diff -r d3859e348951 -r d347a8a36d2e xen/arch/x86/mm/p2m.c > --- a/xen/arch/x86/mm/p2m.c > +++ b/xen/arch/x86/mm/p2m.c > @@ -885,7 +885,7 @@ void p2m_mem_paging_drop_page(struct dom > mem_event_request_t req; > > /* Check that there''s space on the ring for this request */ > - if ( mem_event_check_ring(d, &d->mem_paging) == 0) > + if ( mem_event_check_ring(d, &d->mem_event->mem_paging) == 0) > { > /* Send release notification to pager */ > memset(&req, 0, 
sizeof(req)); > @@ -893,7 +893,7 @@ void p2m_mem_paging_drop_page(struct dom > req.gfn = gfn; > req.vcpu_id = v->vcpu_id; > > - mem_event_put_request(d, &d->mem_paging, &req); > + mem_event_put_request(d, &d->mem_event->mem_paging, &req); > } > } > > @@ -928,7 +928,7 @@ void p2m_mem_paging_populate(struct doma > struct p2m_domain *p2m = p2m_get_hostp2m(d); > > /* Check that there''s space on the ring for this request */ > - if ( mem_event_check_ring(d, &d->mem_paging) ) > + if ( mem_event_check_ring(d, &d->mem_event->mem_paging) ) > return; > > memset(&req, 0, sizeof(req)); > @@ -959,7 +959,7 @@ void p2m_mem_paging_populate(struct doma > else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) > { > /* gfn is already on its way back and vcpu is not paused */ > - mem_event_put_req_producers(&d->mem_paging); > + mem_event_put_req_producers(&d->mem_event->mem_paging); > return; > } > > @@ -968,7 +968,7 @@ void p2m_mem_paging_populate(struct doma > req.p2mt = p2mt; > req.vcpu_id = v->vcpu_id; > > - mem_event_put_request(d, &d->mem_paging, &req); > + mem_event_put_request(d, &d->mem_event->mem_paging, &req); > } > > /** > @@ -1048,7 +1048,7 @@ void p2m_mem_paging_resume(struct domain > mfn_t mfn; > > /* Pull the response off the ring */ > - mem_event_get_response(&d->mem_paging, &rsp); > + mem_event_get_response(&d->mem_event->mem_paging, &rsp); > > /* Fix p2m entry if the page was not dropped */ > if ( !(rsp.flags & MEM_EVENT_FLAG_DROP_PAGE) ) > @@ -1101,7 +1101,7 @@ void p2m_mem_access_check(unsigned long > p2m_unlock(p2m); > > /* Otherwise, check if there is a memory event listener, and send the > message along */ > - res = mem_event_check_ring(d, &d->mem_access); > + res = mem_event_check_ring(d, &d->mem_event->mem_access); > if ( res < 0 ) > { > /* No listener */ > @@ -1145,7 +1145,7 @@ void p2m_mem_access_check(unsigned long > > req.vcpu_id = v->vcpu_id; > > - mem_event_put_request(d, &d->mem_access, &req); > + mem_event_put_request(d, &d->mem_event->mem_access, &req); > > /* VCPU paused, mem event request sent */ > } > @@ -1155,7 +1155,7 @@ void p2m_mem_access_resume(struct p2m_do > struct domain *d = p2m->domain; > mem_event_response_t rsp; > > - mem_event_get_response(&d->mem_access, &rsp); > + mem_event_get_response(&d->mem_event->mem_access, &rsp); > > /* Unpause domain */ > if ( rsp.flags & MEM_EVENT_FLAG_VCPU_PAUSED ) > diff -r d3859e348951 -r d347a8a36d2e xen/common/domain.c > --- a/xen/common/domain.c > +++ b/xen/common/domain.c > @@ -304,6 +304,10 @@ struct domain *domain_create( > init_status |= INIT_gnttab; > > poolid = 0; > + > + d->mem_event = xzalloc(struct mem_event_per_domain); > + if ( !d->mem_event ) > + goto fail; > } > > if ( arch_domain_create(d, domcr_flags) != 0 ) > @@ -335,6 +339,7 @@ struct domain *domain_create( > fail: > d->is_dying = DOMDYING_dead; > atomic_set(&d->refcnt, DOMAIN_DESTROYED); > + xfree(d->mem_event); > if ( init_status & INIT_arch ) > arch_domain_destroy(d); > if ( init_status & INIT_gnttab ) > diff -r d3859e348951 -r d347a8a36d2e xen/include/xen/sched.h > --- a/xen/include/xen/sched.h > +++ b/xen/include/xen/sched.h > @@ -194,6 +194,16 @@ struct mem_event_domain > int xen_port; > }; > > +struct mem_event_per_domain > +{ > + /* Memory sharing support */ > + struct mem_event_domain mem_share; > + /* Memory paging support */ > + struct mem_event_domain mem_paging; > + /* Memory access support */ > + struct mem_event_domain mem_access;Could we drop the mem_ prefixes here? Reduces typing as well as line wrapping pressure. 
Jan

> +};
> +
>  struct domain
>  {
>      domid_t domain_id;
> @@ -318,12 +328,8 @@ struct domain
>      /* Non-migratable and non-restoreable? */
>      bool_t disable_migrate;
>
> -    /* Memory sharing support */
> -    struct mem_event_domain mem_share;
> -    /* Memory paging support */
> -    struct mem_event_domain mem_paging;
> -    /* Memory access support */
> -    struct mem_event_domain mem_access;
> +    /* Various mem_events */
> +    struct mem_event_per_domain *mem_event;
>
>      /* Currently computed from union of all vcpu cpu-affinity masks. */
>      nodemask_t node_affinity;
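For concreteness, dropping the prefixes as suggested would make the container look like this (an illustration of the proposed naming only, not part of the posted patch), with call sites becoming d->mem_event->paging and so on:

```c
struct mem_event_per_domain
{
    /* Memory sharing support */
    struct mem_event_domain share;
    /* Memory paging support */
    struct mem_event_domain paging;
    /* Memory access support */
    struct mem_event_domain access;
};
```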
Jan Beulich
2011-Nov-23 08:49 UTC
Re: [PATCH 2 of 3] RFC: mem_event: use wait queue when ring is full
>>> On 22.11.11 at 22:13, Olaf Hering <olaf@aepfle.de> wrote:
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -14,6 +14,7 @@
>  #include <xen/nodemask.h>
>  #include <xen/radix-tree.h>
>  #include <xen/multicall.h>
> +#include <xen/wait.h>
>  #include <public/xen.h>
>  #include <public/domctl.h>
>  #include <public/sysctl.h>
> @@ -192,6 +193,10 @@ struct mem_event_domain
>      mem_event_front_ring_t front_ring;
>      /* event channel port (vcpu0 only) */
>      int xen_port;
> +    /* mem_event bit for vcpu->pause_flags */
> +    int mem_event_bit;

Perhaps pause_bit would be a better name here? Or at least, as for the
first patch, the mem_ prefix should go away (or really the mem_event_
one, but that would just leave "bit", which is how I got to the above
proposal).

> +    /* list of vcpus waiting for room in the ring */
> +    struct waitqueue_head wq;
>  };
>
>  struct mem_event_per_domain
> @@ -615,9 +620,12 @@ static inline struct domain *next_domain
>   /* VCPU affinity has changed: migrating to a new CPU. */
>  #define _VPF_migrating 3
>  #define VPF_migrating (1UL<<_VPF_migrating)
> - /* VCPU is blocked on memory-event ring. */
> -#define _VPF_mem_event 4
> -#define VPF_mem_event (1UL<<_VPF_mem_event)
> + /* VCPU is blocked on mem_paging ring. */
> +#define _VPF_me_mem_paging 4
> +#define VPF_me_mem_paging (1UL<<_VPF_me_mem_paging)
> + /* VCPU is blocked on mem_access ring. */
> +#define _VPF_me_mem_access 5
> +#define VPF_me_mem_access (1UL<<_VPF_me_mem_access)

Same here - the mem_ seems superfluous.

Jan

>
>  static inline int vcpu_runnable(struct vcpu *v)
>  {
Jan Beulich
2011-Nov-23 08:54 UTC
Re: [PATCH 3 of 3] RFC: xenpaging: use waitqueue in ept_get
>>> On 22.11.11 at 22:13, Olaf Hering <olaf@aepfle.de> wrote: > # HG changeset patch > # User Olaf Hering <olaf@aepfle.de> > # Date 1321996199 -3600 > # Node ID 9d63ecd3969bb7a2e39841f6c859b4c23f750642 > # Parent de6860cb9205b68d1287482288d1b7b9d0255609 > RFC: xenpaging: use waitqueue in ept_get > > Signed-off-by: Olaf Hering <olaf@aepfle.de> > > diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m-ept.c > --- a/xen/arch/x86/mm/p2m-ept.c > +++ b/xen/arch/x86/mm/p2m-ept.c > @@ -505,7 +505,7 @@ out: > } > > /* Read ept p2m entries */ > -static mfn_t ept_get_entry(struct p2m_domain *p2m, > +static unsigned long _ept_get_entry(struct p2m_domain *p2m,Looking at the rest of the patch I can''t see why the type change here and below is necessary. It is my understanding that the wrapped types exist to reduce the chance of mistakes, so they should be kept whenever possible. Jan> unsigned long gfn, p2m_type_t *t, p2m_access_t* a, > p2m_query_t q, unsigned int *page_order) > { > @@ -516,7 +516,7 @@ static mfn_t ept_get_entry(struct p2m_do > u32 index; > int i; > int ret = 0; > - mfn_t mfn = _mfn(INVALID_MFN); > + unsigned long mfn = INVALID_MFN; > > *t = p2m_mmio_dm; > *a = p2m_access_n; > @@ -582,17 +582,14 @@ static mfn_t ept_get_entry(struct p2m_do > *t = ept_entry->sa_p2mt; > *a = ept_entry->access; > > - mfn = _mfn(ept_entry->mfn); > + mfn = ept_entry->mfn; > if ( i ) > { > /* > * We may meet super pages, and to split into 4k pages > * to emulate p2m table > */ > - unsigned long split_mfn = mfn_x(mfn) + > - (gfn_remainder & > - ((1 << (i * EPT_TABLE_ORDER)) - 1)); > - mfn = _mfn(split_mfn); > + mfn += (gfn_remainder & ((1 << (i * EPT_TABLE_ORDER)) - 1)); > } > > if ( page_order ) > @@ -604,6 +601,41 @@ out: > return mfn; > } > > +static int > +ept_get_entry_wait(unsigned long *mfn, int *populated, > + struct p2m_domain *p2m, unsigned long gfn, > + p2m_type_t *t, p2m_access_t *a, p2m_query_t q, > + unsigned int *page_order) > +{ > + int ret = 1; > + *mfn = _ept_get_entry(p2m, gfn, t, a, q, page_order); > + > + /* No further action in case of query */ > + if ( q == p2m_query ) > + goto done; > + > + /* Populate the page once in case of guest access, then go to sleep */ > + if ( p2m_is_paging(*t) && current->domain == p2m->domain ) { > + if ( *populated == 0 ) { > + *populated = 1; > + p2m_mem_paging_populate(p2m->domain, gfn); > + } > + ret = 0; > + } > +done: > + return ret; > +} > +static mfn_t > +ept_get_entry(struct p2m_domain *p2m, unsigned long gfn, > + p2m_type_t *t, p2m_access_t *a, p2m_query_t q, > + unsigned int *page_order) > +{ > + unsigned long mfn; > + int populated = 0; > + wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn, t, a, q, > page_order)); > + return _mfn(mfn); > +} > + > /* WARNING: Only caller doesn''t care about PoD pages. So this function > will > * always return 0 for PoD pages, not populate them. If that becomes > necessary, > * pass a p2m_query_t type along to distinguish. 
*/ > diff -r de6860cb9205 -r 9d63ecd3969b xen/arch/x86/mm/p2m.c > --- a/xen/arch/x86/mm/p2m.c > +++ b/xen/arch/x86/mm/p2m.c > @@ -76,6 +76,7 @@ static void p2m_initialise(struct domain > INIT_PAGE_LIST_HEAD(&p2m->pages); > INIT_PAGE_LIST_HEAD(&p2m->pod.super); > INIT_PAGE_LIST_HEAD(&p2m->pod.single); > + init_waitqueue_head(&p2m->wq); > > p2m->domain = d; > p2m->default_access = p2m_access_rwx; > @@ -1072,6 +1073,8 @@ void p2m_mem_paging_resume(struct domain > > /* Unpause all vcpus that were paused because the ring was full */ > wake_up(&d->mem_event->mem_paging.wq); > + /* Unpause all vcpus that were paused because the gfn was paged */ > + wake_up(&p2m->wq); > } > > void p2m_mem_access_check(unsigned long gpa, bool_t gla_valid, unsigned > long gla, > diff -r de6860cb9205 -r 9d63ecd3969b xen/include/asm-x86/p2m.h > --- a/xen/include/asm-x86/p2m.h > +++ b/xen/include/asm-x86/p2m.h > @@ -286,6 +286,7 @@ struct p2m_domain { > unsigned reclaim_single; /* Last gpfn of a scan */ > unsigned max_guest; /* gpfn of max guest demand-populate > */ > } pod; > + struct waitqueue_head wq; > }; > > /* get host p2m table */ > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xensource.com > http://lists.xensource.com/xen-devel
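To illustrate the point about keeping the wrapped type: the wait-condition helper can take an mfn_t pointer and _ept_get_entry() can keep returning mfn_t, so the unsigned long conversion becomes unnecessary. This is only a sketch of that alternative, not a tested change:

```c
static int ept_get_entry_wait(mfn_t *mfn, int *populated,
                              struct p2m_domain *p2m, unsigned long gfn,
                              p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                              unsigned int *page_order)
{
    int ret = 1;

    /* _ept_get_entry() keeps its original mfn_t return type. */
    *mfn = _ept_get_entry(p2m, gfn, t, a, q, page_order);

    /* No further action in case of query. */
    if ( q == p2m_query )
        return ret;

    /* Populate the page once on guest access, then go to sleep. */
    if ( p2m_is_paging(*t) && current->domain == p2m->domain )
    {
        if ( !*populated )
        {
            *populated = 1;
            p2m_mem_paging_populate(p2m->domain, gfn);
        }
        ret = 0;
    }

    return ret;
}

static mfn_t ept_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                           p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                           unsigned int *page_order)
{
    mfn_t mfn;
    int populated = 0;

    wait_event(p2m->wq, ept_get_entry_wait(&mfn, &populated, p2m, gfn,
                                           t, a, q, page_order));
    return mfn;
}
```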
Keir Fraser
2011-Nov-23 09:01 UTC
Re: [PATCH 2 of 3] RFC: mem_event: use wait queue when ring is full
On 23/11/2011 08:49, "Jan Beulich" <JBeulich@suse.com> wrote:

>>>> On 22.11.11 at 22:13, Olaf Hering <olaf@aepfle.de> wrote:
>> --- a/xen/include/xen/sched.h
>> +++ b/xen/include/xen/sched.h
>> @@ -14,6 +14,7 @@
>>  #include <xen/nodemask.h>
>>  #include <xen/radix-tree.h>
>>  #include <xen/multicall.h>
>> +#include <xen/wait.h>
>>  #include <public/xen.h>
>>  #include <public/domctl.h>
>>  #include <public/sysctl.h>
>> @@ -192,6 +193,10 @@ struct mem_event_domain
>>      mem_event_front_ring_t front_ring;
>>      /* event channel port (vcpu0 only) */
>>      int xen_port;
>> +    /* mem_event bit for vcpu->pause_flags */
>> +    int mem_event_bit;
>
> Perhaps pause_bit would be a better name here? Or at least, as for
> the first patch, the mem_ prefix should go away (or really the
> mem_event_ one, but that would just leave "bit", which is how I got
> to the above proposal).

Yes, mem_event_bit is a lazy name here. Doesn't really describe what the
bit is actually for. It's obviously mem_event related because of the struct
it is a member of.

>> +    /* list of vcpus waiting for room in the ring */
>> +    struct waitqueue_head wq;
>>  };
>>
>>  struct mem_event_per_domain
>> @@ -615,9 +620,12 @@ static inline struct domain *next_domain
>>   /* VCPU affinity has changed: migrating to a new CPU. */
>>  #define _VPF_migrating 3
>>  #define VPF_migrating (1UL<<_VPF_migrating)
>> - /* VCPU is blocked on memory-event ring. */
>> -#define _VPF_mem_event 4
>> -#define VPF_mem_event (1UL<<_VPF_mem_event)
>> + /* VCPU is blocked on mem_paging ring. */
>> +#define _VPF_me_mem_paging 4
>> +#define VPF_me_mem_paging (1UL<<_VPF_me_mem_paging)
>> + /* VCPU is blocked on mem_access ring. */
>> +#define _VPF_me_mem_access 5
>> +#define VPF_me_mem_access (1UL<<_VPF_me_mem_access)
>
> Same here - the mem_ seems superfluous.

Mem_event-related flags in a more general grouping do require a mem_ prefix
imo. The names need to stand on their own and still be descriptive.

 -- Keir

> Jan
>
>>
>>  static inline int vcpu_runnable(struct vcpu *v)
>>  {
Jan Beulich
2011-Nov-23 09:44 UTC
Re: [PATCH 2 of 3] RFC: mem_event: use wait queue when ring is full
>>> On 23.11.11 at 10:01, Keir Fraser <keir@xen.org> wrote:
> On 23/11/2011 08:49, "Jan Beulich" <JBeulich@suse.com> wrote:
>
>>>>> On 22.11.11 at 22:13, Olaf Hering <olaf@aepfle.de> wrote:
>>> @@ -615,9 +620,12 @@ static inline struct domain *next_domain
>>>   /* VCPU affinity has changed: migrating to a new CPU. */
>>>  #define _VPF_migrating 3
>>>  #define VPF_migrating (1UL<<_VPF_migrating)
>>> - /* VCPU is blocked on memory-event ring. */
>>> -#define _VPF_mem_event 4
>>> -#define VPF_mem_event (1UL<<_VPF_mem_event)
>>> + /* VCPU is blocked on mem_paging ring. */
>>> +#define _VPF_me_mem_paging 4
>>> +#define VPF_me_mem_paging (1UL<<_VPF_me_mem_paging)
>>> + /* VCPU is blocked on mem_access ring. */
>>> +#define _VPF_me_mem_access 5
>>> +#define VPF_me_mem_access (1UL<<_VPF_me_mem_access)
>>
>> Same here - the mem_ seems superfluous.
>
> Mem_event-related flags in a more general grouping do require a mem_ prefix
> imo. The names need to stand on their own and still be descriptive.

But me_mem_ is still bogus - I thought the me_ stands for "mem event",
and then the subsequent mem_ is unnecessary.

Jan
Keir Fraser
2011-Nov-23 09:49 UTC
Re: [PATCH 2 of 3] RFC: mem_event: use wait queue when ring is full
On 23/11/2011 09:44, "Jan Beulich" <JBeulich@suse.com> wrote:

>>>> + /* VCPU is blocked on mem_paging ring. */
>>>> +#define _VPF_me_mem_paging 4
>>>> +#define VPF_me_mem_paging (1UL<<_VPF_me_mem_paging)
>>>> + /* VCPU is blocked on mem_access ring. */
>>>> +#define _VPF_me_mem_access 5
>>>> +#define VPF_me_mem_access (1UL<<_VPF_me_mem_access)
>>>
>>> Same here - the mem_ seems superfluous.
>>
>> Mem_event-related flags in a more general grouping do require a mem_ prefix
>> imo. The names need to stand on their own and still be descriptive.
>
> But me_mem_ is still bogus - I thought the me_ stands for "mem event",
> and then the subsequent mem_ is unnecessary.

I'd get rid of the me_ rather than the mem_. The me_ is too short to be
meaningful really.

 -- Keir
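Concretely, dropping the me_ as suggested would leave the flags as follows (just an illustration of the naming being discussed, not a posted patch):

```c
 /* VCPU is blocked on mem_paging ring. */
#define _VPF_mem_paging 4
#define VPF_mem_paging  (1UL<<_VPF_mem_paging)
 /* VCPU is blocked on mem_access ring. */
#define _VPF_mem_access 5
#define VPF_mem_access  (1UL<<_VPF_mem_access)
```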