Olaf Hering
2011-Sep-05 09:18 UTC
[Xen-devel] [PATCH] mem_event: add ref counting for free request slots
# HG changeset patch # User Olaf Hering <olaf@aepfle.de> # Date 1315214140 -7200 # Node ID b2c8dacb2dc08bd93e4926eb0c2c7823d40e2174 # Parent f1349a968a5ac5577d67ad4a3f3490c580dbe264 mem_event: add ref counting for free request slots If mem_event_check_ring() is called by many vcpus at the same time before any of them has also called mem_event_put_request(), all of the callers must assume there are enough free slots available in the ring. Record the number of request producers in mem_event_check_ring() to keep track of available free slots. Add a new mem_event_put_req_producers() function to release a request attempt made in mem_event_check_ring(). It's required for p2m_mem_paging_populate() because that function can only modify the p2m type if there are free request slots. But in some cases p2m_mem_paging_populate() does not actually have to produce another request when it is known that the same request was already made earlier by a different vcpu. mem_event_check_ring() cannot return a reference to a free request slot because there could be multiple references for different vcpus and the order of mem_event_put_request() calls is not known. As a result, incomplete requests could be consumed by the ring user. 
Signed-off-by: Olaf Hering <olaf@aepfle.de> diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/mem_event.c --- a/xen/arch/x86/mm/mem_event.c +++ b/xen/arch/x86/mm/mem_event.c @@ -37,8 +37,6 @@ #define mem_event_ring_lock(_d) spin_lock(&(_d)->mem_event.ring_lock) #define mem_event_ring_unlock(_d) spin_unlock(&(_d)->mem_event.ring_lock) -#define MEM_EVENT_RING_THRESHOLD 4 - static int mem_event_enable(struct domain *d, mfn_t ring_mfn, mfn_t shared_mfn) { int rc; @@ -109,6 +107,7 @@ void mem_event_put_request(struct domain req_prod++; /* Update ring */ + d->mem_event.req_producers--; front_ring->req_prod_pvt = req_prod; RING_PUSH_REQUESTS(front_ring); @@ -153,11 +152,18 @@ void mem_event_mark_and_pause(struct vcp vcpu_sleep_nosync(v); } +void mem_event_put_req_producers(struct domain *d) +{ + mem_event_ring_lock(d); + d->mem_event.req_producers--; + mem_event_ring_unlock(d); +} + int mem_event_check_ring(struct domain *d) { struct vcpu *curr = current; int free_requests; - int ring_full; + int ring_full = 1; if ( !d->mem_event.ring_page ) return -1; @@ -165,12 +171,11 @@ int mem_event_check_ring(struct domain * mem_event_ring_lock(d); free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring); - if ( unlikely(free_requests < 2) ) + if ( d->mem_event.req_producers < free_requests ) { - gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests); - WARN_ON(free_requests == 0); + d->mem_event.req_producers++; + ring_full = 0; } - ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0; if ( (curr->domain->domain_id == d->domain_id) && ring_full ) { diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/mem_sharing.c --- a/xen/arch/x86/mm/mem_sharing.c +++ b/xen/arch/x86/mm/mem_sharing.c @@ -281,7 +281,6 @@ static struct page_info* mem_sharing_all vcpu_pause_nosync(v); req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED; - /* XXX: Need to reserve a request, not just check the ring! 
*/ if(mem_event_check_ring(d)) return page; req.gfn = gfn; diff -r f1349a968a5a -r b2c8dacb2dc0 xen/arch/x86/mm/p2m.c --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -803,6 +803,7 @@ void p2m_mem_paging_populate(struct doma else if ( p2mt != p2m_ram_paging_out && p2mt != p2m_ram_paged ) { /* gfn is already on its way back and vcpu is not paused */ + mem_event_put_req_producers(d); return; } diff -r f1349a968a5a -r b2c8dacb2dc0 xen/include/asm-x86/mem_event.h --- a/xen/include/asm-x86/mem_event.h +++ b/xen/include/asm-x86/mem_event.h @@ -27,6 +27,7 @@ /* Pauses VCPU while marking pause flag for mem event */ void mem_event_mark_and_pause(struct vcpu *v); int mem_event_check_ring(struct domain *d); +void mem_event_put_req_producers(struct domain *d); void mem_event_put_request(struct domain *d, mem_event_request_t *req); void mem_event_get_response(struct domain *d, mem_event_response_t *rsp); void mem_event_unpause_vcpus(struct domain *d); diff -r f1349a968a5a -r b2c8dacb2dc0 xen/include/xen/sched.h --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -183,6 +183,7 @@ struct mem_event_domain { /* ring lock */ spinlock_t ring_lock; + unsigned int req_producers; /* shared page */ mem_event_shared_page_t *shared_page; /* shared ring page */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel