The intention of the first patch was to shrink mem_event_st to make room
for more events in the ring buffer. But since type is only a uint16_t,
and the struct would need to shrink much further to fit more events per
ring, that goal was not reached. Now both patches just serve as cleanup.

Changes:
 mem_event: remove type member
 mem_event: use C99 initializers for mem_event_request_t users

 xen/arch/x86/hvm/hvm.c         |  6 +-----
 xen/arch/x86/mm/mem_sharing.c  |  3 +--
 xen/arch/x86/mm/p2m.c          | 12 ++----------
 xen/include/public/mem_event.h |  8 +-------
 4 files changed, 5 insertions(+), 24 deletions(-)
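For reference, Xen's shared-ring macros in xen/include/public/io/ring.h
round the number of entries per page down to a power of two, which is why
saving two bytes per entry does not by itself buy more slots. Below is a
minimal standalone sketch of that rounding; the header and entry sizes are
illustrative assumptions, not the real sring layout:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Mimics the power-of-two rounding done by __RING_SIZE in io/ring.h. */
static unsigned long ring_entries(unsigned long hdr_sz, unsigned long entry_sz)
{
    unsigned long n = (PAGE_SIZE - hdr_sz) / entry_sz;
    while (n & (n - 1))   /* clear low bits until a power of two remains */
        n &= n - 1;
    return n;
}

int main(void)
{
    /* Both calls print 64: dropping the u16 type does not cross a
     * power-of-two boundary, so the ring holds no extra events. */
    printf("48-byte entries: %lu slots\n", ring_entries(64, 48));
    printf("46-byte entries: %lu slots\n", ring_entries(64, 46));
    return 0;
}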
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1329772180 -3600
# Node ID e1a866546aef8ec1395858c4d2c8f28ff0d0502f
# Parent 0900b1c905f1d038aad58a2732fe2bad682149a3
mem_event: remove type member

When mem_event was added, the type member was meant to indicate who the
consumer of an event is. But the concept of a single ring buffer for
multiple event types cannot work, for two reasons. First, no multiplexer
exists that would deliver each event type to its final consumer. Second,
even if one existed it could not work reliably, because each request
needs to be answered with a response, and responses should be sent
roughly in the order the events were received; with multiple consumers,
one of them could stall all the others.

For that reason the single mem_event buffer for all types of events was
split into individual ring buffers in commit 23842:483c5f8319ad. That
commit already made the type member obsolete, because the purpose of
each buffer is now obvious.

This change removes the type member and widens the flags field to 32
bits. Even though this is an ABI-incompatible change, it has no
practical impact on existing binaries: the changeset referenced above
already bumped the SONAME, so those binaries have to be recompiled
anyway for the upcoming major release.

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r 0900b1c905f1 -r e1a866546aef xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4322,7 +4322,6 @@ static int hvm_memory_event_traps(long p
         return rc;
 
     memset(&req, 0, sizeof(req));
-    req.type = MEM_EVENT_TYPE_ACCESS;
     req.reason = reason;
 
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
diff -r 0900b1c905f1 -r e1a866546aef xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -347,7 +347,7 @@ int mem_sharing_audit(void)
 static void mem_sharing_notify_helper(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
-    mem_event_request_t req = { .type = MEM_EVENT_TYPE_SHARED };
+    mem_event_request_t req = { .gfn = gfn };
 
     if ( v->domain != d )
     {
@@ -369,7 +369,6 @@ static void mem_sharing_notify_helper(st
         req.flags = MEM_EVENT_FLAG_VCPU_PAUSED;
         vcpu_pause_nosync(v);
     }
-    req.gfn = gfn;
     req.p2mt = p2m_ram_shared;
     req.vcpu_id = v->vcpu_id;
     mem_event_put_request(d, &d->mem_event->share, &req);
diff -r 0900b1c905f1 -r e1a866546aef xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -925,7 +925,6 @@ void p2m_mem_paging_drop_page(struct dom
 
     /* Send release notification to pager */
     memset(&req, 0, sizeof(req));
-    req.type = MEM_EVENT_TYPE_PAGING;
     req.gfn = gfn;
     req.flags = MEM_EVENT_FLAG_DROP_PAGE;
 
@@ -982,7 +981,6 @@ void p2m_mem_paging_populate(struct doma
         return;
 
     memset(&req, 0, sizeof(req));
-    req.type = MEM_EVENT_TYPE_PAGING;
 
     /* Fix p2m mapping */
     gfn_lock(p2m, gfn, 0);
@@ -1221,7 +1219,6 @@ bool_t p2m_mem_access_check(unsigned lon
     {
         *req_ptr = req;
         memset(req, 0, sizeof(req));
-        req->type = MEM_EVENT_TYPE_ACCESS;
         req->reason = MEM_EVENT_REASON_VIOLATION;
 
         /* Pause the current VCPU */
diff -r 0900b1c905f1 -r e1a866546aef xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -30,11 +30,6 @@
 #include "xen.h"
 #include "io/ring.h"
 
-/* Memory event type */
-#define MEM_EVENT_TYPE_SHARED   0
-#define MEM_EVENT_TYPE_PAGING   1
-#define MEM_EVENT_TYPE_ACCESS   2
-
 /* Memory event flags */
 #define MEM_EVENT_FLAG_VCPU_PAUSED  (1 << 0)
 #define MEM_EVENT_FLAG_DROP_PAGE    (1 << 1)
@@ -56,8 +51,7 @@ typedef struct mem_event_shared_page {
 } mem_event_shared_page_t;
 
 typedef struct mem_event_st {
-    uint16_t type;
-    uint16_t flags;
+    uint32_t flags;
     uint32_t vcpu_id;
 
     uint64_t gfn;
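Worth noting about the final hunk: widening flags from uint16_t to
uint32_t exactly fills the 16 bits freed by removing type, so the
structure size and the offsets of all later members are unchanged; the
ABI incompatibility lies purely in the meaning of the first 32 bits. A
minimal sketch, with the field list abbreviated to the members visible in
the hunk (offsets assume the usual x86 alignment rules):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct before { uint16_t type; uint16_t flags; uint32_t vcpu_id; uint64_t gfn; };
struct after  { uint32_t flags; uint32_t vcpu_id; uint64_t gfn; };

int main(void)
{
    /* Same size, same offsets for the surviving members. */
    assert(sizeof(struct before) == sizeof(struct after));
    assert(offsetof(struct before, vcpu_id) == offsetof(struct after, vcpu_id));
    assert(offsetof(struct before, gfn) == offsetof(struct after, gfn));
    return 0;
}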
Olaf Hering
2012-Feb-20 21:18 UTC
[PATCH 2 of 2] mem_event: use C99 initializers for mem_event_request_t users
# HG changeset patch
# User Olaf Hering <olaf@aepfle.de>
# Date 1329772592 -3600
# Node ID fcb66fe4134321fa089132ee0e9b24e21600404c
# Parent e1a866546aef8ec1395858c4d2c8f28ff0d0502f
mem_event: use C99 initializers for mem_event_request_t users

Use C99 initializers for mem_event_request_t users to make sure req is
always cleared, even with local debug patches that shuffle code around
to create a single exit point. The common case is to fill in and send
req, so always clearing it adds no significant overhead.

Signed-off-by: Olaf Hering <olaf@aepfle.de>

diff -r e1a866546aef -r fcb66fe41343 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4302,7 +4302,7 @@ static int hvm_memory_event_traps(long p
 {
     struct vcpu* v = current;
     struct domain *d = v->domain;
-    mem_event_request_t req;
+    mem_event_request_t req = { .reason = reason };
     int rc;
 
     if ( !(p & HVMPME_MODE_MASK) )
@@ -4321,9 +4321,6 @@ static int hvm_memory_event_traps(long p
     else if ( rc < 0 )
         return rc;
 
-    memset(&req, 0, sizeof(req));
-    req.reason = reason;
-
     if ( (p & HVMPME_MODE_MASK) == HVMPME_mode_sync )
     {
         req.flags |= MEM_EVENT_FLAG_VCPU_PAUSED;
diff -r e1a866546aef -r fcb66fe41343 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -913,7 +913,7 @@ int p2m_mem_paging_evict(struct domain *
 void p2m_mem_paging_drop_page(struct domain *d, unsigned long gfn,
                               p2m_type_t p2mt)
 {
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
 
     /* We allow no ring in this unique case, because it won't affect
      * correctness of the guest execution at this point. If this is the only
@@ -924,8 +924,6 @@ void p2m_mem_paging_drop_page(struct dom
         return;
 
     /* Send release notification to pager */
-    memset(&req, 0, sizeof(req));
-    req.gfn = gfn;
     req.flags = MEM_EVENT_FLAG_DROP_PAGE;
 
     /* Update stats unless the page hasn't yet been evicted */
@@ -962,7 +960,7 @@ void p2m_mem_paging_drop_page(struct dom
 void p2m_mem_paging_populate(struct domain *d, unsigned long gfn)
 {
     struct vcpu *v = current;
-    mem_event_request_t req;
+    mem_event_request_t req = { .gfn = gfn };
     p2m_type_t p2mt;
     p2m_access_t a;
     mfn_t mfn;
@@ -980,8 +978,6 @@ void p2m_mem_paging_populate(struct doma
     else if ( rc < 0 )
         return;
 
-    memset(&req, 0, sizeof(req));
-
     /* Fix p2m mapping */
     gfn_lock(p2m, gfn, 0);
     mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, p2m_query, NULL);
@@ -1011,7 +1007,6 @@ void p2m_mem_paging_populate(struct doma
     }
 
     /* Send request to pager */
-    req.gfn = gfn;
     req.p2mt = p2mt;
     req.vcpu_id = v->vcpu_id;
 
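The correctness argument here rests on C99 initializer semantics: any
member not named in a designated initializer is zero-initialized (C99
6.7.8), so the single initializer really does replace the memset() plus
assignment pair. A small standalone illustration, using an abbreviated
stand-in for mem_event_request_t:

#include <assert.h>
#include <stdint.h>

/* Abbreviated stand-in for mem_event_request_t, for illustration only. */
typedef struct {
    uint32_t flags;
    uint32_t vcpu_id;
    uint64_t gfn;
    uint64_t p2mt;
} req_t;

int main(void)
{
    req_t req = { .gfn = 42 };   /* unnamed members are zeroed */

    assert(req.gfn == 42);
    assert(req.flags == 0 && req.vcpu_id == 0 && req.p2mt == 0);
    return 0;
}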
At 22:18 +0100 on 20 Feb (1329776280), Olaf Hering wrote:
> The intention of the first patch was to shrink mem_event_st to make room
> for more events in the ring buffer. But since type is only a uint16_t,
> and the struct would need to shrink much further to fit more events per
> ring, that goal was not reached.
>
> Now both patches just serve as cleanup.

Applied, thanks.

Tim.