This series fixes two bugs in the FIFO-based event channel ABI. With the first bug, the priority of a newly bound event may not be the default (it might have been an old priority for that port or 0). The second bug is triggered by moving events between queues (either moving VCPUs or changing their priority). This would cause events to be lost. Testing with a process continually moving all event channels between VCPUs has been done. This would previously fail in under an hour but with this fix the system stayed up for over 10 days. It has also been through a complete set of XenServer's automated regression tests and no issues were found. Changes in v7: - Add patch to initialize priority of all newly bound events. Changes in v6: - Limit loop to acquire old_q->lock to 3 iterations. Changes in v5: - Only set READY bits for new heads. - Rework old tail bug fix to cover all cases. Changes in v4: - const struct domain * - Clear BUSY with existing cmpxchg() where possible. - Fix BUSY bit debug output. Changes in v3: - Use a new BUSY bit to block guests from clearing UNMASKED, this is lower overhead than the previous solution (which required a hypercall). - Fix another problem with moving events between queues. - Add evtchn->last_vcpu_id and evtchn->last_priority instead of evtchn->q. This keeps the structure at 32 bytes long. Changes in v2: - Add MAINTAINERS patch - Remove some unnecessary temporary pending state clears - Add fix for DoS David
David Vrabel
2013-Dec-10 13:56 UTC
[PATCH 1/2] evtchn/fifo: initialize priority when events are bound
From: David Vrabel <david.vrabel@citrix.com> Event channel ports that are reused or that were not in the initial bucket would have a non-default priority. Add an init evtchn_port_op hook and use this to set the priority when an event channel is bound. Within this new evtchn_fifo_init() call, also check if the event is already on a queue and print a warning, as this event may have its first event delivered on a queue with the wrong VCPU or priority. This guest is expected to prevent this (if it cares) by not unbinding events that are still linked. Signed-off-by: David Vrabel <david.vrabel@citrix.com> --- xen/common/event_channel.c | 5 +++++ xen/common/event_fifo.c | 17 +++++++++++++++++ xen/include/xen/event.h | 7 +++++++ 3 files changed, 29 insertions(+), 0 deletions(-) diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 34efd24..db952af 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -220,6 +220,7 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t *alloc) chn->state = ECS_UNBOUND; if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF ) chn->u.unbound.remote_domid = current->domain->domain_id; + evtchn_port_init(d, chn); alloc->port = port; @@ -276,6 +277,7 @@ static long evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) lchn->u.interdomain.remote_dom = rd; lchn->u.interdomain.remote_port = (u16)rport; lchn->state = ECS_INTERDOMAIN; + evtchn_port_init(ld, lchn); rchn->u.interdomain.remote_dom = ld; rchn->u.interdomain.remote_port = (u16)lport; @@ -330,6 +332,7 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind) chn->state = ECS_VIRQ; chn->notify_vcpu_id = vcpu; chn->u.virq = virq; + evtchn_port_init(d, chn); v->virq_to_evtchn[virq] = bind->port = port; @@ -359,6 +362,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) chn = evtchn_from_port(d, port); chn->state = ECS_IPI; chn->notify_vcpu_id = vcpu; + evtchn_port_init(d, chn); bind->port = port; @@ -437,6 +441,7 @@ static 
long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) chn->state = ECS_PIRQ; chn->u.pirq.irq = pirq; link_pirq_port(port, chn, v); + evtchn_port_init(d, chn); bind->port = port; diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c index 6048784..2ab4c29 100644 --- a/xen/common/event_fifo.c +++ b/xen/common/event_fifo.c @@ -34,6 +34,22 @@ static inline event_word_t *evtchn_fifo_word_from_port(struct domain *d, return d->evtchn_fifo->event_array[p] + w; } +static void evtchn_fifo_init(struct domain *d, struct evtchn *evtchn) +{ + event_word_t *word; + + evtchn->priority = EVTCHN_FIFO_PRIORITY_DEFAULT; + + /* + * If this event is still linked, the first event may be delivered + * on the wrong VCPU or with an unexpected priority. + */ + word = evtchn_fifo_word_from_port(d, evtchn->port); + if ( word && test_bit(EVTCHN_FIFO_LINKED, word) ) + gdprintk(XENLOG_WARNING, "domain %d, port %d already on a queue\n", + d->domain_id, evtchn->port); +} + static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link) { event_word_t new, old; @@ -261,6 +277,7 @@ static void evtchn_fifo_print_state(struct domain *d, static const struct evtchn_port_ops evtchn_port_ops_fifo { + .init = evtchn_fifo_init, .set_pending = evtchn_fifo_set_pending, .clear_pending = evtchn_fifo_clear_pending, .unmask = evtchn_fifo_unmask, diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h index 70fc271..06c0654 100644 --- a/xen/include/xen/event.h +++ b/xen/include/xen/event.h @@ -132,6 +132,7 @@ void evtchn_2l_init(struct domain *d); * Low-level event channel port ops. 
*/ struct evtchn_port_ops { + void (*init)(struct domain *d, struct evtchn *evtchn); void (*set_pending)(struct vcpu *v, struct evtchn *evtchn); void (*clear_pending)(struct domain *d, struct evtchn *evtchn); void (*unmask)(struct domain *d, struct evtchn *evtchn); @@ -142,6 +143,12 @@ struct evtchn_port_ops { void (*print_state)(struct domain *d, const struct evtchn *evtchn); }; +static inline void evtchn_port_init(struct domain *d, struct evtchn *evtchn) +{ + if ( d->evtchn_port_ops->init ) + d->evtchn_port_ops->init(d, evtchn); +} + static inline void evtchn_port_set_pending(struct vcpu *v, struct evtchn *evtchn) { -- 1.7.2.5
David Vrabel
2013-Dec-10 13:57 UTC
[PATCH 2/2] evtchn/fifo: don't corrupt queues if an old tail is linked
From: David Vrabel <david.vrabel@citrix.com> An event may still be the tail of a queue even if the queue is now empty (an 'old tail' event). There is logic to handle the case when this old tail event needs to be added to the now empty queue (by checking for q->tail == port). However, this does not cover all cases. 1. An old tail may be re-added simultaneously with another event. LINKED is set on the old tail, and the other CPU may misinterpret this as the old tail still being valid and set LINK instead of HEAD. All events on this queue will then be lost. 2. If the old tail event on queue A is moved to a different queue B (by changing its VCPU or priority), the event may then be linked onto queue B. When another event is linked onto queue A it will check the old tail, see that it is linked (but on queue B) and overwrite the LINK field, corrupting both queues. When an event is linked, save the vcpu id and priority of the queue it is being linked onto. Use this when linking an event to check if it is an unlinked old tail event. If it is an old tail event, the old queue is empty and old_q->tail is invalidated to ensure adding another event to old_q will update HEAD. The tail is invalidated by setting it to 0 since the event 0 is never linked. The old_q->lock is held while setting LINKED to avoid the race with the test of LINKED in evtchn_fifo_set_link(). Since an event channel may move queues after old_q->lock is acquired, we must check that we have the correct lock and retry if not. Since changing VCPUs or priority is expected to be rare events that are serialized in the guest, we try at most 3 times before dropping the event. This prevents a malicious guest from repeatedly adjusting priority to prevent another domain from acquiring old_q->lock. 
Signed-off-by: David Vrabel <david.vrabel@citrix.com> --- xen/common/event_fifo.c | 80 ++++++++++++++++++++++++++++++++++++++++------- xen/include/xen/sched.h | 2 + 2 files changed, 70 insertions(+), 12 deletions(-) diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c index 2ab4c29..fc43e62 100644 --- a/xen/common/event_fifo.c +++ b/xen/common/event_fifo.c @@ -50,6 +50,36 @@ static void evtchn_fifo_init(struct domain *d, struct evtchn *evtchn) d->domain_id, evtchn->port); } +static struct evtchn_fifo_queue *lock_old_queue(const struct domain *d, + struct evtchn *evtchn, + unsigned long *flags) +{ + struct vcpu *v; + struct evtchn_fifo_queue *q, *old_q; + unsigned int try; + + for ( try = 0; try < 3; try++ ) + { + v = d->vcpu[evtchn->last_vcpu_id]; + old_q = &v->evtchn_fifo->queue[evtchn->last_priority]; + + spin_lock_irqsave(&old_q->lock, *flags); + + v = d->vcpu[evtchn->last_vcpu_id]; + q = &v->evtchn_fifo->queue[evtchn->last_priority]; + + if ( old_q == q ) + return old_q; + + spin_unlock_irqrestore(&old_q->lock, *flags); + } + + gdprintk(XENLOG_WARNING, + "domain %d, port %d lost event (too many queue changes)\n", + d->domain_id, evtchn->port); + return NULL; +} + static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link) { event_word_t new, old; @@ -119,7 +149,6 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) struct domain *d = v->domain; unsigned int port; event_word_t *word; - struct evtchn_fifo_queue *q; unsigned long flags; bool_t was_pending; @@ -136,25 +165,52 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) return; } - /* - * No locking around getting the queue. This may race with - * changing the priority but we are allowed to signal the event - * once on the old priority. - */ - q = &v->evtchn_fifo->queue[evtchn->priority]; - was_pending = test_and_set_bit(EVTCHN_FIFO_PENDING, word); /* * Link the event if it unmasked and not already linked. 
*/ if ( !test_bit(EVTCHN_FIFO_MASKED, word) - && !test_and_set_bit(EVTCHN_FIFO_LINKED, word) ) + && !test_bit(EVTCHN_FIFO_LINKED, word) ) { + struct evtchn_fifo_queue *q, *old_q; event_word_t *tail_word; bool_t linked = 0; - spin_lock_irqsave(&q->lock, flags); + /* + * No locking around getting the queue. This may race with + * changing the priority but we are allowed to signal the + * event once on the old priority. + */ + q = &v->evtchn_fifo->queue[evtchn->priority]; + + old_q = lock_old_queue(d, evtchn, &flags); + if ( !old_q ) + goto done; + + if ( test_and_set_bit(EVTCHN_FIFO_LINKED, word) ) + { + spin_unlock_irqrestore(&old_q->lock, flags); + goto done; + } + + /* + * If this event was a tail, the old queue is now empty and + * its tail must be invalidated to prevent adding an event to + * the old queue from corrupting the new queue. + */ + if ( old_q->tail == port ) + old_q->tail = 0; + + /* Moved to a different queue? */ + if ( old_q != q ) + { + evtchn->last_vcpu_id = evtchn->notify_vcpu_id; + evtchn->last_priority = evtchn->priority; + + spin_unlock_irqrestore(&old_q->lock, flags); + spin_lock_irqsave(&q->lock, flags); + } /* * Atomically link the tail to port iff the tail is linked. @@ -166,7 +222,7 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) * If the queue is empty (i.e., we haven''t linked to the new * event), head must be updated. 
*/ - if ( port != q->tail ) + if ( q->tail ) { tail_word = evtchn_fifo_word_from_port(d, q->tail); linked = evtchn_fifo_set_link(d, tail_word, port); @@ -182,7 +238,7 @@ static void evtchn_fifo_set_pending(struct vcpu *v, struct evtchn *evtchn) &v->evtchn_fifo->control_block->ready) ) vcpu_mark_events_pending(v); } - + done: if ( !was_pending ) evtchn_check_pollers(d, port); } diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h index cbdf377..5ab92dd 100644 --- a/xen/include/xen/sched.h +++ b/xen/include/xen/sched.h @@ -98,6 +98,8 @@ struct evtchn } u; u8 priority; u8 pending:1; + u16 last_vcpu_id; + u8 last_priority; #ifdef FLASK_ENABLE void *ssid; #endif -- 1.7.2.5
Jan Beulich
2013-Dec-10 14:50 UTC
Re: [PATCH 1/2] evtchn/fifo: initialize priority when events are bound
>>> On 10.12.13 at 14:56, David Vrabel <david.vrabel@citrix.com> wrote: > From: David Vrabel <david.vrabel@citrix.com> > > Event channel ports that are reused or that were not in the initial > bucket would have a non-default priority. > > Add an init evtchn_port_op hook and use this to set the priority when > an event channel is bound. > > Within this new evtchn_fifo_init() call, also check if the event is > already on a queue and print a warning, as this event may have its > first event delivered on a queue with the wrong VCPU or priority. > This guest is expected to prevent this (if it cares) by not unbinding > events that are still linked. >Reported-by: Jan Beulich <jbeulich@suse.com>> Signed-off-by: David Vrabel <david.vrabel@citrix.com>Reviewed-by: Jan Beulich <jbeulich@suse.com>> --- > xen/common/event_channel.c | 5 +++++ > xen/common/event_fifo.c | 17 +++++++++++++++++ > xen/include/xen/event.h | 7 +++++++ > 3 files changed, 29 insertions(+), 0 deletions(-) > > diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c > index 34efd24..db952af 100644 > --- a/xen/common/event_channel.c > +++ b/xen/common/event_channel.c > @@ -220,6 +220,7 @@ static long evtchn_alloc_unbound(evtchn_alloc_unbound_t > *alloc) > chn->state = ECS_UNBOUND; > if ( (chn->u.unbound.remote_domid = alloc->remote_dom) == DOMID_SELF ) > chn->u.unbound.remote_domid = current->domain->domain_id; > + evtchn_port_init(d, chn); > > alloc->port = port; > > @@ -276,6 +277,7 @@ static long > evtchn_bind_interdomain(evtchn_bind_interdomain_t *bind) > lchn->u.interdomain.remote_dom = rd; > lchn->u.interdomain.remote_port = (u16)rport; > lchn->state = ECS_INTERDOMAIN; > + evtchn_port_init(ld, lchn); > > rchn->u.interdomain.remote_dom = ld; > rchn->u.interdomain.remote_port = (u16)lport; > @@ -330,6 +332,7 @@ static long evtchn_bind_virq(evtchn_bind_virq_t *bind) > chn->state = ECS_VIRQ; > chn->notify_vcpu_id = vcpu; > chn->u.virq = virq; > + evtchn_port_init(d, chn); > > 
v->virq_to_evtchn[virq] = bind->port = port; > > @@ -359,6 +362,7 @@ static long evtchn_bind_ipi(evtchn_bind_ipi_t *bind) > chn = evtchn_from_port(d, port); > chn->state = ECS_IPI; > chn->notify_vcpu_id = vcpu; > + evtchn_port_init(d, chn); > > bind->port = port; > > @@ -437,6 +441,7 @@ static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind) > chn->state = ECS_PIRQ; > chn->u.pirq.irq = pirq; > link_pirq_port(port, chn, v); > + evtchn_port_init(d, chn); > > bind->port = port; > > diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c > index 6048784..2ab4c29 100644 > --- a/xen/common/event_fifo.c > +++ b/xen/common/event_fifo.c > @@ -34,6 +34,22 @@ static inline event_word_t > *evtchn_fifo_word_from_port(struct domain *d, > return d->evtchn_fifo->event_array[p] + w; > } > > +static void evtchn_fifo_init(struct domain *d, struct evtchn *evtchn) > +{ > + event_word_t *word; > + > + evtchn->priority = EVTCHN_FIFO_PRIORITY_DEFAULT; > + > + /* > + * If this event is still linked, the first event may be delivered > + * on the wrong VCPU or with an unexpected priority. > + */ > + word = evtchn_fifo_word_from_port(d, evtchn->port); > + if ( word && test_bit(EVTCHN_FIFO_LINKED, word) ) > + gdprintk(XENLOG_WARNING, "domain %d, port %d already on a queue\n", > + d->domain_id, evtchn->port); > +} > + > static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link) > { > event_word_t new, old; > @@ -261,6 +277,7 @@ static void evtchn_fifo_print_state(struct domain *d, > > static const struct evtchn_port_ops evtchn_port_ops_fifo > { > + .init = evtchn_fifo_init, > .set_pending = evtchn_fifo_set_pending, > .clear_pending = evtchn_fifo_clear_pending, > .unmask = evtchn_fifo_unmask, > diff --git a/xen/include/xen/event.h b/xen/include/xen/event.h > index 70fc271..06c0654 100644 > --- a/xen/include/xen/event.h > +++ b/xen/include/xen/event.h > @@ -132,6 +132,7 @@ void evtchn_2l_init(struct domain *d); > * Low-level event channel port ops. 
> */ > struct evtchn_port_ops { > + void (*init)(struct domain *d, struct evtchn *evtchn); > void (*set_pending)(struct vcpu *v, struct evtchn *evtchn); > void (*clear_pending)(struct domain *d, struct evtchn *evtchn); > void (*unmask)(struct domain *d, struct evtchn *evtchn); > @@ -142,6 +143,12 @@ struct evtchn_port_ops { > void (*print_state)(struct domain *d, const struct evtchn *evtchn); > }; > > +static inline void evtchn_port_init(struct domain *d, struct evtchn > *evtchn) > +{ > + if ( d->evtchn_port_ops->init ) > + d->evtchn_port_ops->init(d, evtchn); > +} > + > static inline void evtchn_port_set_pending(struct vcpu *v, > struct evtchn *evtchn) > { > -- > 1.7.2.5
David Vrabel
2013-Dec-10 14:54 UTC
Re: [PATCH 1/2] evtchn/fifo: initialize priority when events are bound
On 10/12/13 14:50, Jan Beulich wrote:>>>> On 10.12.13 at 14:56, David Vrabel <david.vrabel@citrix.com> wrote: >> From: David Vrabel <david.vrabel@citrix.com> >> >> Event channel ports that are reused or that were not in the initial >> bucket would have a non-default priority. >> >> Add an init evtchn_port_op hook and use this to set the priority when >> an event channel is bound. >> >> Within this new evtchn_fifo_init() call, also check if the event is >> already on a queue and print a warning, as this event may have its >> first event delivered on a queue with the wrong VCPU or priority. >> This guest is expected to prevent this (if it cares) by not unbinding >> events that are still linked. >> > > Reported-by: Jan Beulich <jbeulich@suse.com>Yes, sorry. Not the first time I've not properly attributed someone. I shall have to be more careful in future.>> Signed-off-by: David Vrabel <david.vrabel@citrix.com> > > Reviewed-by: Jan Beulich <jbeulich@suse.com>Thanks! David
Jan Beulich
2013-Dec-10 14:55 UTC
Re: [PATCH 2/2] evtchn/fifo: don't corrupt queues if an old tail is linked
>>> On 10.12.13 at 14:57, David Vrabel <david.vrabel@citrix.com> wrote: > From: David Vrabel <david.vrabel@citrix.com> > > An event may still be the tail of a queue even if the queue is now > empty (an ''old tail'' event). There is logic to handle the case when > this old tail event needs to be added to the now empty queue (by > checking for q->tail == port). > > However, this does not cover all cases. > > 1. An old tail may be re-added simultaneously with another event. > LINKED is set on the old tail, and the other CPU may misinterpret > this as the old tail still being valid and set LINK instead of > HEAD. All events on this queue will then be lost. > > 2. If the old tail event on queue A is moved to a different queue B > (by changing its VCPU or priority), the event may then be linked > onto queue B. When another event is linked onto queue A it will > check the old tail, see that it is linked (but on queue B) and > overwrite the LINK field, corrupting both queues. > > When an event is linked, save the vcpu id and priority of the queue it > is being linked onto. Use this when linking an event to check if it > is an unlinked old tail event. If it is an old tail event, the old > queue is empty and old_q->tail is invalidated to ensure adding another > event to old_q will update HEAD. The tail is invalidated by setting > it to 0 since the event 0 is never linked. > > The old_q->lock is held while setting LINKED to avoid the race with > the test of LINKED in evtchn_fifo_set_link(). > > Since a event channel may move queues after old_q->lock is acquired, > we must check that we have the correct lock and retry if not. Since > changing VCPUs or priority is expected to be rare events that are > serialized in the guest, we try at most 3 times before dropping the > event. This prevents a malicious guest from repeatedly adjusting > priority to prevent another domain from acquiring old_q->lock. 
> > Signed-off-by: David Vrabel <david.vrabel@citrix.com>Reviewed-by: Jan Beulich <jbeulich@suse.com>> --- > xen/common/event_fifo.c | 80 ++++++++++++++++++++++++++++++++++++++++------- > xen/include/xen/sched.h | 2 + > 2 files changed, 70 insertions(+), 12 deletions(-) > > diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c > index 2ab4c29..fc43e62 100644 > --- a/xen/common/event_fifo.c > +++ b/xen/common/event_fifo.c > @@ -50,6 +50,36 @@ static void evtchn_fifo_init(struct domain *d, struct > evtchn *evtchn) > d->domain_id, evtchn->port); > } > > +static struct evtchn_fifo_queue *lock_old_queue(const struct domain *d, > + struct evtchn *evtchn, > + unsigned long *flags) > +{ > + struct vcpu *v; > + struct evtchn_fifo_queue *q, *old_q; > + unsigned int try; > + > + for ( try = 0; try < 3; try++ ) > + { > + v = d->vcpu[evtchn->last_vcpu_id]; > + old_q = &v->evtchn_fifo->queue[evtchn->last_priority]; > + > + spin_lock_irqsave(&old_q->lock, *flags); > + > + v = d->vcpu[evtchn->last_vcpu_id]; > + q = &v->evtchn_fifo->queue[evtchn->last_priority]; > + > + if ( old_q == q ) > + return old_q; > + > + spin_unlock_irqrestore(&old_q->lock, *flags); > + } > + > + gdprintk(XENLOG_WARNING, > + "domain %d, port %d lost event (too many queue changes)\n", > + d->domain_id, evtchn->port); > + return NULL; > +} > + > static int try_set_link(event_word_t *word, event_word_t *w, uint32_t link) > { > event_word_t new, old; > @@ -119,7 +149,6 @@ static void evtchn_fifo_set_pending(struct vcpu *v, > struct evtchn *evtchn) > struct domain *d = v->domain; > unsigned int port; > event_word_t *word; > - struct evtchn_fifo_queue *q; > unsigned long flags; > bool_t was_pending; > > @@ -136,25 +165,52 @@ static void evtchn_fifo_set_pending(struct vcpu *v, > struct evtchn *evtchn) > return; > } > > - /* > - * No locking around getting the queue. This may race with > - * changing the priority but we are allowed to signal the event > - * once on the old priority. 
> - */ > - q = &v->evtchn_fifo->queue[evtchn->priority]; > - > was_pending = test_and_set_bit(EVTCHN_FIFO_PENDING, word); > > /* > * Link the event if it unmasked and not already linked. > */ > if ( !test_bit(EVTCHN_FIFO_MASKED, word) > - && !test_and_set_bit(EVTCHN_FIFO_LINKED, word) ) > + && !test_bit(EVTCHN_FIFO_LINKED, word) ) > { > + struct evtchn_fifo_queue *q, *old_q; > event_word_t *tail_word; > bool_t linked = 0; > > - spin_lock_irqsave(&q->lock, flags); > + /* > + * No locking around getting the queue. This may race with > + * changing the priority but we are allowed to signal the > + * event once on the old priority. > + */ > + q = &v->evtchn_fifo->queue[evtchn->priority]; > + > + old_q = lock_old_queue(d, evtchn, &flags); > + if ( !old_q ) > + goto done; > + > + if ( test_and_set_bit(EVTCHN_FIFO_LINKED, word) ) > + { > + spin_unlock_irqrestore(&old_q->lock, flags); > + goto done; > + } > + > + /* > + * If this event was a tail, the old queue is now empty and > + * its tail must be invalidated to prevent adding an event to > + * the old queue from corrupting the new queue. > + */ > + if ( old_q->tail == port ) > + old_q->tail = 0; > + > + /* Moved to a different queue? */ > + if ( old_q != q ) > + { > + evtchn->last_vcpu_id = evtchn->notify_vcpu_id; > + evtchn->last_priority = evtchn->priority; > + > + spin_unlock_irqrestore(&old_q->lock, flags); > + spin_lock_irqsave(&q->lock, flags); > + } > > /* > * Atomically link the tail to port iff the tail is linked. > @@ -166,7 +222,7 @@ static void evtchn_fifo_set_pending(struct vcpu *v, > struct evtchn *evtchn) > * If the queue is empty (i.e., we haven''t linked to the new > * event), head must be updated. 
> */ > - if ( port != q->tail ) > + if ( q->tail ) > { > tail_word = evtchn_fifo_word_from_port(d, q->tail); > linked = evtchn_fifo_set_link(d, tail_word, port); > @@ -182,7 +238,7 @@ static void evtchn_fifo_set_pending(struct vcpu *v, > struct evtchn *evtchn) > &v->evtchn_fifo->control_block->ready) ) > vcpu_mark_events_pending(v); > } > - > + done: > if ( !was_pending ) > evtchn_check_pollers(d, port); > } > diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h > index cbdf377..5ab92dd 100644 > --- a/xen/include/xen/sched.h > +++ b/xen/include/xen/sched.h > @@ -98,6 +98,8 @@ struct evtchn > } u; > u8 priority; > u8 pending:1; > + u16 last_vcpu_id; > + u8 last_priority; > #ifdef FLASK_ENABLE > void *ssid; > #endif > -- > 1.7.2.5