As a general library routine, it should behave as efficiently as
possible, even if at present no significant contention is known here.

Signed-off-by: Jan Beulich <jbeulich@novell.com>

--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -25,7 +25,7 @@ struct rangeset {
 
     /* Ordered list of ranges contained in this set, and protecting lock. */
     struct list_head range_list;
-    spinlock_t       lock;
+    rwlock_t         lock;
 
     /* Pretty-printing name. */
     char             name[32];
@@ -103,7 +103,7 @@ int rangeset_add_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -159,7 +159,7 @@ int rangeset_add_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -175,7 +175,7 @@ int rangeset_remove_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    write_lock(&r->lock);
 
     x = find_range(r, s);
     y = find_range(r, e);
@@ -231,7 +231,7 @@ int rangeset_remove_range(
     }
 
  out:
-    spin_unlock(&r->lock);
+    write_unlock(&r->lock);
     return rc;
 }
 
@@ -243,10 +243,10 @@ int rangeset_contains_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, s);
     contains = (x && (x->e >= e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return contains;
 }
@@ -259,10 +259,10 @@ int rangeset_overlaps_range(
 
     ASSERT(s <= e);
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
     x = find_range(r, e);
     overlaps = (x && (s <= x->e));
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return overlaps;
 }
@@ -274,13 +274,13 @@ int rangeset_report_ranges(
     struct range *x;
     int rc = 0;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     for ( x = find_range(r, s); x && (x->s <= e) && !rc; x = next_range(r, x) )
         if ( x->e >= s )
             rc = cb(max(x->s, s), min(x->e, e), ctxt);
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 
     return rc;
 }
@@ -318,7 +318,7 @@ struct rangeset *rangeset_new(
     if ( r == NULL )
         return NULL;
 
-    spin_lock_init(&r->lock);
+    rwlock_init(&r->lock);
     INIT_LIST_HEAD(&r->range_list);
 
     BUG_ON(flags & ~RANGESETF_prettyprint_hex);
@@ -403,7 +403,7 @@ void rangeset_printk(
     int nr_printed = 0;
     struct range *x;
 
-    spin_lock(&r->lock);
+    read_lock(&r->lock);
 
     printk("%-10s {", r->name);
 
@@ -422,7 +422,7 @@ void rangeset_printk(
 
     printk(" }");
 
-    spin_unlock(&r->lock);
+    read_unlock(&r->lock);
 }
 
 void rangeset_domain_printk(
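For orientation, a minimal and purely hypothetical caller of the interface
this patch touches: the ranges, names and printk text are made up, only the
rangeset_* calls and their argument shapes come from the patch itself. The
point is that after the change only rangeset_add_range() and
rangeset_remove_range() take the lock as writers, while all the query paths
become concurrent readers.

/* Hypothetical usage sketch; not part of the patch. */
static int print_one(unsigned long s, unsigned long e, void *ctxt)
{
    printk("  [%lx,%lx]\n", s, e);
    return 0;
}

static void rangeset_demo(struct rangeset *r)  /* r from rangeset_new() */
{
    /* Writers: serialise on write_lock(&r->lock) after this patch. */
    rangeset_add_range(r, 0xa0000, 0xbffff);
    rangeset_remove_range(r, 0xb0000, 0xb7fff);

    /*
     * Readers: only take read_lock(&r->lock), so they no longer exclude
     * one another.
     */
    if ( rangeset_contains_range(r, 0xa0000, 0xaffff) )
        printk("low half still present\n");

    if ( rangeset_overlaps_range(r, 0xb4000, 0xc0000) )
        printk("overlaps a remaining range\n");

    rangeset_report_ranges(r, 0, ~0UL, print_one, NULL);
}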
Keir Fraser
2011-Mar-25 17:08 UTC
Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
I'd rather get rid of rwlocks altogether and use RCU in any cases where we
really have contention. Rwlocks don't help unless the read-side critical
sections are large enough to amortise the cache ping-pong cost of the
locking/unlocking operations. And in Xen we have very few if any
significantly sized critical sections.

I need to double check, but I believe we have only a couple of rwlock users
now, and none of the read-side critical sections are large, so in that case
I suggest we switch them to use spinlocks and kill our rwlock
implementation.

 -- Keir

On 25/03/2011 16:49, "Jan Beulich" <JBeulich@novell.com> wrote:

> As a general library routine, it should behave as efficiently as
> possible, even if at present no significant contention is known here.
>
> Signed-off-by: Jan Beulich <jbeulich@novell.com>
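To make the cache ping-pong point concrete: read_lock() still performs an
atomic update of the shared lock word, so every reader bounces that cache
line between CPUs, whereas an RCU read side writes no shared state at all.
Below is a minimal sketch of what an RCU-protected lookup corresponding to
rangeset_contains_range() could look like. It is illustrative only: it uses
Linux-style RCU list primitives (Xen's RCU interface differs in detail),
the struct definitions are local stand-ins for the ones in
xen/common/rangeset.c, and the writer side (list_add_rcu()/list_del_rcu()
under a lock, plus deferred freeing of ranges) is not shown.

/* Sketch only; not part of the patch. */
#include <linux/rcupdate.h>
#include <linux/rculist.h>

struct range {
    struct list_head list;
    unsigned long s, e;
};

struct rangeset {
    struct list_head range_list;   /* sorted, non-overlapping ranges */
};

static int rangeset_contains_range_rcu(
    struct rangeset *r, unsigned long s, unsigned long e)
{
    struct range *x;
    int contains = 0;

    rcu_read_lock();               /* no write to any shared cache line */
    list_for_each_entry_rcu(x, &r->range_list, list)
    {
        if ( x->s > s )            /* sorted: nothing later can contain s */
            break;
        if ( x->e >= e )           /* x->s <= s and x->e >= e: contained */
        {
            contains = 1;
            break;
        }
    }
    rcu_read_unlock();

    return contains;
}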
Dan Magenheimer
2011-Mar-25 17:52 UTC
RE: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
Tmem (in Xen) does use rwlocks. After hearing from Jeremy that
Linux maintainers wouldn't approve of new users of rwlocks, I
redid the locking structure in the in-Linux-kernel version of tmem
to avoid using them. I am fairly sure that the same approach used in
zcache can be used in Xen, but have not tried, and it's likely
to be a fairly big coding/testing effort that I can't undertake
right now.

I am also fairly sure that the current Xen tmem locking structure
is not suitable for switching to normal spinlocks nor RCU,
but am far from an expert in this area.

Dan

> -----Original Message-----
> From: Keir Fraser [mailto:keir.xen@gmail.com]
> Sent: Friday, March 25, 2011 11:09 AM
> To: Jan Beulich; xen-devel@lists.xensource.com
> Subject: Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
>
> I'd rather get rid of rwlocks altogether and use RCU in any cases where
> we really have contention. Rwlocks don't help unless the read-side
> critical sections are large enough to amortise the cache ping-pong cost
> of the locking/unlocking operations. And in Xen we have very few if any
> significantly sized critical sections.
>
> I need to double check, but I believe we have only a couple of rwlock
> users now, and none of the read-side critical sections are large, so in
> that case I suggest we switch them to use spinlocks and kill our rwlock
> implementation.
>
> -- Keir
Keir Fraser
2011-Mar-25 20:52 UTC
Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
On 25/03/2011 17:52, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:

> Tmem (in Xen) does use rwlocks. After hearing from Jeremy that
> Linux maintainers wouldn't approve of new users of rwlocks, I
> redid the locking structure in the in-Linux-kernel version of tmem
> to avoid using them. I am fairly sure that the same approach used in
> zcache can be used in Xen, but have not tried, and it's likely
> to be a fairly big coding/testing effort that I can't undertake
> right now.
>
> I am also fairly sure that the current Xen tmem locking structure
> is not suitable for switching to normal spinlocks nor RCU,
> but am far from an expert in this area.

Why would a normal spinlock not work?

 -- Keir
Jan Beulich
2011-Mar-28 08:23 UTC
Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
>>> On 25.03.11 at 18:08, Keir Fraser <keir.xen@gmail.com> wrote:
> I'd rather get rid of rwlocks altogether and use RCU in any cases where we
> really have contention. Rwlocks don't help unless the read-side critical
> sections are large enough to amortise the cache ping-pong cost of the
> locking/unlocking operations. And in Xen we have very few if any
> significantly sized critical sections.

As I said in the description, the rangeset code is of general library
kind, and hence shouldn't make assumptions about the sort of data
stored in rangesets. While I agree that in many cases the read-side
critical section would be small, there can be exceptions.

Using RCU in this kind of leaf library routine, independent of almost
anything else, would seem odd. The only reason to stay with spinlocks
was imo if you indeed wanted to knock off rwlocks altogether, which
would now seem contrary to your c/s 23099:612171ff82ea.

> I need to double check, but I believe we have only a couple of rwlock
> users now, and none of the read-side critical sections are large, so in
> that case I suggest we switch them to use spinlocks and kill our rwlock
> implementation.

Indeed, there's only tmem without the two patches I had sent. One
of the reasons for putting them together was to actually have some
*active* users of rwlocks again, so the code wouldn't have too good
a chance to bit-rot.

Further, I've taken note of a few other locks that may be
candidates for conversion to rwlocks (pcidevs_lock being the
most prominent example), requiring closer inspection and possibly
code adjustments other than the mere lock kind conversion.

Jan
Keir Fraser
2011-Mar-28 08:54 UTC
Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
On 28/03/2011 09:23, "Jan Beulich" <JBeulich@novell.com> wrote:

> As I said in the description, the rangeset code is of general library
> kind, and hence shouldn't make assumptions about the sort of data
> stored in rangesets. While I agree that in many cases the read-side
> critical section would be small, there can be exceptions.

Because of rangeset_report_ranges()? Well, we could equally say that it is
not nice to be executing a callback in a spinlock critical section. For
example, the __copy_to_guest_offset() in the current sole callback handler
will be invalid for any guest using xenpaging (when that works properly),
since paging work could be done in there. Not a problem right now of
course, not least because the callback is executed only for dom0.

For that one iterator function I'm sure we could devise a method for
executing callbacks with no lock held, without resorting to RCU. Again,
this would be preferable to using rwlocks.

> Using RCU in this kind of leaf library routine, independent of almost
> anything else, would seem odd.

Callers wouldn't see the RCU-ness. Not that I'm arguing to make rangesets
use RCU, or to avoid holding a lock during report_ranges callbacks. I think
concurrency bottlenecks in that code are a non-issue.

> The only reason to stay with spinlocks was imo if you indeed wanted to
> knock off rwlocks altogether, which would now seem contrary to your
> c/s 23099:612171ff82ea.

Well, they're only used now in code that is outside my personal areas of
interest. I'd prefer more sensible locking strategies to be used, but I'm
not going to remove code from Xen as an alternative, nor am I going to do
the work myself. So rwlocks remain in their current limited uses.

>> I need to double check, but I believe we have only a couple of rwlock
>> users now, and none of the read-side critical sections are large, so in
>> that case I suggest we switch them to use spinlocks and kill our rwlock
>> implementation.
>
> Indeed, there's only tmem without the two patches I had sent. One
> of the reasons for putting them together was to actually have some
> *active* users of rwlocks again, so the code wouldn't have too good
> a chance to bit-rot.
>
> Further, I've taken note of a few other locks that may be
> candidates for conversion to rwlocks (pcidevs_lock being the
> most prominent example), requiring closer inspection and possibly
> code adjustments other than the mere lock kind conversion.

There'd be a bigger win in fragmenting pcidevs_lock, I'm sure. It looks
like it would be easy to do; it covers all sorts of stuff at the moment.

 -- Keir
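One possible shape for the "callbacks with no lock held" idea, purely a
sketch and not necessarily the method Keir has in mind: snapshot the
current range's bounds, drop the lock around the callback, and re-find the
position afterwards. The trade-off is that the set may change while the
lock is dropped, so callers get a best-effort report rather than an atomic
snapshot (and if the list changes radically the walk may stop early). Only
names already present in xen/common/rangeset.c (find_range(), next_range(),
the spinlock) are relied on; the restructured body itself is hypothetical.

int rangeset_report_ranges(
    struct rangeset *r, unsigned long s, unsigned long e,
    int (*cb)(unsigned long s, unsigned long e, void *), void *ctxt)
{
    struct range *x;
    int rc = 0;

    spin_lock(&r->lock);

    for ( x = find_range(r, s); x && (x->s <= e) && !rc; )
    {
        unsigned long xs = x->s, xe = x->e;

        if ( xe < s )
        {
            x = next_range(r, x);
            continue;
        }

        /* Invoke the callback with the lock dropped. */
        spin_unlock(&r->lock);
        rc = cb(max(xs, s), min(xe, e), ctxt);
        spin_lock(&r->lock);

        if ( xe >= e )
            break;

        /* The list may have changed while unlocked: re-find our place. */
        s = xe + 1;
        x = find_range(r, s);
    }

    spin_unlock(&r->lock);

    return rc;
}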
Jeremy Fitzhardinge
2011-Mar-30 22:44 UTC
Re: [Xen-devel] [PATCH] switch rangeset's lock to rwlock
On 03/25/2011 01:52 PM, Keir Fraser wrote:
> On 25/03/2011 17:52, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:
>
>> Tmem (in Xen) does use rwlocks. After hearing from Jeremy that
>> Linux maintainers wouldn't approve of new users of rwlocks, I
>> redid the locking structure in the in-Linux-kernel version of tmem
>> to avoid using them. I am fairly sure that the same approach used in
>> zcache can be used in Xen, but have not tried, and it's likely
>> to be a fairly big coding/testing effort that I can't undertake
>> right now.
>>
>> I am also fairly sure that the current Xen tmem locking structure
>> is not suitable for switching to normal spinlocks nor RCU,
>> but am far from an expert in this area.
>
> Why would a normal spinlock not work?

Should work; any rwlock use that can't be mechanically replaced with
plain spinlocks is buggy anyway.

    J
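To illustrate the "mechanically replaced" remark (hypothetical code, not
from the Xen tree): about the only correctness property a mechanical
read_lock()/write_lock() to spin_lock() conversion loses is the ability to
nest read-side acquisitions, and code relying on such nesting is exactly
the kind of rwlock use being called buggy here, since whether it is safe
at all depends on the rwlock implementation (a writer-preferring one can
deadlock on it too).

/* Hypothetical example; assume rwlock_init(&demo_lock) ran at init time. */
static rwlock_t demo_lock;

static void lookup(void)
{
    read_lock(&demo_lock);     /* nested read acquisition */
    /* ... read the shared structure ... */
    read_unlock(&demo_lock);
}

static void caller(void)
{
    read_lock(&demo_lock);
    lookup();                  /* deadlocks if both locks become spin_lock() */
    read_unlock(&demo_lock);
}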