Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/xen/arch/ia64/linux-xen/acpi.c
+++ b/xen/arch/ia64/linux-xen/acpi.c
@@ -557,7 +557,7 @@ acpi_numa_processor_affinity_init(struct
(pa->apic_id << 8) | (pa->local_sapic_eid);
/* nid should be overridden as logical node id later */
node_cpuid[srat_num_cpus].nid = pxm;
- cpu_set(srat_num_cpus, early_cpu_possible_map);
+ cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map);
srat_num_cpus++;
}
@@ -917,7 +917,7 @@ __init void prefill_possible_map(void)
possible, max((possible - available_cpus), 0));
for (i = 0; i < possible; i++)
- cpu_set(i, cpu_possible_map);
+ cpumask_set_cpu(i, &cpu_possible_map);
}
#ifndef XEN
--- a/xen/arch/ia64/linux-xen/setup.c
+++ b/xen/arch/ia64/linux-xen/setup.c
@@ -463,7 +463,7 @@ mark_bsp_online (void)
{
#ifdef CONFIG_SMP
/* If we register an early console, allow CPU 0 to printk */
- cpu_set(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), 1);
#endif
}
--- a/xen/arch/ia64/linux-xen/smpboot.c
+++ b/xen/arch/ia64/linux-xen/smpboot.c
@@ -392,7 +392,7 @@ smp_callin (void)
#else
lock_ipi_calllock();
#endif
- cpu_set(cpuid, cpu_online_map);
+ set_cpu_online(cpuid, 1);
#ifdef XEN
unlock_ipi_calllock(flags);
#else
@@ -437,7 +437,7 @@ smp_callin (void)
/*
* Allow the master to continue.
*/
- cpu_set(cpuid, cpu_callin_map);
+ cpumask_set_cpu(cpuid, &cpu_callin_map);
Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
@@ -625,8 +625,8 @@ smp_prepare_cpus (unsigned int max_cpus)
/*
* We have the boot CPU online for sure.
*/
- cpu_set(0, cpu_online_map);
- cpu_set(0, cpu_callin_map);
+ set_cpu_online(0, 1);
+ cpumask_set_cpu(0, &cpu_callin_map);
local_cpu_data->loops_per_jiffy = loops_per_jiffy;
ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -652,8 +652,8 @@ smp_prepare_cpus (unsigned int max_cpus)
void __devinit smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_callin_map);
+ set_cpu_online(smp_processor_id(), 1);
+ cpumask_set_cpu(smp_processor_id(), &cpu_callin_map);
per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
}
--- a/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
+++ b/xen/arch/ia64/linux-xen/sn/kernel/sn2_smp.c
@@ -214,7 +214,7 @@ sn2_global_tlb_purge(unsigned long start
for_each_possible_cpu(cpu) {
cnode = cpu_to_node(cpu);
if (!node_isset(cnode, nodes_flushed)) {
- cpu_set(cpu, selected_cpus);
+ cpumask_set_cpu(cpu, &selected_cpus);
i++;
}
node_set(cnode, nodes_flushed);
--- a/xen/arch/ia64/xen/mm_init.c
+++ b/xen/arch/ia64/xen/mm_init.c
@@ -38,7 +38,7 @@ ia64_mmu_init (void *my_cpu_data)
ia64_set_psr(psr);
ia64_srlz_i();
#ifdef XEN
- cpu_set(cpu, percpu_set);
+ cpumask_set_cpu(cpu, &percpu_set);
#endif
/*
--- a/xen/arch/ia64/xen/tlb_track.c
+++ b/xen/arch/ia64/xen/tlb_track.c
@@ -389,7 +389,7 @@ tlb_track_insert_or_dirty(struct tlb_tra
found:
BUG_ON(v->processor >= NR_CPUS);
- cpu_set(v->processor, entry->pcpu_dirty_mask);
+ cpumask_set_cpu(v->processor, &entry->pcpu_dirty_mask);
BUG_ON(v->vcpu_id >= NR_CPUS);
vcpu_set(v->vcpu_id, entry->vcpu_dirty_mask);
perfc_incr(tlb_track_iod_dirtied);
--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -271,9 +271,9 @@ static void mwait_idle_with_hints(unsign
*/
if ( expires > NOW() || expires == 0 )
{
- cpu_set(cpu, cpuidle_mwait_flags);
+ cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
__mwait(eax, ecx);
- cpu_clear(cpu, cpuidle_mwait_flags);
+ cpumask_clear_cpu(cpu, &cpuidle_mwait_flags);
}
if ( expires <= NOW() && expires > 0 )
--- a/xen/arch/x86/cpu/mcheck/mce_intel.c
+++ b/xen/arch/x86/cpu/mcheck/mce_intel.c
@@ -828,7 +828,7 @@ static void intel_machine_check(struct c
* (the MSRs are sticky)
*/
if (bs.pcc || !bs.recoverable)
- cpu_set(smp_processor_id(), mce_fatal_cpus);
+ cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
} else {
if (mctc != NULL)
mctelem_commit(mctc);
@@ -849,7 +849,7 @@ static void intel_machine_check(struct c
mce_barrier_enter(&mce_trap_bar);
if ( mctc != NULL && mce_urgent_action(regs, mctc))
- cpu_set(smp_processor_id(), mce_fatal_cpus);
+ cpumask_set_cpu(smp_processor_id(), &mce_fatal_cpus);
mce_barrier_exit(&mce_trap_bar);
/*
* Wait until everybody has processed the trap.
--- a/xen/arch/x86/mpparse.c
+++ b/xen/arch/x86/mpparse.c
@@ -161,7 +161,7 @@ static int __devinit MP_processor_info_x
return cpu;
}
x86_cpu_to_apicid[cpu] = apicid;
- cpu_set(cpu, cpu_present_map);
+ set_cpu_present(cpu, 1);
}
if (++num_processors > 8) {
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -288,7 +288,7 @@ void __init numa_initmem_init(unsigned l
__cpuinit void numa_add_cpu(int cpu)
{
- cpu_set(cpu, node_to_cpumask[cpu_to_node(cpu)]);
+ cpumask_set_cpu(cpu, &node_to_cpumask[cpu_to_node(cpu)]);
}
void __cpuinit numa_set_node(int cpu, int node)
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -244,7 +244,7 @@ static void set_cpu_sibling_map(int cpu)
int i;
struct cpuinfo_x86 *c = cpu_data;
- cpu_set(cpu, cpu_sibling_setup_map);
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
if ( c[cpu].x86_num_siblings > 1 )
{
@@ -380,7 +380,7 @@ void start_secondary(void *unused)
*/
lock_vector_lock();
__setup_vector_irq(cpu);
- cpu_set(cpu, cpu_online_map);
+ set_cpu_online(cpu, 1);
unlock_vector_lock();
init_percpu_time();
@@ -804,8 +804,8 @@ void __init smp_prepare_cpus(unsigned in
void __init smp_prepare_boot_cpu(void)
{
- cpu_set(smp_processor_id(), cpu_online_map);
- cpu_set(smp_processor_id(), cpu_present_map);
+ set_cpu_online(smp_processor_id(), 1);
+ set_cpu_present(smp_processor_id(), 1);
}
static void
@@ -933,7 +933,7 @@ int cpu_add(uint32_t apic_id, uint32_t a
"break assumed cross-CPU TSC coherency.\n"
" ** Consider using boot parameter
\"tsc=skewed\" "
"which forces TSC emulation where appropriate.\n",
cpu);
- cpu_set(cpu, tsc_sync_cpu_mask);
+ cpumask_set_cpu(cpu, &tsc_sync_cpu_mask);
}
srat_detect_node(cpu);
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1573,7 +1573,7 @@ __initcall(disable_pit_irq);
void pit_broadcast_enter(void)
{
- cpu_set(smp_processor_id(), pit_broadcast_mask);
+ cpumask_set_cpu(smp_processor_id(), &pit_broadcast_mask);
}
void pit_broadcast_exit(void)
--- a/xen/common/cpupool.c
+++ b/xen/common/cpupool.c
@@ -253,7 +253,7 @@ static long cpupool_unassign_cpu_helper(
spin_lock(&cpupool_lock);
ret = cpu_disable_scheduler(cpu);
- cpu_set(cpu, cpupool_free_cpus);
+ cpumask_set_cpu(cpu, &cpupool_free_cpus);
if ( !ret )
{
ret = schedule_cpu_switch(cpu, NULL);
@@ -409,8 +409,8 @@ void cpupool_rm_domain(struct domain *d)
static void cpupool_cpu_add(unsigned int cpu)
{
spin_lock(&cpupool_lock);
- cpu_clear(cpu, cpupool_locked_cpus);
- cpu_set(cpu, cpupool_free_cpus);
+ cpumask_clear_cpu(cpu, &cpupool_locked_cpus);
+ cpumask_set_cpu(cpu, &cpupool_free_cpus);
cpupool_assign_cpu_locked(cpupool0, cpu);
spin_unlock(&cpupool_lock);
}
@@ -428,7 +428,7 @@ static int cpupool_cpu_remove(unsigned i
if ( !cpumask_test_cpu(cpu, cpupool0->cpu_valid))
ret = -EBUSY;
else
- cpu_set(cpu, cpupool_locked_cpus);
+ cpumask_set_cpu(cpu, &cpupool_locked_cpus);
spin_unlock(&cpupool_lock);
return ret;
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -1725,7 +1725,7 @@ csched_schedule(
{
/* Update the idle mask if necessary */
if ( !cpumask_test_cpu(cpu, &rqd->idle) )
- cpu_set(cpu, rqd->idle);
+ cpumask_set_cpu(cpu, &rqd->idle);
/* Make sure avgload gets updated periodically even
* if there's no activity */
update_load(ops, rqd, NULL, 0, now);
@@ -1860,7 +1860,7 @@ static void activate_runqueue(struct csc
INIT_LIST_HEAD(&rqd->runq);
spin_lock_init(&rqd->lock);
- cpu_set(rqi, prv->active_queues);
+ cpumask_set_cpu(rqi, &prv->active_queues);
}
static void deactivate_runqueue(struct csched_private *prv, int rqi)
@@ -1927,12 +1927,12 @@ static void init_pcpu(const struct sched
/* Set the runqueue map */
prv->runq_map[cpu]=rqi;
- cpu_set(cpu, rqd->idle);
- cpu_set(cpu, rqd->active);
+ cpumask_set_cpu(cpu, &rqd->idle);
+ cpumask_set_cpu(cpu, &rqd->active);
spin_unlock(old_lock);
- cpu_set(cpu, prv->initialized);
+ cpumask_set_cpu(cpu, &prv->initialized);
spin_unlock_irqrestore(&prv->lock, flags);
--- a/xen/include/asm-ia64/linux-xen/asm/acpi.h
+++ b/xen/include/asm-ia64/linux-xen/asm/acpi.h
@@ -153,7 +153,7 @@ static inline void per_cpu_scan_finalize
high_cpu = min(high_cpu + reserve_cpus, NR_CPUS);
for (cpu = low_cpu; cpu < high_cpu; cpu++) {
- cpu_set(cpu, early_cpu_possible_map);
+ cpumask_set_cpu(cpu, &early_cpu_possible_map);
if (node_cpuid[cpu].nid == NUMA_NO_NODE) {
node_cpuid[cpu].nid = next_nid;
next_nid++;
--- a/xen/include/xen/cpumask.h
+++ b/xen/include/xen/cpumask.h
@@ -97,7 +97,6 @@ static inline unsigned int cpumask_check
return cpu;
}
-#define cpu_set(cpu, dst) cpumask_set_cpu(cpu, &(dst))
static inline void cpumask_set_cpu(int cpu, volatile cpumask_t *dstp)
{
set_bit(cpumask_check(cpu), dstp->bits);
@@ -452,6 +451,14 @@ extern cpumask_t cpu_present_map;
#define cpu_present(cpu) ((cpu) == 0)
#endif
+#define set_cpu_online(cpu, online) \
+ ((online) ? cpumask_set_cpu(cpu, &cpu_online_map) \
+ : cpumask_clear_cpu(cpu, &cpu_online_map))
+
+#define set_cpu_present(cpu, present) \
+ ((present) ? cpumask_set_cpu(cpu, &cpu_present_map) \
+ : cpumask_clear_cpu(cpu, &cpu_present_map))
+
#define any_online_cpu(mask) \
({ \
int cpu; \
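
The transformation in this series is mechanical; here is a minimal
before/after sketch of a typical call site, using only the interfaces
visible in the diff above (illustrative, not a quoted hunk):

    /* Before: the cpu_set() macro hid the address-of on the mask. */
    cpu_set(cpu, cpu_online_map);

    /* After: either the generic cpumask accessor... */
    cpumask_set_cpu(cpu, &cpu_online_map);

    /* ...or, for the online/present maps, the wrappers added above. */
    set_cpu_online(cpu, 1);
    set_cpu_present(cpu, 1);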
On 07/11/2011 09:57, "Jan Beulich" <JBeulich@suse.com> wrote:> Signed-off-by: Jan Beulich <jbeulich@suse.com>I don''t like set_cpu_{present,online} taking a boolean clear/set flag. There is no caller that can both set and clear a flag, so it is always hardcoded as 0 or 1. And then the reader has to make a (probably not hard) guess what that means. If you must add an abstraction interface here, better to define four of them: {set,clear}_cpu_{present,online}. Apart from this, all patches are: Acked-by: Keir Fraser <keir@xen.org> -- Keir> --- a/xen/arch/ia64/linux-xen/acpi.c > +++ b/xen/arch/ia64/linux-xen/acpi.c > @@ -557,7 +557,7 @@ acpi_numa_processor_affinity_init(struct > (pa->apic_id << 8) | (pa->local_sapic_eid); > /* nid should be overridden as logical node id later */ > node_cpuid[srat_num_cpus].nid = pxm; > - cpu_set(srat_num_cpus, early_cpu_possible_map); > + cpumask_set_cpu(srat_num_cpus, &early_cpu_possible_map); > srat_num_cpus++; > } > > @@ -917,7 +917,7 @@ __init void prefill_possible_map(void) > possible, max((possible - available_cpus), 0)); > > for (i = 0; i < possible; i++) > - cpu_set(i, cpu_possible_map); > + cpumask_set_cpu(i, &cpu_possible_map); > } > > #ifndef XEN > --- a/xen/arch/ia64/linux-xen/setup.c > +++ b/xen/arch/ia64/linux-xen/setup.c > @@ -463,7 +463,7 @@ mark_bsp_online (void) > { > #ifdef CONFIG_SMP > /* If we register an early console, allow CPU 0 to printk */ > - cpu_set(smp_processor_id(), cpu_online_map); > + set_cpu_online(smp_processor_id(), 1); > #endif > } > > --- a/xen/arch/ia64/linux-xen/smpboot.c > +++ b/xen/arch/ia64/linux-xen/smpboot.c > @@ -392,7 +392,7 @@ smp_callin (void) > #else > lock_ipi_calllock(); > #endif > - cpu_set(cpuid, cpu_online_map); > + set_cpu_online(cpuid, 1); > #ifdef XEN > unlock_ipi_calllock(flags); > #else > @@ -437,7 +437,7 @@ smp_callin (void) > /* > * Allow the master to continue. > */ > - cpu_set(cpuid, cpu_callin_map); > + cpumask_set_cpu(cpuid, &cpu_callin_map); > Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid); > } > > @@ -625,8 +625,8 @@ smp_prepare_cpus (unsigned int max_cpus) > /* > * We have the boot CPU online for sure. 
>>> On 07.11.11 at 15:31, Keir Fraser <keir@xen.org> wrote:
> On 07/11/2011 09:57, "Jan Beulich" <JBeulich@suse.com> wrote:
>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> I don't like set_cpu_{present,online} taking a boolean clear/set flag.
> There is no caller that can both set and clear a flag, so it is always
> hardcoded as 0 or 1. And then the reader has to make a (probably not
> hard) guess what that means.
>
> If you must add an abstraction interface here, better to define four of
> them: {set,clear}_cpu_{present,online}.

Hmm, I don't like this interface design too much either, but again wanted
to follow Linux rather than cooking our own. Do you really want us to
diverge in this respect?

Jan

> Apart from this, all patches are:
> Acked-by: Keir Fraser <keir@xen.org>
>
> -- Keir
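
For context, the Linux interface Jan is mirroring takes the same boolean
argument; it looks roughly like this (an approximation of
include/linux/cpumask.h of that era, quoted from memory rather than from
any particular kernel release):

    /* Approximation of the Linux counterparts this patch mirrors. */
    void set_cpu_possible(unsigned int cpu, bool possible);
    void set_cpu_present(unsigned int cpu, bool present);
    void set_cpu_online(unsigned int cpu, bool online);
    void set_cpu_active(unsigned int cpu, bool active);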
On 07/11/2011 14:39, "Jan Beulich" <JBeulich@suse.com> wrote:

>> I don't like set_cpu_{present,online} taking a boolean clear/set flag.
>> There is no caller that can both set and clear a flag, so it is always
>> hardcoded as 0 or 1. And then the reader has to make a (probably not
>> hard) guess what that means.
>>
>> If you must add an abstraction interface here, better to define four of
>> them: {set,clear}_cpu_{present,online}.
>
> Hmm, I don't like this interface design too much either, but again wanted
> to follow Linux rather than cooking our own. Do you really want us to
> diverge in this respect?

Yes. Apart from maybe the code that tickles the remote APIC, our smpboot
code is already well diverged from Linux.

 -- Keir
>>> On 07.11.11 at 15:48, Keir Fraser <keir@xen.org> wrote:
> On 07/11/2011 14:39, "Jan Beulich" <JBeulich@suse.com> wrote:
>
>>> I don't like set_cpu_{present,online} taking a boolean clear/set flag.
>>> There is no caller that can both set and clear a flag, so it is always
>>> hardcoded as 0 or 1. And then the reader has to make a (probably not
>>> hard) guess what that means.
>>>
>>> If you must add an abstraction interface here, better to define four of
>>> them: {set,clear}_cpu_{present,online}.
>>
>> Hmm, I don't like this interface design too much either, but again wanted
>> to follow Linux rather than cooking our own. Do you really want us to
>> diverge in this respect?
>
> Yes. Apart from maybe the code that tickles the remote APIC, our smpboot
> code is already well diverged from Linux.

In that case I'd prefer not having a separate abstraction here at all - is
that fine by you?

Jan
On 07/11/2011 14:52, "Jan Beulich" <JBeulich@suse.com> wrote:

>>> Hmm, I don't like this interface design too much either, but again wanted
>>> to follow Linux rather than cooking our own. Do you really want us to
>>> diverge in this respect?
>>
>> Yes. Apart from maybe the code that tickles the remote APIC, our smpboot
>> code is already well diverged from Linux.
>
> In that case I'd prefer not having a separate abstraction here at all - is
> that fine by you?

Yes! I also would prefer no abstraction, although I'm not that fussed
either way since there are other abstractions over cpu_{online,present}_map.

 -- Keir
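
The agreed outcome is thus to drop the wrapper layer and operate on the
maps directly at every call site. A sketch of what the implied follow-up
presumably looks like (not the committed diff):

    /* With no abstraction, call sites open-code the mask operation. */
    cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
    cpumask_set_cpu(smp_processor_id(), &cpu_present_map);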