Here is another set of patches for the per-cpu timer changes to Xen and
Linux. The patch is against today's xen-unstable-src.tgz. It fixes the
occasional panic I was seeing when booting a slower system, and the
constant panic when booting a fast system. It has been tested UP and SMP
on an IBM Summit system and an IntelliStation. The 'time went backwards'
messages are no longer seen.
A previous version of this patch was tested by
aravindh.puthiyaparambil@unisys.com.
Signed-off-by: Don Fry <brazilnut@us.ibm.com>
--- xeno-unstable.bk/xen/common/schedule.c.orig 2005-06-29 11:03:27.000000000 -0700
+++ xeno-unstable.bk/xen/common/schedule.c 2005-06-29 14:30:56.000000000 -0700
@@ -53,6 +53,7 @@ string_param("sched", opt_sched);
/* Various timer handlers. */
static void s_timer_fn(void *unused);
static void t_timer_fn(void *unused);
+static void tsc_timer_fn(void *unused);
static void dom_timer_fn(void *data);
/* This is global for now so that private implementations can reach it */
@@ -76,6 +77,7 @@ static struct scheduler ops;
/* Per-CPU periodic timer sends an event to the currently-executing domain. */
static struct ac_timer t_timer[NR_CPUS];
+static struct ac_timer tsc_timer[NR_CPUS];
void free_domain_struct(struct domain *d)
{
@@ -531,6 +533,7 @@ int idle_cpu(int cpu)
* Timers: the scheduler utilises a number of timers
* - s_timer: per CPU timer for preemption and scheduling decisions
* - t_timer: per CPU periodic timer to send timer interrupt to current dom
+ * - tsc_timer: per CPU periodic timer to update time bases
* - dom_timer: per domain timer to specifiy timeout values
****************************************************************************/
@@ -560,6 +563,17 @@ static void t_timer_fn(void *unused)
set_ac_timer(&t_timer[cpu], NOW() + MILLISECS(10));
}
+/* Periodic tick timer: update time bases for per-cpu timing. */
+static void tsc_timer_fn(void *unused)
+{
+ unsigned int cpu = current->processor;
+
+ extern void percpu_ticks(void);
+ percpu_ticks();
+
+ set_ac_timer(&tsc_timer[cpu], NOW() + MILLISECS(250));
+}
+
/* Domain timer function, sends a virtual timer interrupt to domain */
static void dom_timer_fn(void *data)
{
@@ -581,6 +595,7 @@ void __init scheduler_init(void)
spin_lock_init(&schedule_data[i].schedule_lock);
init_ac_timer(&schedule_data[i].s_timer, s_timer_fn, NULL, i);
init_ac_timer(&t_timer[i], t_timer_fn, NULL, i);
+ init_ac_timer(&tsc_timer[i], tsc_timer_fn, NULL, i);
}
schedule_data[0].curr = idle_task[0];
@@ -610,6 +625,9 @@ void schedulers_start(void)
{
t_timer_fn(0);
smp_call_function((void *)t_timer_fn, NULL, 1, 1);
+
+ tsc_timer_fn(0);
+ smp_call_function((void *)tsc_timer_fn, NULL, 1, 1);
}
void dump_runq(unsigned char key)
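A note on the 250ms period above: percpu_ticks() (added to
xen/arch/x86/time.c further down) advances each CPU's time bases in
quarter-second steps of cpu_freq >> 2 cycles, so firing tsc_timer every
250ms normally consumes exactly one step per firing. A standalone sketch
of that arithmetic; the 2GHz figure is purely an example:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t cpu_freq = 2000000000ULL;  /* hypothetical 2 GHz CPU */
    uint64_t quarter  = cpu_freq >> 2;  /* 500,000,000 cycles per 0.25s */

    /* each consumed quarter advances the per-cpu time bases by 0.25s: */
    printf("tsc step         = %llu cycles\n", (unsigned long long)quarter);
    printf("wc_usec step     = %lu usec\n", 1000000UL / 4);
    printf("system_time step = %llu ns\n", 1000000000ULL / 4);
    return 0;
}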
--- xeno-unstable.bk/xen/arch/x86/smpboot.c.orig 2005-06-29 11:00:56.000000000 -0700
+++ xeno-unstable.bk/xen/arch/x86/smpboot.c 2005-06-29 14:31:07.000000000 -0700
@@ -433,6 +433,7 @@ void __init start_secondary(void *unused
extern void percpu_traps_init(void);
extern void cpu_init(void);
+ extern void setup_percpu_time(void);
set_current(idle_task[cpu]);
set_processor_id(cpu);
@@ -454,6 +455,7 @@ void __init start_secondary(void *unused
setup_secondary_APIC_clock();
enable_APIC_timer();
+ setup_percpu_time();
/*
* low-memory mappings have been cleared, flush them from
--- xeno-unstable.bk/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c.orig 2005-06-29 11:02:56.000000000 -0700
+++ xeno-unstable.bk/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/time.c 2005-07-08 11:59:07.000000000 -0700
@@ -104,10 +104,10 @@ extern struct timer_opts timer_tsc;
struct timer_opts *cur_timer = &timer_tsc;
/* These are peridically updated in shared_info, and then copied here. */
-u32 shadow_tsc_stamp;
-u64 shadow_system_time;
-static u32 shadow_time_version;
-static struct timeval shadow_tv;
+DEFINE_PER_CPU(u64, shadow_tsc_stamp);
+DEFINE_PER_CPU(u64, shadow_system_time);
+DEFINE_PER_CPU(u32, shadow_time_version);
+DEFINE_PER_CPU(struct timeval, shadow_tv);
/*
* We use this to ensure that gettimeofday() is monotonically increasing. We
@@ -124,7 +124,6 @@ static long last_rtc_update, last_update
static long last_update_from_xen; /* UTC seconds when last read Xen clock. */
/* Keep track of last time we did processing/updating of jiffies and xtime. */
-static u64 processed_system_time; /* System time (ns) at last processing. */
static DEFINE_PER_CPU(u64, processed_system_time);
#define NS_PER_TICK (1000000000ULL/HZ)
@@ -171,23 +170,24 @@ __setup("independent_wallclock", __indep
static void __get_time_values_from_xen(void)
{
shared_info_t *s = HYPERVISOR_shared_info;
+ int cpu = smp_processor_id();
do {
- shadow_time_version = s->time_version2;
+ per_cpu(shadow_time_version, cpu) = s->vcpu_time[cpu].time_version2;
rmb();
- shadow_tv.tv_sec = s->wc_sec;
- shadow_tv.tv_usec = s->wc_usec;
- shadow_tsc_stamp = (u32)s->tsc_timestamp;
- shadow_system_time = s->system_time;
+ per_cpu(shadow_tv.tv_sec, cpu) = s->vcpu_time[cpu].wc_sec;
+ per_cpu(shadow_tv.tv_usec, cpu) = s->vcpu_time[cpu].wc_usec;
+ per_cpu(shadow_tsc_stamp, cpu) = s->vcpu_time[cpu].tsc_timestamp;
+ per_cpu(shadow_system_time, cpu) = s->vcpu_time[cpu].system_time;
rmb();
}
- while (shadow_time_version != s->time_version1);
+ while (per_cpu(shadow_time_version, cpu) != s->vcpu_time[cpu].time_version1);
cur_timer->mark_offset();
}
#define TIME_VALUES_UP_TO_DATE \
- ({ rmb(); (shadow_time_version == HYPERVISOR_shared_info->time_version2); })
+ ({ rmb(); (per_cpu(shadow_time_version, cpu) == HYPERVISOR_shared_info->vcpu_time[cpu].time_version2); })
/*
* This version of gettimeofday has microsecond resolution
@@ -200,6 +200,7 @@ void do_gettimeofday(struct timeval *tv)
unsigned long max_ntp_tick;
unsigned long flags;
s64 nsec;
+ int cpu = smp_processor_id();
do {
unsigned long lost;
@@ -227,7 +228,7 @@ void do_gettimeofday(struct timeval *tv)
sec = xtime.tv_sec;
usec += (xtime.tv_nsec / NSEC_PER_USEC);
- nsec = shadow_system_time - processed_system_time;
+ nsec = per_cpu(shadow_system_time, cpu) - per_cpu(processed_system_time, cpu);
__normalize_time(&sec, &nsec);
usec += (long)nsec / NSEC_PER_USEC;
@@ -273,6 +274,7 @@ int do_settimeofday(struct timespec *tv)
long wtm_nsec;
s64 nsec;
struct timespec xentime;
+ int cpu = smp_processor_id();
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
@@ -306,7 +308,7 @@ int do_settimeofday(struct timespec *tv)
*/
nsec -= (jiffies - wall_jiffies) * TICK_NSEC;
- nsec -= (shadow_system_time - processed_system_time);
+ nsec -= (per_cpu(shadow_system_time, cpu) - per_cpu(processed_system_time, cpu));
__normalize_time(&sec, &nsec);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
@@ -331,7 +333,7 @@ int do_settimeofday(struct timespec *tv)
op.cmd = DOM0_SETTIME;
op.u.settime.secs = xentime.tv_sec;
op.u.settime.usecs = xentime.tv_nsec / NSEC_PER_USEC;
- op.u.settime.system_time = shadow_system_time;
+ op.u.settime.system_time = per_cpu(shadow_system_time, cpu);
write_sequnlock_irq(&xtime_lock);
HYPERVISOR_dom0_op(&op);
} else
@@ -384,6 +386,8 @@ unsigned long profile_pc(struct pt_regs
EXPORT_SYMBOL(profile_pc);
#endif
+extern unsigned long long get_full_tsc_offset(void);
+
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "do_timer()" routine every clocktick
@@ -392,27 +396,25 @@ static inline void do_timer_interrupt(in
struct pt_regs *regs)
{
time_t wtm_sec, sec;
- s64 delta, delta_cpu, nsec;
+ s64 delta_cpu, nsec;
long sec_diff, wtm_nsec;
int cpu = smp_processor_id();
do {
__get_time_values_from_xen();
- delta = delta_cpu = (s64)shadow_system_time +
- ((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC);
- delta -= processed_system_time;
- delta_cpu -= per_cpu(processed_system_time, cpu);
+ delta_cpu = per_cpu(shadow_system_time, cpu)
+ + get_full_tsc_offset()
+ - per_cpu(processed_system_time, cpu);
}
while (!TIME_VALUES_UP_TO_DATE);
- if (unlikely(delta < 0) || unlikely(delta_cpu < 0)) {
+ if (unlikely(delta_cpu < 0)) {
printk("Timer ISR/%d: Time went backwards: "
- "delta=%lld cpu_delta=%lld shadow=%lld "
- "off=%lld processed=%lld cpu_processed=%lld\n",
- cpu, delta, delta_cpu, shadow_system_time,
+ "cpu_delta=%lld "
+ "off=%lld cpu_processed=%lld\n",
+ cpu, delta_cpu,
((s64)cur_timer->get_offset() * (s64)NSEC_PER_USEC),
- processed_system_time,
per_cpu(processed_system_time, cpu));
for (cpu = 0; cpu < num_online_cpus(); cpu++)
printk(" %d: %lld\n", cpu,
@@ -420,19 +422,15 @@ static inline void do_timer_interrupt(in
return;
}
- /* System-wide jiffy work. */
- while (delta >= NS_PER_TICK) {
- delta -= NS_PER_TICK;
- processed_system_time += NS_PER_TICK;
- do_timer(regs);
- }
-
/* Local CPU jiffy work. */
while (delta_cpu >= NS_PER_TICK) {
delta_cpu -= NS_PER_TICK;
per_cpu(processed_system_time, cpu) += NS_PER_TICK;
update_process_times(user_mode(regs));
profile_tick(CPU_PROFILING, regs);
+ if (cpu == 0) {
+ do_timer(regs);
+ }
}
if (cpu != 0)
@@ -447,19 +445,19 @@ static inline void do_timer_interrupt(in
((time_status & STA_UNSYNC) != 0) &&
(xtime.tv_sec > (last_update_from_xen + 60))) {
/* Adjust shadow for jiffies that haven't updated xtime yet. */
- shadow_tv.tv_usec -=
+ per_cpu(shadow_tv.tv_usec, cpu) -=
(jiffies - wall_jiffies) * (USEC_PER_SEC / HZ);
- HANDLE_USEC_UNDERFLOW(shadow_tv);
+ HANDLE_USEC_UNDERFLOW(per_cpu(shadow_tv, cpu));
/*
* Reset our running time counts if they are invalidated by
* a warp backwards of more than 500ms.
*/
- sec_diff = xtime.tv_sec - shadow_tv.tv_sec;
+ sec_diff = xtime.tv_sec - per_cpu(shadow_tv.tv_sec, cpu);
if (unlikely(abs(sec_diff) > 1) ||
unlikely(((sec_diff * USEC_PER_SEC) +
(xtime.tv_nsec / NSEC_PER_USEC) -
- shadow_tv.tv_usec) > 500000)) {
+ per_cpu(shadow_tv.tv_usec, cpu)) > 500000)) {
#ifdef CONFIG_XEN_PRIVILEGED_GUEST
last_rtc_update = last_update_to_xen = 0;
#endif
@@ -467,8 +465,8 @@ static inline void do_timer_interrupt(in
}
/* Update our unsynchronised xtime appropriately. */
- sec = shadow_tv.tv_sec;
- nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+ sec = per_cpu(shadow_tv.tv_sec, cpu);
+ nsec = per_cpu(shadow_tv.tv_usec, cpu) * NSEC_PER_USEC;
__normalize_time(&sec, &nsec);
wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
@@ -498,7 +496,7 @@ static inline void do_timer_interrupt(in
op.cmd = DOM0_SETTIME;
op.u.settime.secs = tv.tv_sec;
op.u.settime.usecs = tv.tv_usec;
- op.u.settime.system_time = shadow_system_time;
+ op.u.settime.system_time = per_cpu(shadow_system_time, cpu);
HYPERVISOR_dom0_op(&op);
last_update_to_xen = xtime.tv_sec;
@@ -654,6 +652,7 @@ static struct irqaction irq_timer = {
void __init time_init(void)
{
+ int cpu = smp_processor_id();
#ifdef CONFIG_HPET_TIMER
if (is_hpet_capable()) {
/*
@@ -665,12 +664,11 @@ void __init time_init(void)
}
#endif
__get_time_values_from_xen();
- xtime.tv_sec = shadow_tv.tv_sec;
- xtime.tv_nsec = shadow_tv.tv_usec * NSEC_PER_USEC;
+ xtime.tv_sec = per_cpu(shadow_tv.tv_sec, cpu);
+ xtime.tv_nsec = per_cpu(shadow_tv.tv_usec, cpu) * NSEC_PER_USEC;
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
- processed_system_time = shadow_system_time;
- per_cpu(processed_system_time, 0) = processed_system_time;
+ per_cpu(processed_system_time, 0) = per_cpu(shadow_system_time, 0);
if (timer_tsc_init.init(NULL) != 0)
BUG();
@@ -703,7 +701,7 @@ static inline u64 jiffies_to_st(unsigned
* but that's ok: we'll just end up with a shorter timeout.
*/
if (delta < 1)
delta = 1;
- st = processed_system_time + (delta * NS_PER_TICK);
+ st = per_cpu(processed_system_time, smp_processor_id()) + (delta * NS_PER_TICK);
} while (read_seqretry(&xtime_lock, seq));
return st;
@@ -752,8 +750,7 @@ void time_resume(void)
__get_time_values_from_xen();
/* Reset our own concept of passage of system time. */
- processed_system_time = shadow_system_time;
- per_cpu(processed_system_time, 0) = processed_system_time;
+ per_cpu(processed_system_time, 0) = per_cpu(shadow_system_time, 0);
/* Accept a warp in UTC (wall-clock) time. */
last_seen_tv.tv_sec = 0;
@@ -770,7 +767,7 @@ void local_setup_timer(void)
do {
seq = read_seqbegin(&xtime_lock);
- per_cpu(processed_system_time, cpu) = shadow_system_time;
+ per_cpu(processed_system_time, cpu) = per_cpu(shadow_system_time, cpu);
} while (read_seqretry(&xtime_lock, seq));
per_cpu(timer_irq, cpu) = bind_virq_to_irq(VIRQ_TIMER);
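A note on the version-pair protocol the loop above relies on: the
producer (Xen) bumps time_version1, writes the payload, then bumps
time_version2, with write barriers in between; the consumer snapshots
time_version2, copies the fields, and retries if time_version1 moved.
A minimal standalone sketch of the consumer side, with illustrative
names (not the kernel's):

#include <stdint.h>

struct time_snap {
    uint64_t system_time;
    uint64_t tsc_stamp;
};

/* __sync_synchronize() stands in for the kernel's rmb(). */
static void read_consistent(volatile uint32_t *v1, volatile uint32_t *v2,
                            volatile struct time_snap *src,
                            struct time_snap *dst)
{
    uint32_t ver;
    do {
        ver = *v2;                /* snapshot the trailing version */
        __sync_synchronize();
        dst->system_time = src->system_time;
        dst->tsc_stamp   = src->tsc_stamp;
        __sync_synchronize();
    } while (ver != *v1);         /* writer was active: retry */
}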
--- xeno-unstable.bk/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c.orig 2005-06-29 11:03:12.000000000 -0700
+++ xeno-unstable.bk/linux-2.6.11-xen-sparse/arch/xen/i386/kernel/timers/timer_tsc.c 2005-07-08 11:58:00.000000000 -0700
@@ -10,6 +10,7 @@
#include <linux/cpufreq.h>
#include <linux/string.h>
#include <linux/jiffies.h>
+#include <linux/percpu.h>
#include <asm/timer.h>
#include <asm/io.h>
@@ -35,8 +36,8 @@ extern spinlock_t i8253_lock;
static int use_tsc;
-static unsigned long long monotonic_base;
-static u32 monotonic_offset;
+static DEFINE_PER_CPU(unsigned long long, monotonic_base);
+static DEFINE_PER_CPU(u64, monotonic_offset);
static seqlock_t monotonic_lock = SEQLOCK_UNLOCKED;
/* convert from cycles(64bits) => nanoseconds (64bits)
@@ -75,18 +76,32 @@ static inline unsigned long long cycles_
static unsigned long fast_gettimeoffset_quotient;
extern u32 shadow_tsc_stamp;
-extern u64 shadow_system_time;
+DECLARE_PER_CPU(u64, shadow_tsc_stamp);
+DECLARE_PER_CPU(u64, shadow_system_time);
+
+unsigned long long get_full_tsc_offset(void)
+{
+ unsigned long long tsc;
+
+ /* Read the Time Stamp Counter */
+ rdtscll(tsc);
+
+ tsc -= per_cpu(shadow_tsc_stamp, smp_processor_id());
+
+ return cycles_2_ns(tsc);
+}
static unsigned long get_offset_tsc(void)
{
register unsigned long eax, edx;
+ unsigned long long tsc;
/* Read the Time Stamp Counter */
- rdtsc(eax,edx);
+ rdtscll(tsc);
/* .. relative to previous jiffy (32 bits is enough) */
- eax -= shadow_tsc_stamp;
+ eax = tsc - per_cpu(shadow_tsc_stamp, smp_processor_id());
/*
* Time offset = (tsc_low delta) * fast_gettimeoffset_quotient
@@ -110,12 +125,13 @@ static unsigned long long monotonic_cloc
{
unsigned long long last_offset, this_offset, base;
unsigned seq;
+ int cpu = smp_processor_id();
/* atomically read monotonic base & last_offset */
do {
seq = read_seqbegin(&monotonic_lock);
- last_offset = monotonic_offset;
- base = monotonic_base;
+ last_offset = per_cpu(monotonic_offset, cpu);
+ base = per_cpu(monotonic_base, cpu);
} while (read_seqretry(&monotonic_lock, seq));
/* Read the Time Stamp Counter */
@@ -152,11 +168,12 @@ unsigned long long sched_clock(void)
static void mark_offset_tsc(void)
{
+ int cpu = smp_processor_id();
/* update the monotonic base value */
write_seqlock(&monotonic_lock);
- monotonic_base = shadow_system_time;
- monotonic_offset = shadow_tsc_stamp;
+ per_cpu(monotonic_base, cpu) = per_cpu(shadow_system_time, cpu);
+ per_cpu(monotonic_offset, cpu) = per_cpu(shadow_tsc_stamp, cpu);
write_sequnlock(&monotonic_lock);
}
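get_full_tsc_offset() above reuses this file's existing cycles_2_ns()
fixed-point helper. A standalone sketch of that conversion; the 10-bit
scale factor matches the stock i386 timer_tsc.c, but treat the constants
as illustrative:

#include <stdint.h>

#define SCALE_BITS 10  /* fraction bits, as in CYC2NS_SCALE_FACTOR */

/* ns = cycles * 1000 / cpu_mhz, done with a precomputed integer
 * scale so no division happens on the hot path. */
static uint64_t cycles_to_ns(uint64_t cycles, unsigned long cpu_mhz)
{
    unsigned long scale = (1000UL << SCALE_BITS) / cpu_mhz;
    return (cycles * scale) >> SCALE_BITS;
}

/* e.g. cpu_mhz == 2000: scale = 512, so 1,000,000 cycles ->
 * 500,000 ns, i.e. half a millisecond at 2 GHz, as expected. */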
--- xeno-unstable.bk/xen/arch/x86/time.c.orig 2005-06-29 11:03:27.000000000 -0700
+++ xeno-unstable.bk/xen/arch/x86/time.c 2005-06-29 14:33:30.000000000 -0700
@@ -48,6 +48,29 @@ static u64 full_tsc_irq;
static s_time_t stime_irq; /* System time at last 'time update' */
static unsigned long wc_sec, wc_usec; /* UTC time at last 'time update'. */
static rwlock_t time_lock = RW_LOCK_UNLOCKED;
+static time_info_t percpu_time_info[NR_CPUS];
+
+void percpu_ticks(void)
+{
+ int cpu = smp_processor_id();
+ time_info_t *t = &percpu_time_info[cpu];
+ u64 tsc, delta;
+ u64 quarter = t->cpu_freq >> 2;
+
+ rdtscll(tsc);
+ delta = tsc - t->tsc_timestamp;
+ while (delta >= quarter) {
+ t->wc_usec += 1000000UL / 4;
+ t->system_time += 1000000000ULL / 4;
+ t->tsc_timestamp += quarter;
+ delta -= quarter;
+ }
+
+ while (t->wc_usec >= 1000000UL) {
+ t->wc_sec += 1;
+ t->wc_usec -= 1000000UL;
+ }
+}
void timer_interrupt(int irq, void *dev_id, struct cpu_user_regs *regs)
{
@@ -278,20 +301,29 @@ static inline void __update_dom_time(str
{
struct domain *d = v->domain;
shared_info_t *si = d->shared_info;
+ time_info_t *dom = &si->vcpu_time[v->processor];
+ time_info_t *xen = &percpu_time_info[smp_processor_id()];
spin_lock(&d->time_lock);
si->time_version1++;
+ dom->time_version1++;
wmb();
si->cpu_freq = cpu_freq;
+ dom->cpu_freq = xen->cpu_freq;
si->tsc_timestamp = full_tsc_irq;
+ dom->tsc_timestamp = xen->tsc_timestamp;
si->system_time = stime_irq;
+ dom->system_time = xen->system_time;
si->wc_sec = wc_sec;
+ dom->wc_sec = xen->wc_sec;
si->wc_usec = wc_usec;
+ dom->wc_usec = xen->wc_usec;
wmb();
si->time_version2++;
+ dom->time_version2++;
spin_unlock(&d->time_lock);
}
@@ -299,8 +331,11 @@ static inline void __update_dom_time(str
void update_dom_time(struct vcpu *v)
{
unsigned long flags;
+ int cpu = smp_processor_id();
- if ( v->domain->shared_info->tsc_timestamp != full_tsc_irq )
+ if ( v->domain->shared_info->tsc_timestamp != full_tsc_irq ||
+      v->domain->shared_info->vcpu_time[v->processor].tsc_timestamp !=
+      percpu_time_info[cpu].tsc_timestamp )
{
read_lock_irqsave(&time_lock, flags);
__update_dom_time(v);
@@ -313,6 +348,7 @@ void do_settime(unsigned long secs, unsi
{
s64 delta;
long _usecs = (long)usecs;
+ int i;
write_lock_irq(&time_lock);
@@ -327,6 +363,10 @@ void do_settime(unsigned long secs, unsi
wc_sec = secs;
wc_usec = _usecs;
+ for (i=0; i<NR_CPUS; i++) {
+ percpu_time_info[i].wc_sec = wc_sec;
+ percpu_time_info[i].wc_usec = wc_usec;
+ }
/* Others will pick up the change at the next tick. */
__update_dom_time(current);
@@ -336,16 +376,39 @@ void do_settime(unsigned long secs, unsi
}
+static spinlock_t tsc_lock = SPIN_LOCK_UNLOCKED;
+
+/*
+ * Time setup for this processor.
+ */
+void __init setup_percpu_time(void)
+{
+ unsigned long flags;
+ unsigned long ticks_per_frac;
+ int cpu = smp_processor_id();
+
+ /* only have 1 cpu calibrate at a time */
+ spin_lock_irqsave(&tsc_lock, flags);
+ ticks_per_frac = calibrate_tsc();
+ spin_unlock_irqrestore(&tsc_lock, flags);
+
+ if (!ticks_per_frac)
+ panic("Error calibrating TSC\n");
+ percpu_time_info[cpu].cpu_freq = (u64)ticks_per_frac * (u64)CALIBRATE_FRAC;
+ rdtscll(percpu_time_info[cpu].tsc_timestamp);
+ percpu_time_info[cpu].system_time = stime_irq;
+}
+
/* Late init function (after all CPUs are booted). */
int __init init_xen_time()
{
u64 scale;
unsigned int cpu_ghz;
+ int i;
cpu_ghz = (unsigned int)(cpu_freq / 1000000000ULL);
for ( rdtsc_bitshift = 0; cpu_ghz != 0; rdtsc_bitshift++, cpu_ghz >>= 1 )
continue;
-
scale = 1000000000LL << (32 + rdtsc_bitshift);
scale /= cpu_freq;
st_scale_f = scale & 0xffffffff;
@@ -361,6 +424,13 @@ int __init init_xen_time()
/* Wallclock time starts as the initial RTC time. */
wc_sec = get_cmos_time();
+ for (i=0; i<NR_CPUS; i++) {
+ percpu_time_info[i].wc_sec = wc_sec;
+ percpu_time_info[i].wc_usec = 0;
+ percpu_time_info[i].system_time = stime_irq;
+ percpu_time_info[i].cpu_freq = cpu_freq; /* default speed */
+ }
+
local_irq_enable();
printk("Time init:\n");
--- xen-unstable/xen/include/public/xen.h.orig 2005-07-07 20:38:24.000000000 -0700
+++ xen-unstable/xen/include/public/xen.h 2005-07-08 15:07:18.000000000 -0700
@@ -330,6 +330,21 @@ typedef struct vcpu_info {
} vcpu_info_t;
/*
+ * Xen/kernel shared data
+ * per cpu timing information.
+ */
+typedef struct time_info_st
+{
+ u32 time_version1;
+ u32 time_version2;
+ tsc_timestamp_t tsc_timestamp; /* TSC at last update */
+ u64 system_time; /* time, in nanoseconds, since boot */
+ u64 cpu_freq; /* CPU frequency (Hz) */
+ u32 wc_sec; /* Secs 00:00:00 UTC, Jan 1, 1970. */
+ u32 wc_usec; /* Usecs 00:00:00 UTC, Jan 1, 1970. */
+} time_info_t;
+
+/*
* Xen/kernel shared data -- pointer provided in start_info.
* NB. We expect that this struct is smaller than a page.
*/
@@ -403,6 +418,7 @@ typedef struct shared_info {
arch_shared_info_t arch;
+ time_info_t vcpu_time[MAX_VIRT_CPUS];
} shared_info_t;
/*
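One sizing note on the new vcpu_time[] array: xen.h warns that
shared_info_t must stay smaller than a page. With no padding,
time_info_t is 4+4+8+8+8+4+4 = 40 bytes (assuming tsc_timestamp_t is
8 bytes wide), so e.g. 32 virtual CPUs add 1280 bytes. A hypothetical
compile-time guard, not part of the patch, would make the constraint
explicit:

/* Fails the build if shared_info_t outgrows a page after the
 * vcpu_time[] addition; illustrative only. */
typedef int __shared_info_fits_page[
    (sizeof(shared_info_t) <= PAGE_SIZE) ? 1 : -1];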
--
Don Fry
brazilnut@us.ibm.com