Michael.Fetterman@cl.cam.ac.uk
2008-Feb-09 21:22 UTC
[Xen-devel] [PATCH 0 of 4] Various xentrace improvements
In trying to move from Xen 3.1 to Xen 3.2, I found that the new xentrace infrastructure in 3.2 had a shortcoming or two vs that in 3.1... These patches address that, and also add a little new functionality. Last I checked, these patches applied cleanly to both 3.2-testing and unstable. Michael _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Michael.Fetterman@cl.cam.ac.uk
2008-Feb-09 21:22 UTC
[Xen-devel] [PATCH 1 of 4] Fix bug in logic for bytes_to_wrap in trace buffer
# HG changeset patch # User Michael.Fetterman@cl.cam.ac.uk # Date 1202591468 0 # Node ID 91b4a9b2629c770bafca0d58bb106ce72a3a968f # Parent d05f5a5389afdbc57b35de94b26df49431369478 Fix bug in logic for bytes_to_wrap in trace buffer. Admittedly, the bug could only be manifest with much larger trace records than are currently allowed (or equivalently, much smaller trace buffers), but the old code was harder to read, and thus hid the logic bug well, too. Signed-off-by: Michael A Fetterman <Michael.Fetterman@cl.cam.ac.uk> diff -r d05f5a5389af -r 91b4a9b2629c xen/common/trace.c --- a/xen/common/trace.c Fri Feb 08 16:34:23 2008 +0000 +++ b/xen/common/trace.c Sat Feb 09 21:11:08 2008 +0000 @@ -425,25 +425,18 @@ void __trace_var(u32 event, int cycles, total_size += bytes_to_wrap; bytes_to_wrap = data_size; } - else - { - bytes_to_wrap -= LOST_REC_SIZE; - if ( bytes_to_wrap == 0 ) - bytes_to_wrap = data_size; - } total_size += LOST_REC_SIZE; + bytes_to_wrap -= LOST_REC_SIZE; + + /* LOST_REC might line up perfectly with the buffer wrap */ + if ( bytes_to_wrap == 0 ) + bytes_to_wrap = data_size; } if ( rec_size > bytes_to_wrap ) { total_size += bytes_to_wrap; - bytes_to_wrap = data_size; } - else - { - bytes_to_wrap -= rec_size; - } - total_size += rec_size; /* Do we have enough space for everything? */ @@ -466,14 +459,12 @@ void __trace_var(u32 event, int cycles, insert_wrap_record(buf, LOST_REC_SIZE); bytes_to_wrap = data_size; } - else - { - bytes_to_wrap -= LOST_REC_SIZE; - /* LOST_REC might line up perfectly with the buffer wrap */ - if ( bytes_to_wrap == 0 ) - bytes_to_wrap = data_size; - } insert_lost_records(buf); + bytes_to_wrap -= LOST_REC_SIZE; + + /* LOST_REC might line up perfectly with the buffer wrap */ + if ( bytes_to_wrap == 0 ) + bytes_to_wrap = data_size; } if ( rec_size > bytes_to_wrap ) _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Michael.Fetterman@cl.cam.ac.uk
2008-Feb-09 21:22 UTC
[Xen-devel] [PATCH 2 of 4] Allow xentrace to handle >4G of trace data
# HG changeset patch # User Michael.Fetterman@cl.cam.ac.uk # Date 1202591469 0 # Node ID 9ff80e7b54528e06c8e0dc0ad8d8da09afca7f16 # Parent 91b4a9b2629c770bafca0d58bb106ce72a3a968f Allow xentrace to handle >4G of trace data. It was previously assert'ing when it hit 4G. Also, because the trace buffer is not a power of 2 in size, using modulo arithmetic to address the buffer does not work when the index wraps around 2^32. This patch fixes both issues, and as a side effect, removes all integer division from the hypervisor side of the trace mechanism. Signed-off-by: Michael A Fetterman <Michael.Fetterman@cl.cam.ac.uk> diff -r 91b4a9b2629c -r 9ff80e7b5452 tools/xentrace/xentrace.c --- a/tools/xentrace/xentrace.c Sat Feb 09 21:11:08 2008 +0000 +++ b/tools/xentrace/xentrace.c Sat Feb 09 21:11:09 2008 +0000 @@ -362,9 +362,18 @@ int monitor_tbufs(int outfd) if ( cons == prod ) continue; - assert(prod > cons); - - window_size = prod - cons; + assert(cons < 2*data_size); + assert(prod < 2*data_size); + + // NB: if (prod<cons), then (prod-cons)%data_size will not yield + // the correct answer because data_size is not a power of 2. 
+ if ( prod < cons ) + window_size = (prod + 2*data_size) - cons; + else + window_size = prod - cons; + assert(window_size > 0); + assert(window_size <= data_size); + start_offset = cons % data_size; end_offset = prod % data_size; diff -r 91b4a9b2629c -r 9ff80e7b5452 xen/common/trace.c --- a/xen/common/trace.c Sat Feb 09 21:11:08 2008 +0000 +++ b/xen/common/trace.c Sat Feb 09 21:11:09 2008 +0000 @@ -239,14 +239,46 @@ static inline int calc_rec_size(int cycl return rec_size; } +static inline int calc_unconsumed_bytes(struct t_buf *buf) +{ + int x = buf->prod - buf->cons; + if ( x < 0 ) + x += 2*data_size; + + ASSERT(x >= 0); + ASSERT(x <= data_size); + + return x; +} + static inline int calc_bytes_to_wrap(struct t_buf *buf) { - return data_size - (buf->prod % data_size); -} - -static inline unsigned calc_bytes_avail(struct t_buf *buf) -{ - return data_size - (buf->prod - buf->cons); + int x = data_size - buf->prod; + if ( x <= 0 ) + x += data_size; + + ASSERT(x > 0); + ASSERT(x <= data_size); + + return x; +} + +static inline int calc_bytes_avail(struct t_buf *buf) +{ + return data_size - calc_unconsumed_bytes(buf); +} + +static inline struct t_rec * +next_record(struct t_buf *buf) +{ + int x = buf->prod; + if ( x >= data_size ) + x -= data_size; + + ASSERT(x >= 0); + ASSERT(x < data_size); + + return (struct t_rec *)&this_cpu(t_data)[x]; } static inline int __insert_record(struct t_buf *buf, @@ -260,24 +292,25 @@ static inline int __insert_record(struct unsigned char *dst; unsigned long extra_word = extra/sizeof(u32); int local_rec_size = calc_rec_size(cycles, extra); + uint32_t next; BUG_ON(local_rec_size != rec_size); + BUG_ON(extra & 3); /* Double-check once more that we have enough space. * Don't bugcheck here, in case the userland tool is doing * something stupid. 
*/ if ( calc_bytes_avail(buf) < rec_size ) { - printk("%s: %u bytes left (%u - (%u - %u)) recsize %u.\n", + printk("%s: %u bytes left (%u - ((%u - %u) %% %u) recsize %u.\n", __func__, - data_size - (buf->prod - buf->cons), - data_size, - buf->prod, buf->cons, rec_size); + calc_bytes_avail(buf), + data_size, buf->prod, buf->cons, data_size, rec_size); return 0; } rmb(); - rec = (struct t_rec *)&this_cpu(t_data)[buf->prod % data_size]; + rec = next_record(buf); rec->event = event; rec->extra_u32 = extra_word; dst = (unsigned char *)rec->u.nocycles.extra_u32; @@ -293,7 +326,13 @@ static inline int __insert_record(struct memcpy(dst, extra_data, extra); wmb(); - buf->prod += rec_size; + + next = buf->prod + rec_size; + if ( next >= 2*data_size ) + next -= 2*data_size; + ASSERT(next >= 0); + ASSERT(next < 2*data_size); + buf->prod = next; return rec_size; } @@ -395,7 +434,7 @@ void __trace_var(u32 event, int cycles, local_irq_save(flags); - started_below_highwater = ((buf->prod - buf->cons) < t_buf_highwater); + started_below_highwater = (calc_unconsumed_bytes(buf) < t_buf_highwater); /* Calculate the record size */ rec_size = calc_rec_size(cycles, extra); @@ -413,10 +452,6 @@ void __trace_var(u32 event, int cycles, total_size = 0; /* First, check to see if we need to include a lost_record. - * - * calc_bytes_to_wrap() involves integer division, which we'd like to - * avoid if we can. So do the math, check it in debug versions, and - * do a final check always if we happen to write a record. */ if ( this_cpu(lost_records) ) { @@ -477,7 +512,7 @@ void __trace_var(u32 event, int cycles, /* Notify trace buffer consumer that we've crossed the high water mark. 
*/ if ( started_below_highwater && - ((buf->prod - buf->cons) >= t_buf_highwater) ) + (calc_unconsumed_bytes(buf) >= t_buf_highwater) ) raise_softirq(TRACE_SOFTIRQ); } diff -r 91b4a9b2629c -r 9ff80e7b5452 xen/include/public/trace.h --- a/xen/include/public/trace.h Sat Feb 09 21:11:08 2008 +0000 +++ b/xen/include/public/trace.h Sat Feb 09 21:11:09 2008 +0000 @@ -221,6 +221,14 @@ struct t_rec { * field, indexes into an array of struct t_rec's. */ struct t_buf { + /* Assume the data buffer size is X. X is generally not a power of 2. + * CONS and PROD are incremented modulo (2*X): + * 0 <= cons < 2*X + * 0 <= prod < 2*X + * This is done because addition modulo X breaks at 2^32 when X is not a + * power of 2: + * (((2^32 - 1) % X) + 1) % X != (2^32) % X + */ uint32_t cons; /* Offset of next item to be consumed by control tools. */ uint32_t prod; /* Offset of next item to be produced by Xen. */ /* Records follow immediately after the meta-data header. */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Michael.Fetterman@cl.cam.ac.uk
2008-Feb-09 21:22 UTC
[Xen-devel] [PATCH 3 of 4] Improve xentrace to use VIRQ_TBUF interrupts as well as a user-specified
# HG changeset patch # User Michael.Fetterman@cl.cam.ac.uk # Date 1202591471 0 # Node ID f904468403b4d47dbebc54c76473c28b24066681 # Parent 9ff80e7b54528e06c8e0dc0ad8d8da09afca7f16 Improve xentrace to use VIRQ_TBUF interrupts as well as a user-specified polling interval in order to determine when to empty the trace buffers. Removed the old and unused/unimplemented new_data_threshold logic. Signed-off-by: Michael A Fetterman <Michael.Fetterman@cl.cam.ac.uk> diff -r 9ff80e7b5452 -r f904468403b4 tools/xentrace/xentrace.c --- a/tools/xentrace/xentrace.c Sat Feb 09 21:11:09 2008 +0000 +++ b/tools/xentrace/xentrace.c Sat Feb 09 21:11:11 2008 +0000 @@ -23,6 +23,7 @@ #include <string.h> #include <getopt.h> #include <assert.h> +#include <sys/poll.h> #include <xen/xen.h> #include <xen/trace.h> @@ -40,9 +41,6 @@ do { /***** Compile time configuration of defaults ********************************/ -/* when we''ve got more records than this waiting, we log it to the output */ -#define NEW_DATA_THRESH 1 - /* sleep for this long (milliseconds) between checking the trace buffers */ #define POLL_SLEEP_MILLIS 100 @@ -51,8 +49,7 @@ do { typedef struct settings_st { char *outfile; - struct timespec poll_sleep; - unsigned long new_data_thresh; + unsigned long poll_sleep; /* milliseconds to sleep between polls */ uint32_t evt_mask; uint32_t cpu_mask; unsigned long tbuf_size; @@ -63,23 +60,13 @@ settings_t opts; int interrupted = 0; /* gets set if we get a SIGHUP */ +static int xc_handle = -1; +static int event_fd = -1; +static int virq_port = -1; + void close_handler(int signal) { interrupted = 1; -} - -/** - * millis_to_timespec - convert a time in milliseconds to a struct timespec - * @millis: time interval in milliseconds - */ -struct timespec millis_to_timespec(unsigned long millis) -{ - struct timespec spec; - - spec.tv_sec = millis / 1000; - spec.tv_nsec = (millis % 1000) * 1000; - - return spec; } /** @@ -143,13 +130,7 @@ void write_buffer(unsigned int cpu, unsi static void 
get_tbufs(unsigned long *mfn, unsigned long *size) { - int xc_handle = xc_interface_open(); int ret; - - if ( xc_handle < 0 ) - { - exit(EXIT_FAILURE); - } if(!opts.tbuf_size) opts.tbuf_size = DEFAULT_TBUF_SIZE; @@ -161,8 +142,6 @@ static void get_tbufs(unsigned long *mfn perror("Couldn''t enable trace buffers"); exit(1); } - - xc_interface_close(xc_handle); } /** @@ -176,22 +155,12 @@ struct t_buf *map_tbufs(unsigned long tb struct t_buf *map_tbufs(unsigned long tbufs_mfn, unsigned int num, unsigned long size) { - int xc_handle; struct t_buf *tbufs_mapped; - - xc_handle = xc_interface_open(); - - if ( xc_handle < 0 ) - { - exit(EXIT_FAILURE); - } tbufs_mapped = xc_map_foreign_range(xc_handle, DOMID_XEN, size * num, PROT_READ | PROT_WRITE, tbufs_mfn); - xc_interface_close(xc_handle); - if ( tbufs_mapped == 0 ) { PERROR("Failed to mmap trace buffers"); @@ -210,7 +179,6 @@ void set_mask(uint32_t mask, int type) void set_mask(uint32_t mask, int type) { int ret = 0; - int xc_handle = xc_interface_open(); /* for accessing control interface */ if (type == 1) { ret = xc_tbuf_set_cpu_mask(xc_handle, mask); @@ -220,8 +188,6 @@ void set_mask(uint32_t mask, int type) fprintf(stderr, "change evtmask to 0x%x\n", mask); } - xc_interface_close(xc_handle); - if ( ret != 0 ) { PERROR("Failure to get trace buffer pointer from Xen and set the new mask"); @@ -295,7 +261,6 @@ unsigned int get_num_cpus(void) unsigned int get_num_cpus(void) { xc_physinfo_t physinfo = { 0 }; - int xc_handle = xc_interface_open(); int ret; ret = xc_physinfo(xc_handle, &physinfo); @@ -306,9 +271,68 @@ unsigned int get_num_cpus(void) exit(EXIT_FAILURE); } - xc_interface_close(xc_handle); - return physinfo.nr_cpus; +} + +/** + * event_init - setup to receive the VIRQ_TBUF event + */ +void event_init(void) +{ + int rc; + + rc = xc_evtchn_open(); + if (rc < 0) { + perror(xc_get_last_error()->message); + exit(EXIT_FAILURE); + } + event_fd = rc; + + rc = xc_evtchn_bind_virq(event_fd, VIRQ_TBUF); + if (rc == -1) { 
+ PERROR("failed to bind to VIRQ port"); + exit(EXIT_FAILURE); + } + virq_port = rc; +} + +/** + * wait_for_event_or_timeout - sleep for the specified number of milliseconds, + * or until an VIRQ_TBUF event occurs + */ +void wait_for_event_or_timeout(unsigned long milliseconds) +{ + int rc; + struct pollfd fd = { .fd = event_fd, + .events = POLLIN | POLLERR }; + int port; + + rc = poll(&fd, 1, milliseconds); + if (rc == -1) { + if (errno == EINTR) + return; + PERROR("poll exitted with an error"); + exit(EXIT_FAILURE); + } + + if (rc == 1) { + port = xc_evtchn_pending(event_fd); + if (port == -1) { + PERROR("failed to read port from evtchn"); + exit(EXIT_FAILURE); + } + if (port != virq_port) { + fprintf(stderr, + "unexpected port returned from evtchn (got %d vs expected %d)\n", + port, virq_port); + exit(EXIT_FAILURE); + } + rc = xc_evtchn_unmask(event_fd, port); + if (rc == -1) { + PERROR("failed to write port to evtchn"); + exit(EXIT_FAILURE); + } + } } @@ -329,6 +353,9 @@ int monitor_tbufs(int outfd) unsigned long size; /* size of a single trace buffer */ unsigned long data_size; + + /* prepare to listen for VIRQ_TBUF */ + event_init(); /* get number of logical CPUs (and therefore number of trace buffers) */ num = get_num_cpus(); @@ -405,7 +432,7 @@ int monitor_tbufs(int outfd) meta[i]->cons = prod; } - nanosleep(&opts.poll_sleep, NULL); + wait_for_event_or_timeout(opts.poll_sleep); } /* cleanup */ @@ -425,7 +452,7 @@ int monitor_tbufs(int outfd) #define xstr(x) str(x) #define str(x) #x -const char *program_version = "xentrace v1.1"; +const char *program_version = "xentrace v1.2"; const char *program_bug_address = "<mark.a.williamson@intel.com>"; void usage(void) @@ -444,9 +471,6 @@ void usage(void) " N.B. 
that the trace buffer cannot be resized.\n" \ " if it has already been set this boot cycle,\n" \ " this argument will be ignored.\n" \ -" -t, --log-thresh=l Set number, l, of new records required to\n" \ -" trigger a write to output (default " \ - xstr(NEW_DATA_THRESH) ").\n" \ " -?, --help Show this message\n" \ " -V, --version Print program version\n" \ "\n" \ @@ -525,12 +549,8 @@ void parse_args(int argc, char **argv) { switch ( option ) { - case ''t'': /* set new records threshold for logging */ - opts.new_data_thresh = argtol(optarg, 0); - break; - case ''s'': /* set sleep time (given in milliseconds) */ - opts.poll_sleep = millis_to_timespec(argtol(optarg, 0)); + opts.poll_sleep = argtol(optarg, 0); break; case ''c'': /* set new cpu mask for filtering*/ @@ -574,13 +594,19 @@ int main(int argc, char **argv) struct sigaction act; opts.outfile = 0; - opts.poll_sleep = millis_to_timespec(POLL_SLEEP_MILLIS); - opts.new_data_thresh = NEW_DATA_THRESH; + opts.poll_sleep = POLL_SLEEP_MILLIS; opts.evt_mask = 0; opts.cpu_mask = 0; parse_args(argc, argv); - + + xc_handle = xc_interface_open(); + if ( xc_handle < 0 ) + { + perror(xc_get_last_error()->message); + exit(EXIT_FAILURE); + } + if ( opts.evt_mask != 0 ) set_mask(opts.evt_mask, 0); _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel
Michael.Fetterman@cl.cam.ac.uk
2008-Feb-09 21:22 UTC
[Xen-devel] [PATCH 4 of 4] Remove redundant tb_init_done checks, and add missing ones
# HG changeset patch # User Michael.Fetterman@cl.cam.ac.uk # Date 1202591472 0 # Node ID da66102de0adebf6a28528b9a62d54d64cbf7cd9 # Parent f904468403b4d47dbebc54c76473c28b24066681 Remove redundant tb_init_done checks, and add missing ones. Hand inspection of gcc -O2 output confirms significantly shorter codepaths for inactive (i.e. normal case) tracing. Signed-off-by: Michael A Fetterman <Michael.Fetterman@cl.cam.ac.uk> diff -r f904468403b4 -r da66102de0ad xen/arch/x86/trace.c --- a/xen/arch/x86/trace.c Sat Feb 09 21:11:11 2008 +0000 +++ b/xen/arch/x86/trace.c Sat Feb 09 21:11:12 2008 +0000 @@ -15,9 +15,6 @@ asmlinkage void trace_hypercall(void) { struct cpu_user_regs *regs = guest_cpu_user_regs(); - if ( !tb_init_done ) - return; - #ifdef __x86_64__ if ( is_pv_32on64_vcpu(current) ) { @@ -52,9 +49,6 @@ void __trace_pv_trap(int trapnr, unsigne void __trace_pv_trap(int trapnr, unsigned long eip, int use_error_code, unsigned error_code) { - if ( !tb_init_done ) - return; - #ifdef __x86_64__ if ( is_pv_32on64_vcpu(current) ) { @@ -99,9 +93,6 @@ void __trace_pv_page_fault(unsigned long { unsigned long eip = guest_cpu_user_regs()->eip; - if ( !tb_init_done ) - return; - #ifdef __x86_64__ if ( is_pv_32on64_vcpu(current) ) { @@ -135,9 +126,6 @@ void __trace_pv_page_fault(unsigned long void __trace_trap_one_addr(unsigned event, unsigned long va) { - if ( !tb_init_done ) - return; - #ifdef __x86_64__ if ( is_pv_32on64_vcpu(current) ) { @@ -155,9 +143,6 @@ void __trace_trap_two_addr(unsigned even void __trace_trap_two_addr(unsigned event, unsigned long va1, unsigned long va2) { - if ( !tb_init_done ) - return; - #ifdef __x86_64__ if ( is_pv_32on64_vcpu(current) ) { @@ -184,9 +169,6 @@ void __trace_ptwr_emulation(unsigned lon void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) { unsigned long eip = guest_cpu_user_regs()->eip; - - if ( !tb_init_done ) - return; /* We have a couple of different modes to worry about: * - 32-on-32: 32-bit pte, 32-bit virtual 
addresses diff -r f904468403b4 -r da66102de0ad xen/include/asm-x86/hvm/trace.h --- a/xen/include/asm-x86/hvm/trace.h Sat Feb 09 21:11:11 2008 +0000 +++ b/xen/include/asm-x86/hvm/trace.h Sat Feb 09 21:11:12 2008 +0000 @@ -37,6 +37,9 @@ static inline void hvmtrace_vmexit(struc unsigned long rip, unsigned long exit_reason) { + if ( likely(!tb_init_done) ) + return; + #ifdef __x86_64__ if(hvm_long_mode_enabled(v)) { @@ -50,7 +53,7 @@ static inline void hvmtrace_vmexit(struc d.vid = v->vcpu_id; d.exit_reason = exit_reason; d.rip = rip; - trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_VMEXIT64, 1/*cycles*/, sizeof(d), (unsigned char *)&d); } else { #endif struct { @@ -63,7 +66,7 @@ static inline void hvmtrace_vmexit(struc d.vid = v->vcpu_id; d.exit_reason = exit_reason; d.eip = rip; - trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_VMEXIT, 1/*cycles*/, sizeof(d), (unsigned char *)&d); #ifdef __x86_64__ } #endif @@ -75,9 +78,13 @@ static inline void hvmtrace_vmentry(stru struct { unsigned did:16, vid:16; } d; + + if ( likely(!tb_init_done) ) + return; + d.did = v->domain->domain_id; d.vid = v->vcpu_id; - trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_VMENTRY, 1/*cycles*/, sizeof(d), (unsigned char *)&d); } static inline void hvmtrace_msr_read(struct vcpu *v, u32 ecx, u64 msr_content) @@ -87,11 +94,15 @@ static inline void hvmtrace_msr_read(str u32 ecx; u64 msr_content; } d; + + if ( likely(!tb_init_done) ) + return; + d.did = v->domain->domain_id; d.vid = v->vcpu_id; d.ecx = ecx; d.msr_content = msr_content; - trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_MSR_READ, 0/*!cycles*/, sizeof(d), (unsigned char *)&d); } static inline void hvmtrace_msr_write(struct vcpu *v, u32 ecx, u64 msr_content) @@ -101,16 +112,23 @@ static inline void hvmtrace_msr_write(st u32 ecx; u64 msr_content; } 
d; + + if ( likely(!tb_init_done) ) + return; + d.did = v->domain->domain_id; d.vid = v->vcpu_id; d.ecx = ecx; d.msr_content = msr_content; - trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_MSR_WRITE, 0/*!cycles*/,sizeof(d), (unsigned char *)&d); } static inline void hvmtrace_pf_xen(struct vcpu *v, unsigned long va, u32 error_code) { + if ( likely(!tb_init_done) ) + return; + #ifdef __x86_64__ if(hvm_long_mode_enabled(v)) { @@ -123,8 +141,8 @@ static inline void hvmtrace_pf_xen(struc d.vid = v->vcpu_id; d.error_code = error_code; d.va = va; - trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d), - (unsigned char *)&d); + __trace_var(TRC_HVM_PF_XEN64, 0/*!cycles*/,sizeof(d), + (unsigned char *)&d); } else { #endif struct { @@ -136,7 +154,8 @@ static inline void hvmtrace_pf_xen(struc d.vid = v->vcpu_id; d.error_code = error_code; d.va = va; - trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d), (unsigned char *)&d); + __trace_var(TRC_HVM_PF_XEN, 0/*!cycles*/,sizeof(d), + (unsigned char *)&d); #ifdef __x86_64__ } #endif @@ -144,7 +163,7 @@ static inline void hvmtrace_pf_xen(struc #define HVMTRACE_ND(evt, vcpu, count, d1, d2, d3, d4) \ do { \ - if (DO_TRC_HVM_ ## evt) \ + if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \ { \ struct { \ unsigned did:16, vid:16; \ @@ -156,8 +175,8 @@ static inline void hvmtrace_pf_xen(struc _d.d[1]=(d2); \ _d.d[2]=(d3); \ _d.d[3]=(d4); \ - trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \ - sizeof(u32)*count+1, (unsigned char *)&_d); \ + __trace_var(TRC_HVM_ ## evt, 0/*!cycles*/, \ + sizeof(u32)*count+1, (unsigned char *)&_d); \ } \ } while(0) diff -r f904468403b4 -r da66102de0ad xen/include/asm-x86/trace.h --- a/xen/include/asm-x86/trace.h Sat Feb 09 21:11:11 2008 +0000 +++ b/xen/include/asm-x86/trace.h Sat Feb 09 21:11:12 2008 +0000 @@ -8,7 +8,7 @@ static inline void trace_pv_trap(int tra static inline void trace_pv_trap(int trapnr, unsigned long eip, int use_error_code, unsigned error_code) { - 
if ( tb_init_done ) + if ( unlikely(tb_init_done) ) __trace_pv_trap(trapnr, eip, use_error_code, error_code); } @@ -16,14 +16,14 @@ static inline void trace_pv_page_fault(u static inline void trace_pv_page_fault(unsigned long addr, unsigned error_code) { - if ( tb_init_done ) + if ( unlikely(tb_init_done) ) __trace_pv_page_fault(addr, error_code); } void __trace_trap_one_addr(unsigned event, unsigned long va); static inline void trace_trap_one_addr(unsigned event, unsigned long va) { - if ( tb_init_done ) + if ( unlikely(tb_init_done) ) __trace_trap_one_addr(event, va); } @@ -32,14 +32,14 @@ static inline void trace_trap_two_addr(u static inline void trace_trap_two_addr(unsigned event, unsigned long va1, unsigned long va2) { - if ( tb_init_done ) + if ( unlikely(tb_init_done) ) __trace_trap_two_addr(event, va1, va2); } void __trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte); static inline void trace_ptwr_emulation(unsigned long addr, l1_pgentry_t npte) { - if ( tb_init_done ) + if ( unlikely(tb_init_done) ) __trace_ptwr_emulation(addr, npte); } diff -r f904468403b4 -r da66102de0ad xen/include/xen/trace.h --- a/xen/include/xen/trace.h Sat Feb 09 21:11:11 2008 +0000 +++ b/xen/include/xen/trace.h Sat Feb 09 21:11:12 2008 +0000 @@ -39,7 +39,7 @@ static inline void trace_var(u32 event, static inline void trace_var(u32 event, int cycles, int extra, unsigned char *extra_data) { - if( unlikely(tb_init_done) ) + if ( unlikely(tb_init_done) ) __trace_var(event, cycles, extra, extra_data); } @@ -49,49 +49,64 @@ static inline void trace_var(u32 event, trace_var(_e, 1, 0, NULL); \ } while ( 0 ) -#define TRACE_1D(_e,_d) \ +#define TRACE_1D(_e,d1) \ do { \ - u32 _d1; \ - _d1 = _d; \ - trace_var(_e, 1, sizeof(_d1), (unsigned char *)&_d1); \ + if ( unlikely(tb_init_done) ) \ + { \ + u32 _d[1]; \ + _d[0] = d1; \ + __trace_var(_e, 1, sizeof(*_d), (unsigned char *)_d); \ + } \ } while ( 0 ) #define TRACE_2D(_e,d1,d2) \ - do { \ - u32 _d[2]; \ - _d[0]=d1; \ - _d[1]=d2; \ - 
trace_var(_e, 1, sizeof(*_d)*2, (unsigned char *)_d); \ + do { \ + if ( unlikely(tb_init_done) ) \ + { \ + u32 _d[2]; \ + _d[0] = d1; \ + _d[1] = d2; \ + __trace_var(_e, 1, sizeof(*_d)*2, (unsigned char *)_d); \ + } \ } while ( 0 ) #define TRACE_3D(_e,d1,d2,d3) \ do { \ - u32 _d[3]; \ - _d[0]=d1; \ - _d[1]=d2; \ - _d[2]=d3; \ - trace_var(_e, 1, sizeof(*_d)*3, (unsigned char *)_d); \ + if ( unlikely(tb_init_done) ) \ + { \ + u32 _d[3]; \ + _d[0] = d1; \ + _d[1] = d2; \ + _d[2] = d3; \ + __trace_var(_e, 1, sizeof(*_d)*3, (unsigned char *)_d); \ + } \ } while ( 0 ) #define TRACE_4D(_e,d1,d2,d3,d4) \ do { \ - u32 _d[4]; \ - _d[0]=d1; \ - _d[1]=d2; \ - _d[2]=d3; \ - _d[3]=d4; \ - trace_var(_e, 1, sizeof(*_d)*4, (unsigned char *)_d); \ + if ( unlikely(tb_init_done) ) \ + { \ + u32 _d[4]; \ + _d[0] = d1; \ + _d[1] = d2; \ + _d[2] = d3; \ + _d[3] = d4; \ + __trace_var(_e, 1, sizeof(*_d)*4, (unsigned char *)_d); \ + } \ } while ( 0 ) #define TRACE_5D(_e,d1,d2,d3,d4,d5) \ - do { \ - u32 _d[5]; \ - _d[0]=d1; \ - _d[1]=d2; \ - _d[2]=d3; \ - _d[3]=d4; \ - _d[4]=d5; \ - trace_var(_e, 1, sizeof(*_d)*5, (unsigned char *)_d); \ + do { \ + if ( unlikely(tb_init_done) ) \ + { \ + u32 _d[5]; \ + _d[0] = d1; \ + _d[1] = d2; \ + _d[2] = d3; \ + _d[3] = d4; \ + _d[4] = d5; \ + __trace_var(_e, 1, sizeof(*_d)*5, (unsigned char *)_d); \ + } \ } while ( 0 ) #endif /* __XEN_TRACE_H__ */ _______________________________________________ Xen-devel mailing list Xen-devel@lists.xensource.com http://lists.xensource.com/xen-devel