Stefano Stabellini
2010-Apr-22  15:16 UTC
[Xen-devel] [PATCH 3 of 6] evtchn delivery on HVM
From: Sheng Yang <sheng@linux.intel.com>
This patch sets up the callback used to receive event channel notifications
from Xen, using the callback vector delivery mechanism.
Signed-off-by: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
---
 arch/x86/xen/enlighten.c         |   32 ++++++++++++++++++++++++++++++++
 drivers/xen/events.c             |   35 ++++++++++++++++++++++++++---------
 include/xen/events.h             |    3 +++
 include/xen/hvm.h                |    9 +++++++++
 include/xen/interface/features.h |    3 +++
 5 files changed, 73 insertions(+), 9 deletions(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0df697a..2ce2da1 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -36,8 +36,11 @@
 #include <xen/interface/vcpu.h>
 #include <xen/interface/memory.h>
 #include <xen/interface/hvm/hvm_op.h>
+#include <xen/interface/hvm/params.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/hvm.h>
+#include <xen/events.h>
 #include <xen/hvc-console.h>
 
 #include <asm/paravirt.h>
@@ -82,6 +85,8 @@ struct shared_info xen_dummy_shared_info;
 
 void *xen_initial_gdt;
 
+int xen_have_vector_callback;
+
 /*
  * Point at some empty memory to start with. We map the real shared_info
  * page as soon as fixmap is up and running.
@@ -1348,10 +1353,26 @@ static void __init init_shared_info(void)
 	per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
 }
 
+int xen_set_callback_via(uint64_t via)
+{
+	struct xen_hvm_param a;
+
+	a.domid = DOMID_SELF;
+	a.index = HVM_PARAM_CALLBACK_IRQ;
+	a.value = via;
+	return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
+}
+
+void do_hvm_pv_evtchn_intr(void)
+{
+	xen_hvm_evtchn_do_upcall(get_irq_regs());
+}
+
 void __init xen_guest_init(void)
 {
 	int r;
 	int major, minor;
+	uint64_t callback_via;
 
 	if (xen_pv_domain())
 		return;
@@ -1361,5 +1382,16 @@ void __init xen_guest_init(void)
 		return;
 
 	init_shared_info();
+
+	if (xen_feature(XENFEAT_hvm_callback_vector)) {
+		callback_via = HVM_CALLBACK_VECTOR(X86_PLATFORM_IPI_VECTOR);
+		xen_set_callback_via(callback_via);
+		x86_platform_ipi_callback = do_hvm_pv_evtchn_intr;
+		xen_have_vector_callback = 1;
+	}
+
+	have_vcpu_info_placement = 0;
+	x86_init.irqs.intr_init = xen_init_IRQ;
+	machine_ops = xen_machine_ops;
 }
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index e46a1ef..50a360b 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -41,6 +41,8 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/pci.h>
 
+#include <xen/xen.h>
+#include <xen/hvm.h>
 #include <xen/xen-ops.h>
 #include <xen/events.h>
 #include <xen/interface/xen.h>
@@ -1015,17 +1017,13 @@ static DEFINE_PER_CPU(unsigned, xed_nesting_count);
  * a bitset of words which contain pending event bits.  The second
  * level is a bitset of pending events themselves.
  */
-void xen_evtchn_do_upcall(struct pt_regs *regs)
+void __xen_evtchn_do_upcall(struct pt_regs *regs)
 {
 	int cpu = get_cpu();
-	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
  	unsigned count;
 
-	exit_idle();
-	irq_enter();
-
 	do {
 		unsigned long pending_words;
 
@@ -1065,10 +1063,26 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	} while(count != 1);
 
 out:
+
+	put_cpu();
+}
+
+void xen_evtchn_do_upcall(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	exit_idle();
+	irq_enter();
+
+	__xen_evtchn_do_upcall(regs);
+
 	irq_exit();
 	set_irq_regs(old_regs);
+}
 
-	put_cpu();
+void xen_hvm_evtchn_do_upcall(struct pt_regs *regs)
+{
+	__xen_evtchn_do_upcall(regs);
 }
 
 /* Rebind a new event channel to an existing irq. */
@@ -1377,7 +1391,10 @@ void __init xen_init_IRQ(void)
 	for (i = 0; i < NR_EVENT_CHANNELS; i++)
 		mask_evtchn(i);
 
-	irq_ctx_init(smp_processor_id());
-
-	xen_setup_pirqs();
+	if (xen_hvm_domain()) {
+		native_init_IRQ();
+	} else {
+		irq_ctx_init(smp_processor_id());
+		xen_setup_pirqs();
+	}
 }
diff --git a/include/xen/events.h b/include/xen/events.h
index c9034af..08197aa 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -90,4 +90,7 @@ static inline void xen_setup_pirqs(void)
 }
 #endif
 
+void xen_evtchn_do_upcall(struct pt_regs *regs);
+void xen_hvm_evtchn_do_upcall(struct pt_regs *regs);
+
 #endif	/* _XEN_EVENTS_H */
diff --git a/include/xen/hvm.h b/include/xen/hvm.h
index c2a55f6..a80c7b9 100644
--- a/include/xen/hvm.h
+++ b/include/xen/hvm.h
@@ -3,6 +3,7 @@
 #define XEN_HVM_H__
 
 #include <xen/interface/hvm/params.h>
+#include <asm/xen/hypercall.h>
 
 static inline unsigned long hvm_get_parameter(int idx)
 {
@@ -20,4 +21,12 @@ static inline unsigned long hvm_get_parameter(int idx)
        return xhv.value;
 }
 
+int xen_set_callback_via(uint64_t via);
+extern int xen_have_vector_callback;
+
+#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2
+#define HVM_CALLBACK_VIA_TYPE_SHIFT 56
+#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
+                               HVM_CALLBACK_VIA_TYPE_SHIFT | (x))
+
 #endif /* XEN_HVM_H__ */
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index f51b641..8ab08b9 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -41,6 +41,9 @@
 /* x86: Does this Xen host support the MMU_PT_UPDATE_PRESERVE_AD hypercall? */
 #define XENFEAT_mmu_pt_update_preserve_ad  5
 
+/* x86: Does this Xen host support the HVM callback vector type? */
+#define XENFEAT_hvm_callback_vector        8
+
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
-- 
1.5.4.3
Konrad Rzeszutek Wilk
2010-Apr-22  20:58 UTC
Re: [Xen-devel] [PATCH 3 of 6] evtchn delivery on HVM
> +
> +	if (xen_feature(XENFEAT_hvm_callback_vector)) {
> +		callback_via = HVM_CALLBACK_VECTOR(X86_PLATFORM_IPI_VECTOR);
> +		xen_set_callback_via(callback_via);
> +		x86_platform_ipi_callback = do_hvm_pv_evtchn_intr;
> +		xen_have_vector_callback = 1;

OK, so you are using the IPI vector. And the value is:

> +#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2
> +#define HVM_CALLBACK_VIA_TYPE_SHIFT 56
> +#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
> +                               HVM_CALLBACK_VIA_TYPE_SHIFT | (x))
> +
>  #endif /* XEN_HVM_H__ */

2 << 56 | 0xed.

But looking back at the other patch, the arguments that are supposed
to be set/get via the HVM_PARAM_CALLBACK_IRQ call are:

 * How should CPU0 event-channel notifications be delivered?
+ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
+ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
+ *                  Domain = val[47:32], Bus = val[31:16],
+ *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
+ * If val == 0 then CPU0 event-channel notifications are not delivered.

I don't see val[63:56] == 2 ?
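
To make the arithmetic concrete, here is a minimal standalone sketch (not part
of the patch) of what the macro evaluates to. The 0xed vector number is an
assumption taken from the reply above; the real value of
X86_PLATFORM_IPI_VECTOR comes from the kernel's irq_vectors.h.

/*
 * Sketch only: evaluate HVM_CALLBACK_VECTOR() outside the kernel,
 * assuming X86_PLATFORM_IPI_VECTOR == 0xed.
 */
#include <stdint.h>
#include <stdio.h>

#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2
#define HVM_CALLBACK_VIA_TYPE_SHIFT 56
#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
                               HVM_CALLBACK_VIA_TYPE_SHIFT | (x))

int main(void)
{
	uint64_t via = HVM_CALLBACK_VECTOR(0xed);

	/* Prints: via=0x02000000000000ed type=2 vector=0xed */
	printf("via=0x%016llx type=%llu vector=0x%02llx\n",
	       (unsigned long long)via,
	       (unsigned long long)(via >> 56),
	       (unsigned long long)(via & 0xff));
	return 0;
}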
Stefano Stabellini
2010-Apr-23  15:30 UTC
Re: [Xen-devel] [PATCH 3 of 6] evtchn delivery on HVM
On Thu, 22 Apr 2010, Konrad Rzeszutek Wilk wrote:
> > +
> > +	if (xen_feature(XENFEAT_hvm_callback_vector)) {
> > +		callback_via = HVM_CALLBACK_VECTOR(X86_PLATFORM_IPI_VECTOR);
> > +		xen_set_callback_via(callback_via);
> > +		x86_platform_ipi_callback = do_hvm_pv_evtchn_intr;
> > +		xen_have_vector_callback = 1;
>
> OK, so you are using the IPI vector. And the value is:
>
> > +#define HVM_CALLBACK_VIA_TYPE_VECTOR 0x2
> > +#define HVM_CALLBACK_VIA_TYPE_SHIFT 56
> > +#define HVM_CALLBACK_VECTOR(x) (((uint64_t)HVM_CALLBACK_VIA_TYPE_VECTOR)<<\
> > +                               HVM_CALLBACK_VIA_TYPE_SHIFT | (x))
> > +
> >  #endif /* XEN_HVM_H__ */
>
> 2 << 56 | 0xed.
>
> But looking back at the other patch, the arguments that are supposed
> to be set/get via the HVM_PARAM_CALLBACK_IRQ call are:
>
>  * How should CPU0 event-channel notifications be delivered?
> + * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
> + * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
> + *                  Domain = val[47:32], Bus = val[31:16],
> + *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
> + * If val == 0 then CPU0 event-channel notifications are not delivered.
>
> I don't see val[63:56] == 2 ?
>

The comment on the other patch needs to be updated as well.
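
To show what the updated comment would have to document, here is a small,
hedged decoder for HVM_PARAM_CALLBACK_IRQ values. The GSI and PCI INTx
layouts are taken from the comment quoted above; treating val[7:0] as the
vector number for the new type 2 is an assumption based on
HVM_CALLBACK_VECTOR() in this patch, not on final header text.

#include <stdint.h>
#include <stdio.h>

/* Sketch only: decode a HVM_PARAM_CALLBACK_IRQ value for inspection. */
static void decode_callback_via(uint64_t val)
{
	uint64_t type = val >> 56;

	if (val == 0) {
		printf("CPU0 event-channel notifications are not delivered\n");
	} else if (type == 0) {
		/* val[55:0] is a delivery GSI (Global System Interrupt). */
		printf("GSI %llu\n",
		       (unsigned long long)(val & ((1ULL << 56) - 1)));
	} else if (type == 1) {
		/* val[47:32]=Domain, val[31:16]=Bus, val[15:8]=DevFn, val[1:0]=IntX */
		printf("PCI INTx: domain=0x%llx bus=0x%llx devfn=0x%llx INT%c\n",
		       (unsigned long long)((val >> 32) & 0xffff),
		       (unsigned long long)((val >> 16) & 0xffff),
		       (unsigned long long)((val >> 8) & 0xff),
		       'A' + (int)(val & 0x3));
	} else if (type == 2) {
		/* Assumed layout: the callback vector lives in val[7:0]. */
		printf("callback vector 0x%02llx\n",
		       (unsigned long long)(val & 0xff));
	} else {
		printf("unknown delivery type %llu\n",
		       (unsigned long long)type);
	}
}

int main(void)
{
	decode_callback_via(((uint64_t)0x2 << 56) | 0xed);	/* as set by this patch */
	return 0;
}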
Konrad Rzeszutek Wilk
2010-Apr-23  15:33 UTC
Re: [Xen-devel] [PATCH 3 of 6] evtchn delivery on HVM
> > * How should CPU0 event-channel notifications be delivered?
> > + * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
> > + * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
> > + *                  Domain = val[47:32], Bus = val[31:16],
> > + *                  DevFn  = val[15: 8], IntX = val[ 1: 0]
> > + * If val == 0 then CPU0 event-channel notifications are not delivered.
> >
> > I don't see val[63:56] == 2 ?
>
> The comment on the other patch needs to be updated as well.

Yup, and it looks like this also needs a patch to xen-unstable.hg, as that
header file is missing this data too.