Daniel De Graaf
2013-Apr-11 15:35 UTC
[PATCH 1/2] xen/xsm: add hooks for claim and debug port
This replaces the IS_PRIV checks on these newly introduced operations with equivalent XSM hooks, and adds FLASK access vectors for them. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Cc: Jan Beulich <jbeulich@suse.com> Cc: Keir Fraser <keir@xen.org> Cc: Dan Magenheimer <dan.magenheimer@oracle.com> --- tools/flask/policy/policy/modules/xen/xen.if | 2 +- tools/flask/policy/policy/modules/xen/xen.te | 3 +++ xen/arch/x86/physdev.c | 15 +++++++++------ xen/common/memory.c | 15 ++++++++------- xen/include/xsm/dummy.h | 18 ++++++++++++++++++ xen/include/xsm/xsm.h | 18 ++++++++++++++++++ xen/xsm/dummy.c | 3 +++ xen/xsm/flask/hooks.c | 20 ++++++++++++++++++++ xen/xsm/flask/policy/access_vectors | 18 ++++++++++++++---- xen/xsm/flask/policy/security_classes | 1 + 10 files changed, 95 insertions(+), 18 deletions(-) diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if index 2ce2212..fbb329d 100644 --- a/tools/flask/policy/policy/modules/xen/xen.if +++ b/tools/flask/policy/policy/modules/xen/xen.if @@ -49,7 +49,7 @@ define(`create_domain_common'', ` getdomaininfo hypercall setvcpucontext setextvcpucontext getscheduler getvcpuinfo getvcpuextstate getaddrsize getvcpuaffinity setvcpuaffinity }; - allow $1 $2:domain2 { set_cpuid settsc setscheduler }; + allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim }; allow $1 $2:security check_context; allow $1 $2:shadow enable; allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op }; diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te index 454e27e..c852214 100644 --- a/tools/flask/policy/policy/modules/xen/xen.te +++ b/tools/flask/policy/policy/modules/xen/xen.te @@ -64,6 +64,9 @@ allow dom0_t xen_t:xen { getidle debug getcpuinfo heap pm_op mca_op lockprof cpupool_op tmem_op tmem_control getscheduler setscheduler }; +allow dom0_t xen_t:xen2 { + debug_port_op +}; allow dom0_t xen_t:mmu 
memorymap; # Allow dom0 to use these domctls on itself. For domctls acting on other diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c index 876ac9d..13296cc 100644 --- a/xen/arch/x86/physdev.c +++ b/xen/arch/x86/physdev.c @@ -691,12 +691,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) case PHYSDEVOP_dbgp_op: { struct physdev_dbgp_op op; - if ( !IS_PRIV(v->domain) ) - ret = -EPERM; - else if ( copy_from_guest(&op, arg, 1) ) - ret = -EFAULT; - else - ret = dbgp_op(&op); + ret = -EFAULT; + if ( copy_from_guest(&op, arg, 1) ) + break; + + ret = xsm_physdev_dbgp_op(XSM_PRIV, &op); + if ( ret ) + break; + + ret = dbgp_op(&op); break; } diff --git a/xen/common/memory.c b/xen/common/memory.c index 68501d1..3239d53 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -712,9 +712,6 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) } case XENMEM_claim_pages: - if ( !IS_PRIV(current->domain) ) - return -EPERM; - if ( copy_from_guest(&reservation, arg, 1) ) return -EFAULT; @@ -731,17 +728,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) if ( d == NULL ) return -EINVAL; - rc = domain_set_outstanding_pages(d, reservation.nr_extents); + rc = xsm_claim_pages(XSM_PRIV, d); + + if ( !rc ) + rc = domain_set_outstanding_pages(d, reservation.nr_extents); rcu_unlock_domain(d); break; case XENMEM_get_outstanding_pages: - if ( !IS_PRIV(current->domain) ) - return -EPERM; + rc = xsm_xenmem_get_outstanding_pages(XSM_PRIV); + + if ( !rc ) + rc = get_outstanding_claims(); - rc = get_outstanding_claims(); break; default: diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h index 025936a..7986cda 100644 --- a/xen/include/xsm/dummy.h +++ b/xen/include/xsm/dummy.h @@ -247,6 +247,24 @@ static XSM_INLINE int xsm_memory_pin_page(XSM_DEFAULT_ARG struct domain *d1, str return xsm_default_action(action, d1, d2); } +static XSM_INLINE int xsm_claim_pages(XSM_DEFAULT_ARG struct domain *d) +{ + 
XSM_ASSERT_ACTION(XSM_PRIV); + return xsm_default_action(action, current->domain, d); +} + +static XSM_INLINE int xsm_xenmem_get_outstanding_pages(XSM_DEFAULT_VOID) +{ + XSM_ASSERT_ACTION(XSM_PRIV); + return xsm_default_action(action, current->domain, NULL); +} + +static XSM_INLINE int xsm_physdev_dbgp_op(XSM_DEFAULT_ARG void *op) +{ + XSM_ASSERT_ACTION(XSM_PRIV); + return xsm_default_action(action, current->domain, NULL); +} + static XSM_INLINE int xsm_evtchn_unbound(XSM_DEFAULT_ARG struct domain *d, struct evtchn *chn, domid_t id2) { diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h index cba744c..d8ee0c8 100644 --- a/xen/include/xsm/xsm.h +++ b/xen/include/xsm/xsm.h @@ -92,6 +92,9 @@ struct xsm_operations { int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page); int (*add_to_physmap) (struct domain *d1, struct domain *d2); int (*remove_from_physmap) (struct domain *d1, struct domain *d2); + int (*claim_pages) (struct domain *d); + int (*xenmem_get_outstanding_pages) (void); + int (*physdev_dbgp_op) (void *op); int (*console_io) (struct domain *d, int cmd); @@ -348,6 +351,21 @@ static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, return xsm_ops->remove_from_physmap(d1, d2); } +static inline int xsm_claim_pages(xsm_default_t def, struct domain *d) +{ + return xsm_ops->claim_pages(d); +} + +static inline int xsm_xenmem_get_outstanding_pages(xsm_default_t def) +{ + return xsm_ops->xenmem_get_outstanding_pages(); +} + +static inline int xsm_physdev_dbgp_op(xsm_default_t def, void *op) +{ + return xsm_ops->physdev_dbgp_op(op); +} + static inline int xsm_console_io (xsm_default_t def, struct domain *d, int cmd) { return xsm_ops->console_io(d, cmd); diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c index 6f1e0b4..7235d82 100644 --- a/xen/xsm/dummy.c +++ b/xen/xsm/dummy.c @@ -66,6 +66,9 @@ void xsm_fixup_ops (struct xsm_operations *ops) set_to_dummy_if_null(ops, memory_adjust_reservation); 
set_to_dummy_if_null(ops, memory_stat_reservation); set_to_dummy_if_null(ops, memory_pin_page); + set_to_dummy_if_null(ops, claim_pages); + set_to_dummy_if_null(ops, xenmem_get_outstanding_pages); + set_to_dummy_if_null(ops, physdev_dbgp_op); set_to_dummy_if_null(ops, console_io); diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c index 29a78dd..ca42891 100644 --- a/xen/xsm/flask/hooks.c +++ b/xen/xsm/flask/hooks.c @@ -417,6 +417,23 @@ static int flask_memory_pin_page(struct domain *d1, struct domain *d2, return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PINPAGE); } +static int flask_claim_pages(struct domain *d) +{ + return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM); +} + +static int flask_xenmem_get_outstanding_pages(void) +{ + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN, + XEN__HEAP, NULL); +} + +static int flask_physdev_dbgp_op(void *op) +{ + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN2, + XEN2__DEBUG_PORT_OP, NULL); +} + static int flask_console_io(struct domain *d, int cmd) { u32 perm; @@ -1473,6 +1490,9 @@ static struct xsm_operations flask_ops = { .memory_adjust_reservation = flask_memory_adjust_reservation, .memory_stat_reservation = flask_memory_stat_reservation, .memory_pin_page = flask_memory_pin_page, + .claim_pages = flask_claim_pages, + .xenmem_get_outstanding_pages = flask_xenmem_get_outstanding_pages, + .physdev_dbgp_op = flask_physdev_dbgp_op, .console_io = flask_console_io, diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors index 36cbacf..05bb52e 100644 --- a/xen/xsm/flask/policy/access_vectors +++ b/xen/xsm/flask/policy/access_vectors @@ -3,9 +3,9 @@ # # class class_name { permission_name ... } -# Class xen consists of dom0-only operations dealing with the hypervisor itself. -# Unless otherwise specified, the source is the domain executing the hypercall, -# and the target is the xen initial sid (type xen_t). 
+# Classes xen and xen2 consist of dom0-only operations dealing with the +# hypervisor itself. Unless otherwise specified, the source is the domain +# executing the hypercall, and the target is the xen initial sid (type xen_t). class xen { # XENPF_settime @@ -54,7 +54,7 @@ class xen debug # XEN_SYSCTL_getcpuinfo, XENPF_get_cpu_version, XENPF_get_cpuinfo getcpuinfo -# XEN_SYSCTL_availheap +# XEN_SYSCTL_availheap, XENMEM_get_outstanding_pages heap # XEN_SYSCTL_get_pmstat, XEN_SYSCTL_pm_op, XENPF_set_processor_pminfo, # XENPF_core_parking @@ -75,6 +75,14 @@ class xen setscheduler } +# This is a continuation of class xen, since only 32 permissions can be defined +# per class +class xen2 +{ +# PHYSDEVOP_dbgp_op + debug_port_op +} + # Classes domain and domain2 consist of operations that a domain performs on # another domain or on itself. Unless otherwise specified, the source is the # domain executing the hypercall, and the target is the domain being operated on @@ -190,6 +198,8 @@ class domain2 settsc # XEN_DOMCTL_scheduler_op with XEN_DOMCTL_SCHEDOP_putinfo setscheduler +# XENMEM_claim_pages + setclaim } # Similar to class domain, but primarily contains domctls related to HVM domains diff --git a/xen/xsm/flask/policy/security_classes b/xen/xsm/flask/policy/security_classes index ef134a7..ca191db 100644 --- a/xen/xsm/flask/policy/security_classes +++ b/xen/xsm/flask/policy/security_classes @@ -8,6 +8,7 @@ # for userspace object managers class xen +class xen2 class domain class domain2 class hvm -- 1.8.1.4
Daniel De Graaf
2013-Apr-11 15:35 UTC
[PATCH 2/2] hvm: convert access check for nested HVM to XSM
This adds an XSM hook for enabling nested HVM support, replacing an IS_PRIV check. This hook is a partial duplicate with the xsm_hvm_param hook, but using the existing hook would require adding the index to the hook and would require the use of a custom hook for the xsm-disabled case (using XSM_OTHER, which is less immediately readable) - whereas adding a new hook retains the clarity of the existing code. Defining this new permission allows a security policy writer to allow only certain guest types to use nested HVM features, or to disable it completely. Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> Cc: Ian Campbell <ian.campbell@citrix.com> Cc: Jan Beulich <JBeulich@suse.com> Cc: Keir Fraser <keir@xen.org> --- tools/flask/policy/policy/modules/xen/xen.if | 3 ++- xen/arch/x86/hvm/hvm.c | 6 ++---- xen/include/xsm/dummy.h | 6 ++++++ xen/include/xsm/xsm.h | 6 ++++++ xen/xsm/dummy.c | 1 + xen/xsm/flask/hooks.c | 6 ++++++ xen/xsm/flask/policy/access_vectors | 2 ++ 7 files changed, 25 insertions(+), 5 deletions(-) diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if index fbb329d..f6f24a5 100644 --- a/tools/flask/policy/policy/modules/xen/xen.if +++ b/tools/flask/policy/policy/modules/xen/xen.if @@ -54,7 +54,8 @@ define(`create_domain_common'', ` allow $1 $2:shadow enable; allow $1 $2:mmu { map_read map_write adjust memorymap physmap pinpage mmuext_op }; allow $1 $2:grant setup; - allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc setparam pcilevel trackdirtyvram }; + allow $1 $2:hvm { cacheattr getparam hvmctl irqlevel pciroute sethvmc + setparam pcilevel trackdirtyvram nested }; '') # create_domain(priv, target) diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index ea7adf6..49b656e 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -3911,11 +3911,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg) rc = -EINVAL; break; case 
HVM_PARAM_NESTEDHVM: - if ( !IS_PRIV(current->domain) ) - { - rc = -EPERM; + rc = xsm_hvm_param_nested(XSM_PRIV, d); + if ( rc ) break; - } if ( a.value > 1 ) rc = -EINVAL; /* Remove the check below once we have diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h index 7986cda..4d63cf1 100644 --- a/xen/include/xsm/dummy.h +++ b/xen/include/xsm/dummy.h @@ -491,6 +491,12 @@ static XSM_INLINE int xsm_hvm_param(XSM_DEFAULT_ARG struct domain *d, unsigned l return xsm_default_action(action, current->domain, d); } +static XSM_INLINE int xsm_hvm_param_nested(XSM_DEFAULT_ARG struct domain *d) +{ + XSM_ASSERT_ACTION(XSM_PRIV); + return xsm_default_action(action, current->domain, d); +} + #ifdef CONFIG_X86 static XSM_INLINE int xsm_shadow_control(XSM_DEFAULT_ARG struct domain *d, uint32_t op) { diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h index d8ee0c8..9d3f214 100644 --- a/xen/include/xsm/xsm.h +++ b/xen/include/xsm/xsm.h @@ -133,6 +133,7 @@ struct xsm_operations { long (*do_xsm_op) (XEN_GUEST_HANDLE_PARAM(xsm_op_t) op); int (*hvm_param) (struct domain *d, unsigned long op); + int (*hvm_param_nested) (struct domain *d); #ifdef CONFIG_X86 int (*shadow_control) (struct domain *d, uint32_t op); @@ -511,6 +512,11 @@ static inline int xsm_hvm_param (xsm_default_t def, struct domain *d, unsigned l return xsm_ops->hvm_param(d, op); } +static inline int xsm_hvm_param_nested (xsm_default_t def, struct domain *d) +{ + return xsm_ops->hvm_param_nested(d); +} + #ifdef CONFIG_X86 static inline int xsm_shadow_control (xsm_default_t def, struct domain *d, uint32_t op) { diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c index 7235d82..49f4fb8 100644 --- a/xen/xsm/dummy.c +++ b/xen/xsm/dummy.c @@ -104,6 +104,7 @@ void xsm_fixup_ops (struct xsm_operations *ops) set_to_dummy_if_null(ops, tmem_op); set_to_dummy_if_null(ops, tmem_control); set_to_dummy_if_null(ops, hvm_param); + set_to_dummy_if_null(ops, hvm_param_nested); set_to_dummy_if_null(ops, do_xsm_op); diff --git 
a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c index ca42891..7909ad5 100644 --- a/xen/xsm/flask/hooks.c +++ b/xen/xsm/flask/hooks.c @@ -1107,6 +1107,11 @@ static int flask_hvm_param(struct domain *d, unsigned long op) return current_has_perm(d, SECCLASS_HVM, perm); } +static int flask_hvm_param_nested(struct domain *d) +{ + return current_has_perm(d, SECCLASS_HVM, HVM__NESTED); +} + #ifdef CONFIG_X86 static int flask_shadow_control(struct domain *d, uint32_t op) { @@ -1524,6 +1529,7 @@ static struct xsm_operations flask_ops = { .tmem_op = flask_tmem_op, .tmem_control = flask_tmem_control, .hvm_param = flask_hvm_param, + .hvm_param_nested = flask_hvm_param_nested, .do_xsm_op = do_flask_op, diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors index 05bb52e..0b83fbb 100644 --- a/xen/xsm/flask/policy/access_vectors +++ b/xen/xsm/flask/policy/access_vectors @@ -242,6 +242,8 @@ class hvm # source = domain whose memory is being shared # target = client domain share_mem +# HVMOP_set_param setting HVM_PARAM_NESTEDHVM + nested } # Class event describes event channels. Interdomain event channels have their -- 1.8.1.4
Ian Campbell
2013-Apr-11 15:44 UTC
Re: [PATCH 1/2] xen/xsm: add hooks for claim and debug port
On Thu, 2013-04-11 at 16:35 +0100, Daniel De Graaf wrote:> This replaces the IS_PRIV checks on these newly introduced operationsgrep finds 33 instances of IS_PRIV (which includes IS_PRIV_FOR, and the #defines and the ones you are removing here). Should we be working to remove all of those and replace with xsm?. They aren''t all h/call perms checks so I guest not? I just fear you are going to face a never ending trickle of people adding new uses of IS_PRIV, perhaps a quick rename might help?> with equivalent XSM hooks, and adds FLASK access vectors for them. > > Signed-off-by: Daniel De Graaf <dgdegra@tycho.nsa.gov> > Cc: Jan Beulich <jbeulich@suse.com> > Cc: Keir Fraser <keir@xen.org> > Cc: Dan Magenheimer <dan.magenheimer@oracle.com> > --- > tools/flask/policy/policy/modules/xen/xen.if | 2 +- > tools/flask/policy/policy/modules/xen/xen.te | 3 +++ > xen/arch/x86/physdev.c | 15 +++++++++------ > xen/common/memory.c | 15 ++++++++------- > xen/include/xsm/dummy.h | 18 ++++++++++++++++++ > xen/include/xsm/xsm.h | 18 ++++++++++++++++++ > xen/xsm/dummy.c | 3 +++ > xen/xsm/flask/hooks.c | 20 ++++++++++++++++++++ > xen/xsm/flask/policy/access_vectors | 18 ++++++++++++++---- > xen/xsm/flask/policy/security_classes | 1 + > 10 files changed, 95 insertions(+), 18 deletions(-) > > diff --git a/tools/flask/policy/policy/modules/xen/xen.if b/tools/flask/policy/policy/modules/xen/xen.if > index 2ce2212..fbb329d 100644 > --- a/tools/flask/policy/policy/modules/xen/xen.if > +++ b/tools/flask/policy/policy/modules/xen/xen.if > @@ -49,7 +49,7 @@ define(`create_domain_common'', ` > getdomaininfo hypercall setvcpucontext setextvcpucontext > getscheduler getvcpuinfo getvcpuextstate getaddrsize > getvcpuaffinity setvcpuaffinity }; > - allow $1 $2:domain2 { set_cpuid settsc setscheduler }; > + allow $1 $2:domain2 { set_cpuid settsc setscheduler setclaim }; > allow $1 $2:security check_context; > allow $1 $2:shadow enable; > allow $1 $2:mmu { map_read map_write adjust memorymap physmap 
pinpage mmuext_op }; > diff --git a/tools/flask/policy/policy/modules/xen/xen.te b/tools/flask/policy/policy/modules/xen/xen.te > index 454e27e..c852214 100644 > --- a/tools/flask/policy/policy/modules/xen/xen.te > +++ b/tools/flask/policy/policy/modules/xen/xen.te > @@ -64,6 +64,9 @@ allow dom0_t xen_t:xen { > getidle debug getcpuinfo heap pm_op mca_op lockprof cpupool_op tmem_op > tmem_control getscheduler setscheduler > }; > +allow dom0_t xen_t:xen2 { > + debug_port_op > +}; > allow dom0_t xen_t:mmu memorymap; > > # Allow dom0 to use these domctls on itself. For domctls acting on other > diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c > index 876ac9d..13296cc 100644 > --- a/xen/arch/x86/physdev.c > +++ b/xen/arch/x86/physdev.c > @@ -691,12 +691,15 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) > case PHYSDEVOP_dbgp_op: { > struct physdev_dbgp_op op; > > - if ( !IS_PRIV(v->domain) ) > - ret = -EPERM; > - else if ( copy_from_guest(&op, arg, 1) ) > - ret = -EFAULT; > - else > - ret = dbgp_op(&op); > + ret = -EFAULT; > + if ( copy_from_guest(&op, arg, 1) ) > + break; > + > + ret = xsm_physdev_dbgp_op(XSM_PRIV, &op); > + if ( ret ) > + break; > + > + ret = dbgp_op(&op); > break; > } > > diff --git a/xen/common/memory.c b/xen/common/memory.c > index 68501d1..3239d53 100644 > --- a/xen/common/memory.c > +++ b/xen/common/memory.c > @@ -712,9 +712,6 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) > } > > case XENMEM_claim_pages: > - if ( !IS_PRIV(current->domain) ) > - return -EPERM; > - > if ( copy_from_guest(&reservation, arg, 1) ) > return -EFAULT; > > @@ -731,17 +728,21 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) > if ( d == NULL ) > return -EINVAL; > > - rc = domain_set_outstanding_pages(d, reservation.nr_extents); > + rc = xsm_claim_pages(XSM_PRIV, d); > + > + if ( !rc ) > + rc = domain_set_outstanding_pages(d, reservation.nr_extents); > > rcu_unlock_domain(d); > > break; > > 
case XENMEM_get_outstanding_pages: > - if ( !IS_PRIV(current->domain) ) > - return -EPERM; > + rc = xsm_xenmem_get_outstanding_pages(XSM_PRIV); > + > + if ( !rc ) > + rc = get_outstanding_claims(); > > - rc = get_outstanding_claims(); > break; > > default: > diff --git a/xen/include/xsm/dummy.h b/xen/include/xsm/dummy.h > index 025936a..7986cda 100644 > --- a/xen/include/xsm/dummy.h > +++ b/xen/include/xsm/dummy.h > @@ -247,6 +247,24 @@ static XSM_INLINE int xsm_memory_pin_page(XSM_DEFAULT_ARG struct domain *d1, str > return xsm_default_action(action, d1, d2); > } > > +static XSM_INLINE int xsm_claim_pages(XSM_DEFAULT_ARG struct domain *d) > +{ > + XSM_ASSERT_ACTION(XSM_PRIV); > + return xsm_default_action(action, current->domain, d); > +} > + > +static XSM_INLINE int xsm_xenmem_get_outstanding_pages(XSM_DEFAULT_VOID) > +{ > + XSM_ASSERT_ACTION(XSM_PRIV); > + return xsm_default_action(action, current->domain, NULL); > +} > + > +static XSM_INLINE int xsm_physdev_dbgp_op(XSM_DEFAULT_ARG void *op) > +{ > + XSM_ASSERT_ACTION(XSM_PRIV); > + return xsm_default_action(action, current->domain, NULL); > +} > + > static XSM_INLINE int xsm_evtchn_unbound(XSM_DEFAULT_ARG struct domain *d, struct evtchn *chn, > domid_t id2) > { > diff --git a/xen/include/xsm/xsm.h b/xen/include/xsm/xsm.h > index cba744c..d8ee0c8 100644 > --- a/xen/include/xsm/xsm.h > +++ b/xen/include/xsm/xsm.h > @@ -92,6 +92,9 @@ struct xsm_operations { > int (*memory_pin_page) (struct domain *d1, struct domain *d2, struct page_info *page); > int (*add_to_physmap) (struct domain *d1, struct domain *d2); > int (*remove_from_physmap) (struct domain *d1, struct domain *d2); > + int (*claim_pages) (struct domain *d); > + int (*xenmem_get_outstanding_pages) (void); > + int (*physdev_dbgp_op) (void *op); > > int (*console_io) (struct domain *d, int cmd); > > @@ -348,6 +351,21 @@ static inline int xsm_remove_from_physmap(xsm_default_t def, struct domain *d1, > return xsm_ops->remove_from_physmap(d1, d2); > } > > 
+static inline int xsm_claim_pages(xsm_default_t def, struct domain *d) > +{ > + return xsm_ops->claim_pages(d); > +} > + > +static inline int xsm_xenmem_get_outstanding_pages(xsm_default_t def) > +{ > + return xsm_ops->xenmem_get_outstanding_pages(); > +} > + > +static inline int xsm_physdev_dbgp_op(xsm_default_t def, void *op) > +{ > + return xsm_ops->physdev_dbgp_op(op); > +} > + > static inline int xsm_console_io (xsm_default_t def, struct domain *d, int cmd) > { > return xsm_ops->console_io(d, cmd); > diff --git a/xen/xsm/dummy.c b/xen/xsm/dummy.c > index 6f1e0b4..7235d82 100644 > --- a/xen/xsm/dummy.c > +++ b/xen/xsm/dummy.c > @@ -66,6 +66,9 @@ void xsm_fixup_ops (struct xsm_operations *ops) > set_to_dummy_if_null(ops, memory_adjust_reservation); > set_to_dummy_if_null(ops, memory_stat_reservation); > set_to_dummy_if_null(ops, memory_pin_page); > + set_to_dummy_if_null(ops, claim_pages); > + set_to_dummy_if_null(ops, xenmem_get_outstanding_pages); > + set_to_dummy_if_null(ops, physdev_dbgp_op); > > set_to_dummy_if_null(ops, console_io); > > diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c > index 29a78dd..ca42891 100644 > --- a/xen/xsm/flask/hooks.c > +++ b/xen/xsm/flask/hooks.c > @@ -417,6 +417,23 @@ static int flask_memory_pin_page(struct domain *d1, struct domain *d2, > return domain_has_perm(d1, d2, SECCLASS_MMU, MMU__PINPAGE); > } > > +static int flask_claim_pages(struct domain *d) > +{ > + return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SETCLAIM); > +} > + > +static int flask_xenmem_get_outstanding_pages(void) > +{ > + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN, > + XEN__HEAP, NULL); > +} > + > +static int flask_physdev_dbgp_op(void *op) > +{ > + return avc_current_has_perm(SECINITSID_XEN, SECCLASS_XEN2, > + XEN2__DEBUG_PORT_OP, NULL); > +} > + > static int flask_console_io(struct domain *d, int cmd) > { > u32 perm; > @@ -1473,6 +1490,9 @@ static struct xsm_operations flask_ops = { > .memory_adjust_reservation = 
flask_memory_adjust_reservation, > .memory_stat_reservation = flask_memory_stat_reservation, > .memory_pin_page = flask_memory_pin_page, > + .claim_pages = flask_claim_pages, > + .xenmem_get_outstanding_pages = flask_xenmem_get_outstanding_pages, > + .physdev_dbgp_op = flask_physdev_dbgp_op, > > .console_io = flask_console_io, > > diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors > index 36cbacf..05bb52e 100644 > --- a/xen/xsm/flask/policy/access_vectors > +++ b/xen/xsm/flask/policy/access_vectors > @@ -3,9 +3,9 @@ > # > # class class_name { permission_name ... } > > -# Class xen consists of dom0-only operations dealing with the hypervisor itself. > -# Unless otherwise specified, the source is the domain executing the hypercall, > -# and the target is the xen initial sid (type xen_t). > +# Classes xen and xen2 consist of dom0-only operations dealing with the > +# hypervisor itself. Unless otherwise specified, the source is the domain > +# executing the hypercall, and the target is the xen initial sid (type xen_t). > class xen > { > # XENPF_settime > @@ -54,7 +54,7 @@ class xen > debug > # XEN_SYSCTL_getcpuinfo, XENPF_get_cpu_version, XENPF_get_cpuinfo > getcpuinfo > -# XEN_SYSCTL_availheap > +# XEN_SYSCTL_availheap, XENMEM_get_outstanding_pages > heap > # XEN_SYSCTL_get_pmstat, XEN_SYSCTL_pm_op, XENPF_set_processor_pminfo, > # XENPF_core_parking > @@ -75,6 +75,14 @@ class xen > setscheduler > } > > +# This is a continuation of class xen, since only 32 permissions can be defined > +# per class > +class xen2 > +{ > +# PHYSDEVOP_dbgp_op > + debug_port_op > +} > + > # Classes domain and domain2 consist of operations that a domain performs on > # another domain or on itself. 
Unless otherwise specified, the source is the > # domain executing the hypercall, and the target is the domain being operated on > @@ -190,6 +198,8 @@ class domain2 > settsc > # XEN_DOMCTL_scheduler_op with XEN_DOMCTL_SCHEDOP_putinfo > setscheduler > +# XENMEM_claim_pages > + setclaim > } > > # Similar to class domain, but primarily contains domctls related to HVM domains > diff --git a/xen/xsm/flask/policy/security_classes b/xen/xsm/flask/policy/security_classes > index ef134a7..ca191db 100644 > --- a/xen/xsm/flask/policy/security_classes > +++ b/xen/xsm/flask/policy/security_classes > @@ -8,6 +8,7 @@ > # for userspace object managers > > class xen > +class xen2 > class domain > class domain2 > class hvm > -- > 1.8.1.4 > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
Jan Beulich
2013-Apr-11 16:02 UTC
Re: [PATCH 1/2] xen/xsm: add hooks for claim and debug port
>>> On 11.04.13 at 17:35, Daniel De Graaf <dgdegra@tycho.nsa.gov> wrote: > This replaces the IS_PRIV checks on these newly introduced operations > with equivalent XSM hooks, and adds FLASK access vectors for them.

Doing this for the tmem ones obviously makes sense. I wonder though what the point is of doing this for the debug port one. When the hypervisor uses a debug port based console, it is mandatory for the Dom0 kernel's EHCI driver to issue these calls when resetting the controller (or else the console will stop working, and Xen will hang at least temporarily). So having _any_ means to deny this operation seems counterproductive to me. Furthermore, an EHCI controller that the hypervisor runs its console through can't be assigned to other than Dom0 (for that very reason), so if you imply the option of a driver domain, then this aspect needs to be worked out first, _before_ adding access control. So first of all, please split the patch for the uncontroversial part to go in. Jan