Jan Beulich
2012-Sep-05 12:37 UTC
[PATCH 05/11] tmem: don't access guest memory without using the accessors intended for this
This is not permitted, not even for buffers coming from Dom0 (and it would also break the moment Dom0 runs in HVM mode). An implication from the changes here is that tmh_copy_page() can't be used anymore for control operations calling tmh_copy_{from,to}_client() (as those pass the buffer by virtual address rather than MFN). Note that tmemc_save_get_next_page() previously didn't set the returned handle's pool_id field, while the new code does. It needs to be confirmed that this is not a problem (otherwise the copy-out operation will require further tmh_...() abstractions to be added). Further note that the patch removes (rather than adjusts) an invalid call to unmap_domain_page() (no matching map_domain_page()) from tmh_compress_from_client() and adds a missing one to an error return path in tmh_copy_from_client(). Finally note that the patch adds a previously missing return statement to cli_get_page() (without which that function could de-reference a NULL pointer, triggerable from guest mode). This is part of XSA-15 / CVE-2012-3497. 
Signed-off-by: Jan Beulich <jbeulich@suse.com> --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -388,11 +388,13 @@ static NOINLINE int pcd_copy_to_client(t pcd = pgp->pcd; if ( pgp->size < PAGE_SIZE && pgp->size != 0 && pcd->size < PAGE_SIZE && pcd->size != 0 ) - ret = tmh_decompress_to_client(cmfn, pcd->cdata, pcd->size, NULL); + ret = tmh_decompress_to_client(cmfn, pcd->cdata, pcd->size, + tmh_cli_buf_null); else if ( tmh_tze_enabled() && pcd->size < PAGE_SIZE ) ret = tmh_copy_tze_to_client(cmfn, pcd->tze, pcd->size); else - ret = tmh_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE, NULL); + ret = tmh_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE, + tmh_cli_buf_null); tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]); return ret; } @@ -1444,7 +1446,7 @@ static inline void tmem_ensure_avail_pag /************ TMEM CORE OPERATIONS ************************************/ static NOINLINE int do_tmem_put_compress(pgp_t *pgp, tmem_cli_mfn_t cmfn, - void *cva) + tmem_cli_va_t clibuf) { void *dst, *p; size_t size; @@ -1463,7 +1465,7 @@ static NOINLINE int do_tmem_put_compress if ( pgp->pfp != NULL ) pgp_free_data(pgp, pgp->us.obj->pool); START_CYC_COUNTER(compress); - ret = tmh_compress_from_client(cmfn, &dst, &size, cva); + ret = tmh_compress_from_client(cmfn, &dst, &size, clibuf); if ( (ret == -EFAULT) || (ret == 0) ) goto out; else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) { @@ -1490,7 +1492,8 @@ out: } static NOINLINE int do_tmem_dup_put(pgp_t *pgp, tmem_cli_mfn_t cmfn, - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, void *cva) + pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, + tmem_cli_va_t clibuf) { pool_t *pool; obj_t *obj; @@ -1512,7 +1515,7 @@ static NOINLINE int do_tmem_dup_put(pgp_ /* can we successfully manipulate pgp to change out the data? 
*/ if ( len != 0 && client->compress && pgp->size != 0 ) { - ret = do_tmem_put_compress(pgp,cmfn,cva); + ret = do_tmem_put_compress(pgp, cmfn, clibuf); if ( ret == 1 ) goto done; else if ( ret == 0 ) @@ -1530,7 +1533,8 @@ copy_uncompressed: goto failed_dup; pgp->size = 0; /* tmh_copy_from_client properly handles len==0 and offsets != 0 */ - ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,0); + ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, + tmh_cli_buf_null); if ( ret == -EFAULT ) goto bad_copy; if ( tmh_dedup_enabled() && !is_persistent(pool) ) @@ -1582,7 +1586,7 @@ cleanup: static NOINLINE int do_tmem_put(pool_t *pool, OID *oidp, uint32_t index, tmem_cli_mfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, void *cva) + pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_t clibuf) { obj_t *obj = NULL, *objfound = NULL, *objnew = NULL; pgp_t *pgp = NULL, *pgpdel = NULL; @@ -1596,7 +1600,8 @@ static NOINLINE int do_tmem_put(pool_t * { ASSERT_SPINLOCK(&objfound->obj_spinlock); if ((pgp = pgp_lookup_in_obj(objfound, index)) != NULL) - return do_tmem_dup_put(pgp,cmfn,tmem_offset,pfn_offset,len,cva); + return do_tmem_dup_put(pgp, cmfn, tmem_offset, pfn_offset, len, + clibuf); } /* no puts allowed into a frozen pool (except dup puts) */ @@ -1631,7 +1636,7 @@ static NOINLINE int do_tmem_put(pool_t * if ( len != 0 && client->compress ) { ASSERT(pgp->pfp == NULL); - ret = do_tmem_put_compress(pgp,cmfn,cva); + ret = do_tmem_put_compress(pgp, cmfn, clibuf); if ( ret == 1 ) goto insert_page; if ( ret == -ENOMEM ) @@ -1655,7 +1660,8 @@ copy_uncompressed: goto delete_and_free; } /* tmh_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */ - ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,cva); + ret = tmh_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, + clibuf); if ( ret == -EFAULT ) goto bad_copy; if ( tmh_dedup_enabled() && !is_persistent(pool) ) @@ -1725,12 +1731,13 
@@ free: static NOINLINE int do_tmem_get(pool_t *pool, OID *oidp, uint32_t index, tmem_cli_mfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, void *cva) + pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_t clibuf) { obj_t *obj; pgp_t *pgp; client_t *client = pool->client; DECL_LOCAL_CYC_COUNTER(decompress); + int rc = -EFAULT; if ( !_atomic_read(pool->pgp_count) ) return -EEMPTY; @@ -1755,16 +1762,18 @@ static NOINLINE int do_tmem_get(pool_t * if ( tmh_dedup_enabled() && !is_persistent(pool) && pgp->firstbyte != NOT_SHAREABLE ) { - if ( pcd_copy_to_client(cmfn, pgp) == -EFAULT ) + rc = pcd_copy_to_client(cmfn, pgp); + if ( rc <= 0 ) goto bad_copy; } else if ( pgp->size != 0 ) { START_CYC_COUNTER(decompress); - if ( tmh_decompress_to_client(cmfn, pgp->cdata, - pgp->size, cva) == -EFAULT ) + rc = tmh_decompress_to_client(cmfn, pgp->cdata, + pgp->size, clibuf); + if ( rc <= 0 ) goto bad_copy; END_CYC_COUNTER(decompress); } else if ( tmh_copy_to_client(cmfn, pgp->pfp, tmem_offset, - pfn_offset, len, cva) == -EFAULT) + pfn_offset, len, clibuf) == -EFAULT) goto bad_copy; if ( is_ephemeral(pool) ) { @@ -1804,8 +1813,7 @@ static NOINLINE int do_tmem_get(pool_t * bad_copy: /* this should only happen if the client passed a bad mfn */ failed_copies++; - return -EFAULT; - + return rc; } static NOINLINE int do_tmem_flush_page(pool_t *pool, OID *oidp, uint32_t index) @@ -2345,7 +2353,6 @@ static NOINLINE int tmemc_save_subop(int pool_t *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) ? 
NULL : client->pools[pool_id]; uint32_t p; - uint64_t *uuid; pgp_t *pgp, *pgp2; int rc = -1; @@ -2409,9 +2416,7 @@ static NOINLINE int tmemc_save_subop(int case TMEMC_SAVE_GET_POOL_UUID: if ( pool == NULL ) break; - uuid = (uint64_t *)buf.p; - *uuid++ = pool->uuid[0]; - *uuid = pool->uuid[1]; + tmh_copy_to_client_buf(buf, pool->uuid, 2); rc = 0; case TMEMC_SAVE_END: if ( client == NULL ) @@ -2436,7 +2441,7 @@ static NOINLINE int tmemc_save_get_next_ pgp_t *pgp; OID oid; int ret = 0; - struct tmem_handle *h; + struct tmem_handle h; unsigned int pagesize = 1 << (pool->pageshift+12); if ( pool == NULL || is_ephemeral(pool) ) @@ -2467,11 +2472,13 @@ static NOINLINE int tmemc_save_get_next_ pgp_t,us.pool_pers_pages); pool->cur_pgp = pgp; oid = pgp->us.obj->oid; - h = (struct tmem_handle *)buf.p; - *(OID *)&h->oid[0] = oid; - h->index = pgp->index; - buf.p = (void *)(h+1); - ret = do_tmem_get(pool, &oid, h->index,0,0,0,pagesize,buf.p); + h.pool_id = pool_id; + BUILD_BUG_ON(sizeof(h.oid) != sizeof(oid)); + memcpy(h.oid, oid.oid, sizeof(h.oid)); + h.index = pgp->index; + tmh_copy_to_client_buf(buf, &h, 1); + tmh_client_buf_add(buf, sizeof(h)); + ret = do_tmem_get(pool, &oid, pgp->index, 0, 0, 0, pagesize, buf); out: tmem_spin_unlock(&pers_lists_spinlock); @@ -2483,7 +2490,7 @@ static NOINLINE int tmemc_save_get_next_ { client_t *client = tmh_client_from_cli_id(cli_id); pgp_t *pgp; - struct tmem_handle *h; + struct tmem_handle h; int ret = 0; if ( client == NULL ) @@ -2509,10 +2516,11 @@ static NOINLINE int tmemc_save_get_next_ pgp_t,client_inv_pages); client->cur_pgp = pgp; } - h = (struct tmem_handle *)buf.p; - h->pool_id = pgp->pool_id; - *(OID *)&h->oid = pgp->inv_oid; - h->index = pgp->index; + h.pool_id = pgp->pool_id; + BUILD_BUG_ON(sizeof(h.oid) != sizeof(pgp->inv_oid)); + memcpy(h.oid, pgp->inv_oid.oid, sizeof(h.oid)); + h.index = pgp->index; + tmh_copy_to_client_buf(buf, &h, 1); ret = 1; out: tmem_spin_unlock(&pers_lists_spinlock); @@ -2528,7 +2536,7 @@ static int 
tmemc_restore_put_page(int cl if ( pool == NULL ) return -1; - return do_tmem_put(pool,oidp,index,0,0,0,bufsize,buf.p); + return do_tmem_put(pool, oidp, index, 0, 0, 0, bufsize, buf); } static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, OID *oidp, @@ -2732,19 +2740,19 @@ EXPORT long do_tmem_op(tmem_cli_op_t uop break; case TMEM_NEW_PAGE: tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, - op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, NULL); + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, + tmh_cli_buf_null); break; case TMEM_PUT_PAGE: tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, - op.u.gen.index, op.u.gen.cmfn, 0, 0, PAGE_SIZE, NULL); + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, + PAGE_SIZE, tmh_cli_buf_null); if (rc == 1) succ_put = 1; else non_succ_put = 1; break; case TMEM_GET_PAGE: rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, - 0, 0, PAGE_SIZE, 0); + 0, 0, PAGE_SIZE, tmh_cli_buf_null); if (rc == 1) succ_get = 1; else non_succ_get = 1; break; @@ -2763,13 +2771,13 @@ EXPORT long do_tmem_op(tmem_cli_op_t uop case TMEM_READ: rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, op.u.gen.tmem_offset, op.u.gen.pfn_offset, - op.u.gen.len,0); + op.u.gen.len, tmh_cli_buf_null); break; case TMEM_WRITE: rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, op.u.gen.tmem_offset, op.u.gen.pfn_offset, - op.u.gen.len, NULL); + op.u.gen.len, tmh_cli_buf_null); break; case TMEM_XCHG: /* need to hold global lock to ensure xchg is atomic */ --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -51,6 +51,7 @@ DECL_CYC_COUNTER(pg_copy); #define LZO_DSTMEM_PAGES 2 static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem); static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem); +static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page); #ifdef COMPARE_COPY_PAGE_SSE2 #include <asm/flushtlb.h> /* REMOVE ME AFTER TEST */ @@ -115,6 +116,7 @@ static inline void 
*cli_get_page(tmem_cl { if ( page ) put_page(page); + return NULL; } if ( cli_write && !get_page_type(page, PGT_writable_page) ) @@ -144,12 +146,12 @@ static inline void cli_put_page(tmem_cli EXPORT int tmh_copy_from_client(pfp_t *pfp, tmem_cli_mfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, void *cli_va) + pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; - void *tmem_va; + char *tmem_va, *cli_va = NULL; pfp_t *cli_pfp = NULL; - bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */ + int rc = 1; ASSERT(pfp != NULL); tmem_mfn = page_to_mfn(pfp); @@ -160,62 +162,76 @@ EXPORT int tmh_copy_from_client(pfp_t *p unmap_domain_page(tmem_va); return 1; } - if ( !tmemc ) + if ( guest_handle_is_null(clibuf) ) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0); if ( cli_va == NULL ) + { + unmap_domain_page(tmem_va); return -EFAULT; + } } mb(); - if (len == PAGE_SIZE && !tmem_offset && !pfn_offset) + if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) tmh_copy_page(tmem_va, cli_va); else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) ) - memcpy((char *)tmem_va+tmem_offset,(char *)cli_va+pfn_offset,len); - if ( !tmemc ) + { + if ( cli_va ) + memcpy(tmem_va + tmem_offset, cli_va + pfn_offset, len); + else if ( copy_from_guest_offset(tmem_va + tmem_offset, clibuf, + pfn_offset, len) ) + rc = -EFAULT; + } + if ( cli_va ) cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0); unmap_domain_page(tmem_va); - return 1; + return rc; } EXPORT int tmh_compress_from_client(tmem_cli_mfn_t cmfn, - void **out_va, size_t *out_len, void *cli_va) + void **out_va, size_t *out_len, tmem_cli_va_t clibuf) { int ret = 0; unsigned char *dmem = this_cpu(dstmem); unsigned char *wmem = this_cpu(workmem); + char *scratch = this_cpu(scratch_page); pfp_t *cli_pfp = NULL; unsigned long cli_mfn = 0; - bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */ + void 
*cli_va = NULL; if ( dmem == NULL || wmem == NULL ) return 0; /* no buffer, so can''t compress */ - if ( !tmemc ) + if ( guest_handle_is_null(clibuf) ) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0); if ( cli_va == NULL ) return -EFAULT; } + else if ( !scratch ) + return 0; + else if ( copy_from_guest(scratch, clibuf, PAGE_SIZE) ) + return -EFAULT; mb(); - ret = lzo1x_1_compress(cli_va, PAGE_SIZE, dmem, out_len, wmem); + ret = lzo1x_1_compress(cli_va ?: scratch, PAGE_SIZE, dmem, out_len, wmem); ASSERT(ret == LZO_E_OK); *out_va = dmem; - if ( !tmemc ) + if ( cli_va ) cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 0); - unmap_domain_page(cli_va); return 1; } EXPORT int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp, - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, void *cli_va) + pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, + tmem_cli_va_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; - void *tmem_va; + char *tmem_va, *cli_va = NULL; pfp_t *cli_pfp = NULL; - bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */ + int rc = 1; ASSERT(pfp != NULL); - if ( !tmemc ) + if ( guest_handle_is_null(clibuf) ) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1); if ( cli_va == NULL ) @@ -223,37 +239,48 @@ EXPORT int tmh_copy_to_client(tmem_cli_m } tmem_mfn = page_to_mfn(pfp); tmem_va = map_domain_page(tmem_mfn); - if (len == PAGE_SIZE && !tmem_offset && !pfn_offset) + if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) tmh_copy_page(cli_va, tmem_va); else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) ) - memcpy((char *)cli_va+pfn_offset,(char *)tmem_va+tmem_offset,len); + { + if ( cli_va ) + memcpy(cli_va + pfn_offset, tmem_va + tmem_offset, len); + else if ( copy_to_guest_offset(clibuf, pfn_offset, + tmem_va + tmem_offset, len) ) + rc = -EFAULT; + } unmap_domain_page(tmem_va); - if ( !tmemc ) + if ( cli_va ) cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1); mb(); - return 1; + 
return rc; } EXPORT int tmh_decompress_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, - size_t size, void *cli_va) + size_t size, tmem_cli_va_t clibuf) { unsigned long cli_mfn = 0; pfp_t *cli_pfp = NULL; + void *cli_va = NULL; + char *scratch = this_cpu(scratch_page); size_t out_len = PAGE_SIZE; - bool_t tmemc = cli_va != NULL; /* if true, cli_va is control-op buffer */ int ret; - if ( !tmemc ) + if ( guest_handle_is_null(clibuf) ) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 1); if ( cli_va == NULL ) return -EFAULT; } - ret = lzo1x_decompress_safe(tmem_va, size, cli_va, &out_len); + else if ( !scratch ) + return 0; + ret = lzo1x_decompress_safe(tmem_va, size, cli_va ?: scratch, &out_len); ASSERT(ret == LZO_E_OK); ASSERT(out_len == PAGE_SIZE); - if ( !tmemc ) + if ( cli_va ) cli_put_page(cmfn, cli_va, cli_pfp, cli_mfn, 1); + else if ( copy_to_guest(clibuf, scratch, PAGE_SIZE) ) + return -EFAULT; mb(); return 1; } @@ -423,6 +450,11 @@ static int cpu_callback( struct page_info *p = alloc_domheap_pages(0, workmem_order, 0); per_cpu(workmem, cpu) = p ? page_to_virt(p) : NULL; } + if ( per_cpu(scratch_page, cpu) == NULL ) + { + struct page_info *p = alloc_domheap_page(NULL, 0); + per_cpu(scratch_page, cpu) = p ? 
page_to_virt(p) : NULL; + } break; } case CPU_DEAD: @@ -439,6 +471,11 @@ static int cpu_callback( free_domheap_pages(p, workmem_order); per_cpu(workmem, cpu) = NULL; } + if ( per_cpu(scratch_page, cpu) != NULL ) + { + free_domheap_page(virt_to_page(per_cpu(scratch_page, cpu))); + per_cpu(scratch_page, cpu) = NULL; + } break; } default: --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -482,27 +482,33 @@ static inline int tmh_get_tmemop_from_cl return copy_from_guest(op, uops, 1); } +#define tmh_cli_buf_null guest_handle_from_ptr(NULL, char) + static inline void tmh_copy_to_client_buf_offset(tmem_cli_va_t clibuf, int off, char *tmembuf, int len) { copy_to_guest_offset(clibuf,off,tmembuf,len); } +#define tmh_copy_to_client_buf(clibuf, tmembuf, cnt) \ + copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt) + +#define tmh_client_buf_add guest_handle_add_offset + #define TMH_CLI_ID_NULL ((cli_id_t)((domid_t)-1L)) #define tmh_cli_id_str "domid" #define tmh_client_str "domain" -extern int tmh_decompress_to_client(tmem_cli_mfn_t,void*,size_t,void*); +int tmh_decompress_to_client(tmem_cli_mfn_t, void *, size_t, tmem_cli_va_t); -extern int tmh_compress_from_client(tmem_cli_mfn_t,void**,size_t *,void*); +int tmh_compress_from_client(tmem_cli_mfn_t, void **, size_t *, tmem_cli_va_t); -extern int tmh_copy_from_client(pfp_t *pfp, - tmem_cli_mfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, void *cva); +int tmh_copy_from_client(pfp_t *, tmem_cli_mfn_t, pagesize_t tmem_offset, + pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_t); -extern int tmh_copy_to_client(tmem_cli_mfn_t cmfn, pfp_t *pfp, - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, void *cva); +int tmh_copy_to_client(tmem_cli_mfn_t, pfp_t *, pagesize_t tmem_offset, + pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_t); extern int tmh_copy_tze_to_client(tmem_cli_mfn_t cmfn, void *tmem_va, pagesize_t len); 
_______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org http://lists.xen.org/xen-devel
Dan Magenheimer
2012-Sep-05 16:50 UTC
Re: [PATCH 05/11] tmem: don't access guest memory without using the accessors intended for this
> From: Jan Beulich [mailto:JBeulich@suse.com] > Sent: Wednesday, September 05, 2012 6:37 AM > To: xen-devel > Cc: Dan Magenheimer; Zhenzhong Duan > Subject: [PATCH 05/11] tmem: don''t access guest memory without using the accessors intended for this > > This is not permitted, not even for buffers coming from Dom0 (and it > would also break the moment Dom0 runs in HVM mode). An implication from > the changes here is that tmh_copy_page() can''t be used anymore for > control operations calling tmh_copy_{from,to}_client() (as those pass > the buffer by virtual address rather than MFN). > > Note that tmemc_save_get_next_page() previously didn''t set the returned > handle''s pool_id field, while the new code does. It need to be > confirmed that this is not a problem (otherwise the copy-out operation > will require further tmh_...() abstractions to be added). > > Further note that the patch removes (rather than adjusts) an invalid > call to unmap_domain_page() (no matching map_domain_page()) from > tmh_compress_from_client() and adds a missing one to an error return > path in tmh_copy_from_client(). > > Finally note that the patch adds a previously missing return statement > to cli_get_page() (without which that function could de-reference a > NULL pointer, triggerable from guest mode). > > This is part of XSA-15 / CVE-2012-3497. > > Signed-off-by: Jan Beulich <jbeulich@suse.com>I''m a bit baffled by all the unrelated changes and cleanups, but they all appear to be valid fixes and, most importantly, the related security issues appear to be resolved. I''m also unable right now to plumb the depths of the guest copying macros so will have to trust Jan''s good judgement. 
One point in particular, it's difficult to determine if the following line is copying two bytes (wrong) or two uint64_t's (correct).> + tmh_copy_to_client_buf(buf, pool->uuid, 2);Last, this patch almost certainly breaks save/restore/migration of tmem-enabled guests, but that functionality (which once worked fine) has apparently been broken for awhile. So let's go with this new code and fix save/restore/migration on top of it. Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com> (no further comments in the code, so patch elided)
Jan Beulich
2012-Sep-06 07:22 UTC
Re: [PATCH 05/11] tmem: don't access guest memory without using the accessors intended for this
>>> On 05.09.12 at 18:50, Dan Magenheimer <dan.magenheimer@oracle.com> wrote: >> From: Jan Beulich [mailto:JBeulich@suse.com] >> Sent: Wednesday, September 05, 2012 6:37 AM >> To: xen-devel >> Cc: Dan Magenheimer; Zhenzhong Duan >> Subject: [PATCH 05/11] tmem: don''t access guest memory without using the > accessors intended for this >> >> This is not permitted, not even for buffers coming from Dom0 (and it >> would also break the moment Dom0 runs in HVM mode). An implication from >> the changes here is that tmh_copy_page() can''t be used anymore for >> control operations calling tmh_copy_{from,to}_client() (as those pass >> the buffer by virtual address rather than MFN). >> >> Note that tmemc_save_get_next_page() previously didn''t set the returned >> handle''s pool_id field, while the new code does. It need to be >> confirmed that this is not a problem (otherwise the copy-out operation >> will require further tmh_...() abstractions to be added). >> >> Further note that the patch removes (rather than adjusts) an invalid >> call to unmap_domain_page() (no matching map_domain_page()) from >> tmh_compress_from_client() and adds a missing one to an error return >> path in tmh_copy_from_client(). >> >> Finally note that the patch adds a previously missing return statement >> to cli_get_page() (without which that function could de-reference a >> NULL pointer, triggerable from guest mode). >> >> This is part of XSA-15 / CVE-2012-3497. >> >> Signed-off-by: Jan Beulich <jbeulich@suse.com> > > I''m a bit baffled by all the unrelated changes and cleanups, but > they all appear to be valid fixes and, most importantly, the > related security issues appear to be resolved.Having gone through the patch once more, I''d be really curious where you spotted unrelated changes (apart from e.g. adding proper white space use on lines that needed changing anyway). 
With the size of the patch, and with this one having been done when we still thought we would issue the patches together with the advisory, I would really hope not to be caught to have done unnecessary changes here (while still preserving generic style of the code, see below).> I''m also unable right now to plumb the depths of the guest copying > macros so will have to trust Jan''s good judgement. One point in > particular, it''s difficult to determine if the following line is > copying two bytes (wrong) or two uint64_t''s (correct). > >> + tmh_copy_to_client_buf(buf, pool->uuid, 2);With #define tmh_copy_to_client_buf(clibuf, tmembuf, cnt) \ copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt) it is clear that it copies two elements of whatever tmembuf''s type is, in the case at hand uint64_t. I would have preferred to use copy_to_guest() directly, but that appeared to be against the spirit of the rest of the file, which is why I added these new wrapper macros. Jan
Dan Magenheimer
2012-Sep-06 15:44 UTC
Re: [PATCH 05/11] tmem: don't access guest memory without using the accessors intended for this
> From: Jan Beulich [mailto:JBeulich@suse.com] > Subject: RE: [PATCH 05/11] tmem: don''t access guest memory without using the accessors intended for > this > > >>> On 05.09.12 at 18:50, Dan Magenheimer <dan.magenheimer@oracle.com> wrote: > >> From: Jan Beulich [mailto:JBeulich@suse.com] > >> Sent: Wednesday, September 05, 2012 6:37 AM > >> To: xen-devel > >> Cc: Dan Magenheimer; Zhenzhong Duan > >> Subject: [PATCH 05/11] tmem: don''t access guest memory without using the > > accessors intended for this > >> > >> This is not permitted, not even for buffers coming from Dom0 (and it > >> would also break the moment Dom0 runs in HVM mode). An implication from > >> the changes here is that tmh_copy_page() can''t be used anymore for > >> control operations calling tmh_copy_{from,to}_client() (as those pass > >> the buffer by virtual address rather than MFN). > >> > >> Note that tmemc_save_get_next_page() previously didn''t set the returned > >> handle''s pool_id field, while the new code does. It need to be > >> confirmed that this is not a problem (otherwise the copy-out operation > >> will require further tmh_...() abstractions to be added). > >> > >> Further note that the patch removes (rather than adjusts) an invalid > >> call to unmap_domain_page() (no matching map_domain_page()) from > >> tmh_compress_from_client() and adds a missing one to an error return > >> path in tmh_copy_from_client(). > >> > >> Finally note that the patch adds a previously missing return statement > >> to cli_get_page() (without which that function could de-reference a > >> NULL pointer, triggerable from guest mode). > >> > >> This is part of XSA-15 / CVE-2012-3497. > >> > >> Signed-off-by: Jan Beulich <jbeulich@suse.com> > > > > I''m a bit baffled by all the unrelated changes and cleanups, but > > they all appear to be valid fixes and, most importantly, the > > related security issues appear to be resolved. 
> > Having gone through the patch once more, I''d be really > curious where you spotted unrelated changes (apart from > e.g. adding proper white space use on lines that needed > changing anyway). > > With the size of the patch, and with this one having been > done when we still thought we would issue the patches > together with the advisory, I would really hope not to be > caught to have done unnecessary changes here (while > still preserving generic style of the code, see below).Hi Jan -- Again, I am not criticizing the end result or any part of the patch, just noting that some of it _could_ have been separated to a different patch, which would have made it _much_ more obvious what was the core fix for the security issue. No need to reiterate your reasons, I''m only providing more detail here because it sounds as if you are asking sincerely, not defensively. - changing NULL to tmh_cli_buf_null - changing parameter void *cva to tmem_cli_va_t clibuf, which results in - changing all lines that use that that parameter which gave you the license to - cleanup the whitespace in all those lines - all code using -EFAULT that you changed to "< 0" works correctly (though is admittedly fragile) - inlining the use of the bool "tmemc" - the addition of scratch (which I think I understand may patch a security hole undocumented in the commit comment?) Again, all valid cleanups, and some may even decrease the probability that future code changes will create more security issues, but most are IMHO unnecessary to fix the very specific security issue at hand. Does that make sense?> > I''m also unable right now to plumb the depths of the guest copying > > macros so will have to trust Jan''s good judgement. One point in > > particular, it''s difficult to determine if the following line is > > copying two bytes (wrong) or two uint64_t''s (correct). 
> > > >> + tmh_copy_to_client_buf(buf, pool->uuid, 2); > > With > > #define tmh_copy_to_client_buf(clibuf, tmembuf, cnt) \ > copy_to_guest(guest_handle_cast(clibuf, void), tmembuf, cnt) > > it is clear that it copies two elements of whatever tmembuf''s > type is, in the case at hand uint64_t.Again, just trying to be honest and helpful... it may be clear to you, but I would venture to guess it is _not_ clear to the vast majority of (even highly experienced) system programmers who have not studied the guest copying code in gory detail. IMHO, the guest copying code is incredible in what it does and indecipherable below the first layer. Only the page macros in the Linux mm subsystem rival it in layers of obfuscation ;-) Is there any detailed documentation about how it works? If not, it would be good to add some. One last time: _not_ criticizing! Dan
Jan Beulich
2012-Sep-07 08:01 UTC
Re: [PATCH 05/11] tmem: don't access guest memory without using the accessors intended for this
>>> On 06.09.12 at 17:44, Dan Magenheimer <dan.magenheimer@oracle.com> wrote: >> > I''m a bit baffled by all the unrelated changes and cleanups, but >> > they all appear to be valid fixes and, most importantly, the >> > related security issues appear to be resolved. >> >> Having gone through the patch once more, I''d be really >> curious where you spotted unrelated changes (apart from >> e.g. adding proper white space use on lines that needed >> changing anyway). >> >> With the size of the patch, and with this one having been >> done when we still thought we would issue the patches >> together with the advisory, I would really hope not to be >> caught to have done unnecessary changes here (while >> still preserving generic style of the code, see below). > > Again, I am not criticizing the end result or any part of > the patch, just noting that some of it _could_ have been > separated to a different patch, which would have made it > _much_ more obvious what was the core fix for the security issue. > No need to reiterate your reasons, I''m only providing more > detail here because it sounds as if you are asking sincerely, > not defensively.With the below, I''m afraid you didn''t review the patch properly, or am still having problems understanding the security aspects here.> - changing NULL to tmh_cli_buf_nullThe latter is not a simple #define of NULL.> - changing parameter void *cva to tmem_cli_va_t clibuf, > which results inSimilarly, tmem_cli_va_t is not an equivalent of "void *".> - changing all lines that use that that parameter > which gave you the license toPreserving the name of the parameter was just calling for overlooking things. 
Hence, with the type change I also changed the name (and where necessary introduced new local variables with the old type and name, serving the original purpose).> - cleanup the whitespace in all those linesAs said, I permitted myself to do this on lines I had to touch anyway.> - all code using -EFAULT that you changed to "< 0" > works correctly (though is admittedly fragile)I don''t think so - note that the changes were to "<= 0", as some of the functions only return 1 as success indicator (and pass 0 apparently to indicate didn''t do anything or some such - this aspect of the original code was confusing me quite a bit, and I can''t exclude I broke something there).> - inlining the use of the bool "tmemc"Yes, this one could have been preserved, but (almost) all of its uses would have required changes anyway.> - the addition of scratch (which I think I understand may > patch a security hole undocumented in the commit comment?)No, this was a requirement for fixing what the comment says is being fixed: Not being allowed to directly access guest memory, scratch space is needed for calling the compression functions. The alternative would have been to pass guest handles to the compression code, which would have implied touching that code too. I think you agree that would have been a much worse choice. Jan