"SENTINELS" and "DECL_CYC_COUNTER" are hacky code for debugging, there should not exist in upstream code. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 192 +++++++++----------------------------------- xen/common/tmem_xen.c | 5 -- xen/include/xen/tmem_xen.h | 51 ------------ 3 files changed, 36 insertions(+), 212 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 3d15ead..081772e 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -29,39 +29,6 @@ #define TMEM_SPEC_VERSION 1 -/************ DEBUG and STATISTICS (+ some compression testing) *******/ - -#ifndef NDEBUG -#define SENTINELS -#define NOINLINE noinline -#else -#define NOINLINE -#endif - -#ifdef SENTINELS -#define DECL_SENTINEL unsigned long sentinel; -#define SET_SENTINEL(_x,_y) _x->sentinel = _y##_SENTINEL -#define INVERT_SENTINEL(_x,_y) _x->sentinel = ~_y##_SENTINEL -#define ASSERT_SENTINEL(_x,_y) \ - ASSERT(_x->sentinel != ~_y##_SENTINEL);ASSERT(_x->sentinel == _y##_SENTINEL) -#if defined(CONFIG_ARM) -#define POOL_SENTINEL 0x87658765 -#define OBJ_SENTINEL 0x12345678 -#define OBJNODE_SENTINEL 0xfedcba09 -#define PGD_SENTINEL 0x43214321 -#else -#define POOL_SENTINEL 0x8765876587658765 -#define OBJ_SENTINEL 0x1234567812345678 -#define OBJNODE_SENTINEL 0xfedcba0987654321 -#define PGD_SENTINEL 0x4321432143214321 -#endif -#else -#define DECL_SENTINEL -#define SET_SENTINEL(_x,_y) do { } while (0) -#define ASSERT_SENTINEL(_x,_y) do { } while (0) -#define INVERT_SENTINEL(_x,_y) do { } while (0) -#endif - /* global statistics (none need to be locked) */ static unsigned long total_tmem_ops = 0; static unsigned long errored_tmem_ops = 0; @@ -83,16 +50,6 @@ static unsigned long failed_copies; static unsigned long pcd_tot_tze_size = 0; static unsigned long pcd_tot_csize = 0; -DECL_CYC_COUNTER(succ_get); -DECL_CYC_COUNTER(succ_put); -DECL_CYC_COUNTER(non_succ_get); -DECL_CYC_COUNTER(non_succ_put); -DECL_CYC_COUNTER(flush); -DECL_CYC_COUNTER(flush_obj); 
-EXTERN_CYC_COUNTER(pg_copy); -DECL_CYC_COUNTER(compress); -DECL_CYC_COUNTER(decompress); - /************ CORE DATA STRUCTURES ************************************/ #define MAX_POOLS_PER_DOMAIN 16 @@ -166,7 +123,6 @@ struct tmem_pool { unsigned long gets, found_gets; unsigned long flushs, flushs_found; unsigned long flush_objs, flush_objs_found; - DECL_SENTINEL }; #define is_persistent(_p) (_p->persistent) @@ -179,7 +135,6 @@ struct oid { }; struct tmem_object_root { - DECL_SENTINEL struct oid oid; struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */ unsigned long objnode_count; /* atomicity depends on obj_spinlock */ @@ -193,7 +148,6 @@ struct tmem_object_root { struct tmem_object_node { struct tmem_object_root *obj; - DECL_SENTINEL struct radix_tree_node rtn; }; @@ -228,7 +182,6 @@ struct tmem_page_descriptor { uint64_t timestamp; uint32_t pool_id; /* used for invalid list only */ }; - DECL_SENTINEL }; #define PCD_TZE_MAX_SIZE (PAGE_SIZE - (PAGE_SIZE/64)) @@ -299,7 +252,7 @@ static atomic_t global_rtree_node_count = ATOMIC_INIT(0); /************ MEMORY ALLOCATION INTERFACE *****************************/ -static NOINLINE void *tmem_malloc(size_t size, struct tmem_pool *pool) +static void *tmem_malloc(size_t size, struct tmem_pool *pool) { void *v = NULL; @@ -318,7 +271,7 @@ static NOINLINE void *tmem_malloc(size_t size, struct tmem_pool *pool) return v; } -static NOINLINE void tmem_free(void *p, struct tmem_pool *pool) +static void tmem_free(void *p, struct tmem_pool *pool) { if ( pool == NULL || !is_persistent(pool) ) { @@ -332,7 +285,7 @@ static NOINLINE void tmem_free(void *p, struct tmem_pool *pool) } } -static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool) +static struct page_info *tmem_page_alloc(struct tmem_pool *pool) { struct page_info *pfp = NULL; @@ -347,7 +300,7 @@ static NOINLINE struct page_info *tmem_page_alloc(struct tmem_pool *pool) return pfp; } -static NOINLINE void tmem_page_free(struct tmem_pool *pool, struct 
page_info *pfp) +static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp) { ASSERT(pfp); if ( pool == NULL || !is_persistent(pool) ) @@ -361,7 +314,7 @@ static NOINLINE void tmem_page_free(struct tmem_pool *pool, struct page_info *pf #define NOT_SHAREABLE ((uint16_t)-1UL) -static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) +static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) { uint8_t firstbyte = pgp->firstbyte; struct tmem_page_content_descriptor *pcd; @@ -385,7 +338,7 @@ static NOINLINE int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descript /* ensure pgp no longer points to pcd, nor vice-versa */ /* take pcd rwlock unless have_pcd_rwlock is set, always unlock when done */ -static NOINLINE void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool *pool, bool_t have_pcd_rwlock) +static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool *pool, bool_t have_pcd_rwlock) { struct tmem_page_content_descriptor *pcd = pgp->pcd; struct page_info *pfp = pgp->pcd->pfp; @@ -448,7 +401,7 @@ static NOINLINE void pcd_disassociate(struct tmem_page_descriptor *pgp, struct t } -static NOINLINE int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize_t csize) +static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize_t csize) { struct rb_node **new, *parent = NULL; struct rb_root *root; @@ -585,7 +538,7 @@ unlock: /************ PAGE DESCRIPTOR MANIPULATION ROUTINES *******************/ /* allocate a struct tmem_page_descriptor and associate it with an object */ -static NOINLINE struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) +static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) { struct tmem_page_descriptor *pgp; struct tmem_pool *pool; @@ -608,7 +561,6 @@ static NOINLINE struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root * pgp->size = -1; pgp->index = -1; 
pgp->timestamp = get_cycles(); - SET_SENTINEL(pgp,PGD); atomic_inc_and_max(global_pgp_count); atomic_inc_and_max(pool->pgp_count); return pgp; @@ -618,13 +570,11 @@ static struct tmem_page_descriptor *pgp_lookup_in_obj(struct tmem_object_root *o { ASSERT(obj != NULL); ASSERT_SPINLOCK(&obj->obj_spinlock); - ASSERT_SENTINEL(obj,OBJ); ASSERT(obj->pool != NULL); - ASSERT_SENTINEL(obj->pool,POOL); return radix_tree_lookup(&obj->tree_root, index); } -static NOINLINE void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *pool) +static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *pool) { pagesize_t pgp_size = pgp->size; @@ -645,14 +595,11 @@ static NOINLINE void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem pgp->size = -1; } -static NOINLINE void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) +static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) { struct tmem_pool *pool = NULL; - ASSERT_SENTINEL(pgp,PGD); ASSERT(pgp->us.obj != NULL); - ASSERT_SENTINEL(pgp->us.obj,OBJ); - ASSERT_SENTINEL(pgp->us.obj->pool,POOL); ASSERT(pgp->us.obj->pool->client != NULL); if ( from_delete ) ASSERT(pgp_lookup_in_obj(pgp->us.obj,pgp->index) == NULL); @@ -673,19 +620,15 @@ static NOINLINE void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) pgp->pool_id = pool->pool_id; return; } - INVERT_SENTINEL(pgp,PGD); pgp->us.obj = NULL; pgp->index = -1; tmem_free(pgp, pool); } -static NOINLINE void pgp_free_from_inv_list(struct client *client, struct tmem_page_descriptor *pgp) +static void pgp_free_from_inv_list(struct client *client, struct tmem_page_descriptor *pgp) { struct tmem_pool *pool = client->pools[pgp->pool_id]; - ASSERT_SENTINEL(pool,POOL); - ASSERT_SENTINEL(pgp,PGD); - INVERT_SENTINEL(pgp,PGD); pgp->us.obj = NULL; pgp->index = -1; tmem_free(pgp, pool); @@ -733,7 +676,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) } /* remove page from lists (but not from 
parent object) and free it */ -static NOINLINE void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) +static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) { uint64_t life; @@ -747,7 +690,7 @@ static NOINLINE void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_ } /* called only indirectly by radix_tree_destroy */ -static NOINLINE void pgp_destroy(void *v) +static void pgp_destroy(void *v) { struct tmem_page_descriptor *pgp = (struct tmem_page_descriptor *)v; @@ -770,15 +713,13 @@ static int pgp_add_to_obj(struct tmem_object_root *obj, uint32_t index, struct t return ret; } -static NOINLINE struct tmem_page_descriptor *pgp_delete_from_obj(struct tmem_object_root *obj, uint32_t index) +static struct tmem_page_descriptor *pgp_delete_from_obj(struct tmem_object_root *obj, uint32_t index) { struct tmem_page_descriptor *pgp; ASSERT(obj != NULL); ASSERT_SPINLOCK(&obj->obj_spinlock); - ASSERT_SENTINEL(obj,OBJ); ASSERT(obj->pool != NULL); - ASSERT_SENTINEL(obj->pool,POOL); pgp = radix_tree_delete(&obj->tree_root, index); if ( pgp != NULL ) obj->pgp_count--; @@ -790,19 +731,16 @@ static NOINLINE struct tmem_page_descriptor *pgp_delete_from_obj(struct tmem_obj /************ RADIX TREE NODE MANIPULATION ROUTINES *******************/ /* called only indirectly from radix_tree_insert */ -static NOINLINE struct radix_tree_node *rtn_alloc(void *arg) +static struct radix_tree_node *rtn_alloc(void *arg) { struct tmem_object_node *objnode; struct tmem_object_root *obj = (struct tmem_object_root *)arg; - ASSERT_SENTINEL(obj,OBJ); ASSERT(obj->pool != NULL); - ASSERT_SENTINEL(obj->pool,POOL); objnode = tmem_malloc(sizeof(struct tmem_object_node),obj->pool); if (objnode == NULL) return NULL; objnode->obj = obj; - SET_SENTINEL(objnode,OBJNODE); memset(&objnode->rtn, 0, sizeof(struct radix_tree_node)); if (++obj->pool->objnode_count > obj->pool->objnode_count_max) obj->pool->objnode_count_max = obj->pool->objnode_count; @@ -819,14 +757,10 
@@ static void rtn_free(struct radix_tree_node *rtn, void *arg) ASSERT(rtn != NULL); objnode = container_of(rtn,struct tmem_object_node,rtn); - ASSERT_SENTINEL(objnode,OBJNODE); - INVERT_SENTINEL(objnode,OBJNODE); ASSERT(objnode->obj != NULL); ASSERT_SPINLOCK(&objnode->obj->obj_spinlock); - ASSERT_SENTINEL(objnode->obj,OBJ); pool = objnode->obj->pool; ASSERT(pool != NULL); - ASSERT_SENTINEL(pool,POOL); pool->objnode_count--; objnode->obj->objnode_count--; objnode->obj = NULL; @@ -872,7 +806,7 @@ unsigned oid_hash(struct oid *oidp) } /* searches for object==oid in pool, returns locked object if found */ -static NOINLINE struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp) +static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oidp) { struct rb_node *node; struct tmem_object_root *obj; @@ -910,14 +844,13 @@ restart_find: } /* free an object that has no more pgps in it */ -static NOINLINE void obj_free(struct tmem_object_root *obj, int no_rebalance) +static void obj_free(struct tmem_object_root *obj, int no_rebalance) { struct tmem_pool *pool; struct oid old_oid; ASSERT_SPINLOCK(&obj->obj_spinlock); ASSERT(obj != NULL); - ASSERT_SENTINEL(obj,OBJ); ASSERT(obj->pgp_count == 0); pool = obj->pool; ASSERT(pool != NULL); @@ -929,7 +862,6 @@ static NOINLINE void obj_free(struct tmem_object_root *obj, int no_rebalance) ASSERT(obj->tree_root.rnode == NULL); pool->obj_count--; ASSERT(pool->obj_count >= 0); - INVERT_SENTINEL(obj,OBJ); obj->pool = NULL; old_oid = obj->oid; oid_set_invalid(&obj->oid); @@ -942,7 +874,7 @@ static NOINLINE void obj_free(struct tmem_object_root *obj, int no_rebalance) tmem_free(obj, pool); } -static NOINLINE int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj) +static int obj_rb_insert(struct rb_root *root, struct tmem_object_root *obj) { struct rb_node **new, *parent = NULL; struct tmem_object_root *this; @@ -973,7 +905,7 @@ static NOINLINE int obj_rb_insert(struct rb_root *root, struct 
tmem_object_root * allocate, initialize, and insert an tmem_object_root * (should be called only if find failed) */ -static NOINLINE struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp) +static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oidp) { struct tmem_object_root *obj; @@ -993,7 +925,6 @@ static NOINLINE struct tmem_object_root * obj_new(struct tmem_pool *pool, struct obj->objnode_count = 0; obj->pgp_count = 0; obj->last_client = TMEM_CLI_ID_NULL; - SET_SENTINEL(obj,OBJ); tmem_spin_lock(&obj->obj_spinlock); obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj); obj->no_evict = 1; @@ -1002,7 +933,7 @@ static NOINLINE struct tmem_object_root * obj_new(struct tmem_pool *pool, struct } /* free an object after destroying any pgps in it */ -static NOINLINE void obj_destroy(struct tmem_object_root *obj, int no_rebalance) +static void obj_destroy(struct tmem_object_root *obj, int no_rebalance) { ASSERT_WRITELOCK(&obj->pool->pool_rwlock); radix_tree_destroy(&obj->tree_root, pgp_destroy); @@ -1066,14 +997,11 @@ static struct tmem_pool * pool_alloc(void) pool->flushs_found = pool->flushs = 0; pool->flush_objs_found = pool->flush_objs = 0; pool->is_dying = 0; - SET_SENTINEL(pool,POOL); return pool; } -static NOINLINE void pool_free(struct tmem_pool *pool) +static void pool_free(struct tmem_pool *pool) { - ASSERT_SENTINEL(pool,POOL); - INVERT_SENTINEL(pool,POOL); pool->client = NULL; list_del(&pool->pool_list); xfree(pool); @@ -1097,7 +1025,7 @@ static int shared_pool_join(struct tmem_pool *pool, struct client *new_client) } /* reassign "ownership" of the pool to another client that shares this pool */ -static NOINLINE void shared_pool_reassign(struct tmem_pool *pool) +static void shared_pool_reassign(struct tmem_pool *pool) { struct share_list *sl; int poolid; @@ -1128,7 +1056,7 @@ static NOINLINE void shared_pool_reassign(struct tmem_pool *pool) /* destroy all objects with last_client same as passed cli_id, remove 
pool''s cli_id from list of sharers of this pool */ -static NOINLINE int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id) +static int shared_pool_quit(struct tmem_pool *pool, domid_t cli_id) { struct share_list *sl; int s_poolid; @@ -1374,12 +1302,10 @@ static int tmem_evict(void) found: ASSERT(pgp != NULL); - ASSERT_SENTINEL(pgp,PGD); obj = pgp->us.obj; ASSERT(obj != NULL); ASSERT(obj->no_evict == 0); ASSERT(obj->pool != NULL); - ASSERT_SENTINEL(obj,OBJ); pool = obj->pool; ASSERT_SPINLOCK(&obj->obj_spinlock); @@ -1454,13 +1380,12 @@ static inline void tmem_ensure_avail_pages(void) /************ TMEM CORE OPERATIONS ************************************/ -static NOINLINE int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, +static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { void *dst, *p; size_t size; int ret = 0; - DECL_LOCAL_CYC_COUNTER(compress); ASSERT(pgp != NULL); ASSERT(pgp->us.obj != NULL); @@ -1470,7 +1395,6 @@ static NOINLINE int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_p if ( pgp->pfp != NULL ) pgp_free_data(pgp, pgp->us.obj->pool); - START_CYC_COUNTER(compress); ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf); if ( ret <= 0 ) goto out; @@ -1493,11 +1417,10 @@ static NOINLINE int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_p ret = 1; out: - END_CYC_COUNTER(compress); return ret; } -static NOINLINE int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, +static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) { @@ -1587,7 +1510,7 @@ cleanup: } -static NOINLINE int do_tmem_put(struct tmem_pool *pool, +static int do_tmem_put(struct tmem_pool *pool, struct oid *oidp, uint32_t index, xen_pfn_t cmfn, pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) @@ -1731,14 
+1654,13 @@ free: return ret; } -static NOINLINE int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, +static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, xen_pfn_t cmfn, pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) { struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; struct client *client = pool->client; - DECL_LOCAL_CYC_COUNTER(decompress); int rc; if ( !_atomic_read(pool->pgp_count) ) @@ -1766,10 +1688,8 @@ static NOINLINE int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32 rc = pcd_copy_to_client(cmfn, pgp); else if ( pgp->size != 0 ) { - START_CYC_COUNTER(decompress); rc = tmem_decompress_to_client(cmfn, pgp->cdata, pgp->size, clibuf); - END_CYC_COUNTER(decompress); } else rc = tmem_copy_to_client(cmfn, pgp->pfp, tmem_offset, @@ -1818,7 +1738,7 @@ bad_copy: return rc; } -static NOINLINE int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index) +static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t index) { struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; @@ -1853,7 +1773,7 @@ out: return 1; } -static NOINLINE int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp) +static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp) { struct tmem_object_root *obj; @@ -1873,7 +1793,7 @@ out: return 1; } -static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id) +static int do_tmem_destroy_pool(uint32_t pool_id) { struct client *client = tmem_client_from_current(); struct tmem_pool *pool; @@ -1889,7 +1809,7 @@ static NOINLINE int do_tmem_destroy_pool(uint32_t pool_id) return 1; } -static NOINLINE int do_tmem_new_pool(domid_t this_cli_id, +static int do_tmem_new_pool(domid_t this_cli_id, uint32_t d_poolid, uint32_t flags, uint64_t uuid_lo, uint64_t uuid_hi) { @@ -2169,7 +2089,6 @@ static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, return 
sum; } -#ifdef TMEM_PERF static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, uint32_t len, bool_t use_long) { @@ -2177,15 +2096,6 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, int n = 0, sum = 0; n = scnprintf(info+n,BSIZE-n,"T="); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,succ_get,"G"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,succ_put,"P"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,non_succ_get,"g"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,non_succ_put,"p"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,flush,"F"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,flush_obj,"O"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,pg_copy,"C"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,compress,"c"); - n += SCNPRINTF_CYC_COUNTER(info+n,BSIZE-n,decompress,"d"); n--; /* overwrite trailing comma */ n += scnprintf(info+n,BSIZE-n,"\n"); if ( sum + n >= len ) @@ -2194,9 +2104,6 @@ static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, sum += n; return sum; } -#else -#define tmemc_list_global_perf(_buf,_off,_len,_use) (0) -#endif static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, bool_t use_long) @@ -2304,7 +2211,7 @@ static int tmemc_set_var(domid_t cli_id, uint32_t subop, uint32_t arg1) return 0; } -static NOINLINE int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, +static int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, uint64_t uuid_hi, bool_t auth) { struct client *client; @@ -2341,7 +2248,7 @@ static NOINLINE int tmemc_shared_pool_auth(domid_t cli_id, uint64_t uuid_lo, return 1; } -static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id, +static int tmemc_save_subop(int cli_id, uint32_t pool_id, uint32_t subop, tmem_cli_va_param_t buf, uint32_t arg1) { struct client *client = tmem_client_from_cli_id(cli_id); @@ -2430,7 +2337,7 @@ static NOINLINE int tmemc_save_subop(int cli_id, uint32_t pool_id, return rc; } -static NOINLINE int tmemc_save_get_next_page(int 
cli_id, uint32_t pool_id, +static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, tmem_cli_va_param_t buf, uint32_t bufsize) { struct client *client = tmem_client_from_cli_id(cli_id); @@ -2485,7 +2392,7 @@ out: return ret; } -static NOINLINE int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, +static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, uint32_t bufsize) { struct client *client = tmem_client_from_cli_id(cli_id); @@ -2551,7 +2458,7 @@ static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oi return do_tmem_flush_page(pool,oidp,index); } -static NOINLINE int do_tmem_control(struct tmem_op *op) +static int do_tmem_control(struct tmem_op *op) { int ret; uint32_t pool_id = op->pool_id; @@ -2638,12 +2545,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) bool_t non_succ_get = 0, non_succ_put = 0; bool_t flush = 0, flush_obj = 0; bool_t tmem_write_lock_set = 0, tmem_read_lock_set = 0; - DECL_LOCAL_CYC_COUNTER(succ_get); - DECL_LOCAL_CYC_COUNTER(succ_put); - DECL_LOCAL_CYC_COUNTER(non_succ_get); - DECL_LOCAL_CYC_COUNTER(non_succ_put); - DECL_LOCAL_CYC_COUNTER(flush); - DECL_LOCAL_CYC_COUNTER(flush_obj); if ( !tmem_initialized ) return -ENODEV; @@ -2661,13 +2562,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) spin_lock(&tmem_spinlock); } - START_CYC_COUNTER(succ_get); - DUP_START_CYC_COUNTER(succ_put,succ_get); - DUP_START_CYC_COUNTER(non_succ_get,succ_get); - DUP_START_CYC_COUNTER(non_succ_put,succ_get); - DUP_START_CYC_COUNTER(flush,succ_get); - DUP_START_CYC_COUNTER(flush_obj,succ_get); - if ( client != NULL && tmem_client_is_dying(client) ) { rc = -ENODEV; @@ -2743,7 +2637,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) rc = -ENODEV; goto out; } - ASSERT_SENTINEL(pool,POOL); } oidp = (struct oid *)&op.u.gen.oid[0]; @@ -2808,19 +2701,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) out: if ( rc < 0 ) errored_tmem_ops++; - if ( succ_get ) - END_CYC_COUNTER_CLI(succ_get,client); - else if ( succ_put ) - 
END_CYC_COUNTER_CLI(succ_put,client); - else if ( non_succ_get ) - END_CYC_COUNTER_CLI(non_succ_get,client); - else if ( non_succ_put ) - END_CYC_COUNTER_CLI(non_succ_put,client); - else if ( flush ) - END_CYC_COUNTER_CLI(flush,client); - else if ( flush_obj ) - END_CYC_COUNTER_CLI(flush_obj,client); - if ( tmem_lock_all ) { if ( tmem_lock_all > 1 ) diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index d6e2e0d..1e002ae 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -36,8 +36,6 @@ integer_param("tmem_lock", opt_tmem_lock); EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0); -DECL_CYC_COUNTER(pg_copy); - /* these are a concurrency bottleneck, could be percpu and dynamically * allocated iff opt_tmem_compress */ #define LZO_WORKMEM_BYTES LZO1X_1_MEM_COMPRESS @@ -48,10 +46,7 @@ static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page); void tmem_copy_page(char *to, char*from) { - DECL_LOCAL_CYC_COUNTER(pg_copy); - START_CYC_COUNTER(pg_copy); memcpy(to, from, PAGE_SIZE); - END_CYC_COUNTER(pg_copy); } #if defined(CONFIG_ARM) diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 034fd5c..4e4c227 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -399,55 +399,4 @@ extern void tmem_persistent_pool_page_put(void *page_va); #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args) #define tmem_client_info(fmt, args...) 
printk(XENLOG_G_INFO fmt, ##args) -#define TMEM_PERF -#ifdef TMEM_PERF -#define DECL_CYC_COUNTER(x) \ - uint64_t x##_sum_cycles = 0, x##_count = 0; \ - uint32_t x##_min_cycles = 0x7fffffff, x##_max_cycles = 0; -#define EXTERN_CYC_COUNTER(x) \ - extern uint64_t x##_sum_cycles, x##_count; \ - extern uint32_t x##_min_cycles, x##_max_cycles; -#define DECL_LOCAL_CYC_COUNTER(x) \ - int64_t x##_start = 0 -#define START_CYC_COUNTER(x) x##_start = get_cycles() -#define DUP_START_CYC_COUNTER(x,y) x##_start = y##_start -/* following might race, but since its advisory only, don''t care */ -#define END_CYC_COUNTER(x) \ - do { \ - x##_start = get_cycles() - x##_start; \ - if (x##_start > 0 && x##_start < 1000000000) { \ - x##_sum_cycles += x##_start; x##_count++; \ - if ((uint32_t)x##_start < x##_min_cycles) x##_min_cycles = x##_start; \ - if ((uint32_t)x##_start > x##_max_cycles) x##_max_cycles = x##_start; \ - } \ - } while (0) -#define END_CYC_COUNTER_CLI(x,y) \ - do { \ - x##_start = get_cycles() - x##_start; \ - if (x##_start > 0 && x##_start < 1000000000) { \ - x##_sum_cycles += x##_start; x##_count++; \ - if ((uint32_t)x##_start < x##_min_cycles) x##_min_cycles = x##_start; \ - if ((uint32_t)x##_start > x##_max_cycles) x##_max_cycles = x##_start; \ - y->total_cycles += x##_start; \ - } \ - } while (0) -#define RESET_CYC_COUNTER(x) { x##_sum_cycles = 0, x##_count = 0; \ - x##_min_cycles = 0x7fffffff, x##_max_cycles = 0; } -#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) \ - scnprintf(buf,size, \ - tag"n:%"PRIu64","tag"t:%"PRIu64","tag"x:%"PRId32","tag"m:%"PRId32",", \ - x##_count,x##_sum_cycles,x##_max_cycles,x##_min_cycles) -#else -#define DECL_CYC_COUNTER(x) -#define EXTERN_CYC_COUNTER(x) \ - extern uint64_t x##_sum_cycles, x##_count; \ - extern uint32_t x##_min_cycles, x##_max_cycles; -#define DECL_LOCAL_CYC_COUNTER(x) do { } while (0) -#define START_CYC_COUNTER(x) do { } while (0) -#define DUP_START_CYC_COUNTER(x) do { } while (0) -#define END_CYC_COUNTER(x) do { } while 
(0) -#define SCNPRINTF_CYC_COUNTER(buf,size,x,tag) (0) -#define RESET_CYC_COUNTER(x) do { } while (0) -#endif - #endif /* __XEN_TMEM_XEN_H__ */ -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 02/16] tmem: cleanup: drop useless function 'tmem_copy_page'
Use memcpy directly. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem_xen.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index 1e002ae..af67703 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -44,11 +44,6 @@ static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, workmem); static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, dstmem); static DEFINE_PER_CPU_READ_MOSTLY(void *, scratch_page); -void tmem_copy_page(char *to, char*from) -{ - memcpy(to, from, PAGE_SIZE); -} - #if defined(CONFIG_ARM) static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn, struct page_info **pcli_pfp, bool_t cli_write) @@ -135,7 +130,7 @@ EXPORT int tmem_copy_from_client(struct page_info *pfp, } smp_mb(); if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) - tmem_copy_page(tmem_va, cli_va); + memcpy(tmem_va, cli_va, PAGE_SIZE); else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) ) { @@ -206,7 +201,7 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, tmem_mfn = page_to_mfn(pfp); tmem_va = map_domain_page(tmem_mfn); if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) - tmem_copy_page(cli_va, tmem_va); + memcpy(cli_va, tmem_va, PAGE_SIZE); else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) ) { if ( cli_va ) -- 1.7.10.4
TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 081772e..c272930 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -2646,11 +2646,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags, op.u.creat.uuid[0], op.u.creat.uuid[1]); break; - case TMEM_NEW_PAGE: - tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, - tmem_cli_buf_null); - break; case TMEM_PUT_PAGE: tmem_ensure_avail_pages(); rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, @@ -2676,22 +2671,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) flush = 1; rc = do_tmem_destroy_pool(op.pool_id); break; - case TMEM_READ: - rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, - op.u.gen.tmem_offset, op.u.gen.pfn_offset, - op.u.gen.len, tmem_cli_buf_null); - break; - case TMEM_WRITE: - rc = do_tmem_put(pool, oidp, - op.u.gen.index, op.u.gen.cmfn, - op.u.gen.tmem_offset, op.u.gen.pfn_offset, - op.u.gen.len, tmem_cli_buf_null); - break; - case TMEM_XCHG: - /* need to hold global lock to ensure xchg is atomic */ - tmem_client_warn("tmem_xchg op not implemented yet\n"); - rc = 0; - break; default: tmem_client_warn("tmem: op %d not implemented\n", op.cmd); rc = 0; -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 04/16] tmem: cleanup: rm unneeded parameters from put path
tmem only takes a page as unit, so parameters tmem_offset, pfn_offset and len are meanless, this patch remove those parameters from do_tmem_put() related path Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 32 +++++++++++++++----------------- xen/common/tmem_xen.c | 27 +++++---------------------- xen/include/xen/tmem_xen.h | 3 +-- 3 files changed, 21 insertions(+), 41 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index c272930..e2e69bf 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -1421,7 +1421,6 @@ out: } static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) { struct tmem_pool *pool; @@ -1442,7 +1441,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, if ( client->live_migrating ) goto failed_dup; /* no dups allowed when migrating */ /* can we successfully manipulate pgp to change out the data? 
*/ - if ( len != 0 && client->compress && pgp->size != 0 ) + if ( client->compress && pgp->size != 0 ) { ret = do_tmem_put_compress(pgp, cmfn, clibuf); if ( ret == 1 ) @@ -1461,9 +1460,7 @@ copy_uncompressed: if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) goto failed_dup; pgp->size = 0; - /* tmem_copy_from_client properly handles len==0 and offsets != 0 */ - ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, - tmem_cli_buf_null); + ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); if ( ret < 0 ) goto bad_copy; if ( tmem_dedup_enabled() && !is_persistent(pool) ) @@ -1509,11 +1506,9 @@ cleanup: return ret; } - static int do_tmem_put(struct tmem_pool *pool, struct oid *oidp, uint32_t index, - xen_pfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { struct tmem_object_root *obj = NULL, *objfound = NULL, *objnew = NULL; struct tmem_page_descriptor *pgp = NULL, *pgpdel = NULL; @@ -1527,8 +1522,7 @@ static int do_tmem_put(struct tmem_pool *pool, { ASSERT_SPINLOCK(&objfound->obj_spinlock); if ((pgp = pgp_lookup_in_obj(objfound, index)) != NULL) - return do_tmem_dup_put(pgp, cmfn, tmem_offset, pfn_offset, len, - clibuf); + return do_tmem_dup_put(pgp, cmfn, clibuf); } /* no puts allowed into a frozen pool (except dup puts) */ @@ -1560,7 +1554,7 @@ static int do_tmem_put(struct tmem_pool *pool, pgp->index = index; pgp->size = 0; - if ( len != 0 && client->compress ) + if ( client->compress ) { ASSERT(pgp->pfp == NULL); ret = do_tmem_put_compress(pgp, cmfn, clibuf); @@ -1586,9 +1580,7 @@ copy_uncompressed: ret = -ENOMEM; goto delete_and_free; } - /* tmem_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */ - ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, - clibuf); + ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf); if ( ret < 0 ) goto bad_copy; if ( tmem_dedup_enabled() && !is_persistent(pool) ) @@ 
-2440,10 +2432,17 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp struct client *client = tmem_client_from_cli_id(cli_id); struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) ? NULL : client->pools[pool_id]; + unsigned int pagesize; if ( pool == NULL ) return -1; - return do_tmem_put(pool, oidp, index, 0, 0, 0, bufsize, buf); + pagesize = 1 << (pool->pageshift + 12); + if (bufsize != pagesize) { + tmem_client_err("tmem: %s bufsize %d not page aligend(%d)\n", __func__, + bufsize, pagesize); + return -1; + } + return do_tmem_put(pool, oidp, index, 0, buf); } static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp, @@ -2648,8 +2647,7 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) break; case TMEM_PUT_PAGE: tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, - PAGE_SIZE, tmem_cli_buf_null); + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); if (rc == 1) succ_put = 1; else non_succ_put = 1; break; diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index af67703..165c7cf 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -100,25 +100,16 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp, #endif EXPORT int tmem_copy_from_client(struct page_info *pfp, - xen_pfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; char *tmem_va, *cli_va = NULL; struct page_info *cli_pfp = NULL; int rc = 1; - if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE ) - return -EINVAL; ASSERT(pfp != NULL); tmem_mfn = page_to_mfn(pfp); tmem_va = map_domain_page(tmem_mfn); - if ( tmem_offset == 0 && pfn_offset == 0 && len == 0 ) - { - memset(tmem_va, 0, PAGE_SIZE); - unmap_domain_page(tmem_va); - return 1; - } if ( guest_handle_is_null(clibuf) 
) { cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0); @@ -129,21 +120,13 @@ EXPORT int tmem_copy_from_client(struct page_info *pfp, } } smp_mb(); - if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) - memcpy(tmem_va, cli_va, PAGE_SIZE); - else if ( (tmem_offset+len <= PAGE_SIZE) && - (pfn_offset+len <= PAGE_SIZE) ) + if ( cli_va ) { - if ( cli_va ) - memcpy(tmem_va + tmem_offset, cli_va + pfn_offset, len); - else if ( copy_from_guest_offset(tmem_va + tmem_offset, clibuf, - pfn_offset, len) ) - rc = -EFAULT; + memcpy(tmem_va, cli_va, PAGE_SIZE); + cli_put_page(cli_va, cli_pfp, cli_mfn, 0); } - else if ( len ) + else rc = -EINVAL; - if ( cli_va ) - cli_put_page(cli_va, cli_pfp, cli_mfn, 0); unmap_domain_page(tmem_va); return rc; } diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 4e4c227..25ce268 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -385,8 +385,7 @@ int tmem_decompress_to_client(xen_pfn_t, void *, size_t, int tmem_compress_from_client(xen_pfn_t, void **, size_t *, tmem_cli_va_param_t); -int tmem_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); +int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 05/16] tmem: cleanup: rm unneeded parameters from get path
tmem only takes a page as unit, so the parameters tmem_offset, pfn_offset and len are meaningless; this patch removes them from the do_tmem_get() related path. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 19 ++++++++----------- xen/common/tmem_xen.c | 18 ++++-------------- xen/include/xen/tmem_xen.h | 3 +-- 3 files changed, 13 insertions(+), 27 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index e2e69bf..de3559d 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -330,8 +330,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) else if ( tmem_tze_enabled() && pcd->size < PAGE_SIZE ) ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size); else - ret = tmem_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE, - tmem_cli_buf_null); + ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null); tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]); return ret; } @@ -1647,8 +1646,7 @@ free: } static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, - xen_pfn_t cmfn, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; @@ -1680,12 +1678,10 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, rc = pcd_copy_to_client(cmfn, pgp); else if ( pgp->size != 0 ) { - rc = tmem_decompress_to_client(cmfn, pgp->cdata, - pgp->size, clibuf); + rc = tmem_decompress_to_client(cmfn, pgp->cdata, pgp->size, clibuf); } else - rc = tmem_copy_to_client(cmfn, pgp->pfp, tmem_offset, - pfn_offset, len, clibuf); + rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf); if ( rc <= 0 ) goto bad_copy; @@ -2377,7 +2373,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, h.index = pgp->index; tmem_copy_to_client_buf(buf, &h, 1); tmem_client_buf_add(buf, sizeof(h)); - ret = do_tmem_get(pool, &oid, pgp->index, 0, 0, 0, pagesize,
buf); + ret = do_tmem_get(pool, &oid, pgp->index, 0, buf); out: tmem_spin_unlock(&pers_lists_spinlock); @@ -2647,13 +2643,14 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) break; case TMEM_PUT_PAGE: tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, + tmem_cli_buf_null); if (rc == 1) succ_put = 1; else non_succ_put = 1; break; case TMEM_GET_PAGE: rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, - 0, 0, PAGE_SIZE, tmem_cli_buf_null); + tmem_cli_buf_null); if (rc == 1) succ_get = 1; else non_succ_get = 1; break; diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index 165c7cf..efc2259 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -164,7 +164,6 @@ EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, } EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; @@ -172,8 +171,6 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, struct page_info *cli_pfp = NULL; int rc = 1; - if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE ) - return -EINVAL; ASSERT(pfp != NULL); if ( guest_handle_is_null(clibuf) ) { @@ -183,21 +180,14 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, } tmem_mfn = page_to_mfn(pfp); tmem_va = map_domain_page(tmem_mfn); - if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) - memcpy(cli_va, tmem_va, PAGE_SIZE); - else if ( (tmem_offset+len <= PAGE_SIZE) && (pfn_offset+len <= PAGE_SIZE) ) + if ( cli_va ) { - if ( cli_va ) - memcpy(cli_va + pfn_offset, tmem_va + tmem_offset, len); - else if ( copy_to_guest_offset(clibuf, pfn_offset, - tmem_va + tmem_offset, len) ) - rc = -EFAULT; + memcpy(cli_va, tmem_va, PAGE_SIZE); + cli_put_page(cli_va, cli_pfp, cli_mfn, 1); } - else if ( len ) 
+ else rc = -EINVAL; unmap_domain_page(tmem_va); - if ( cli_va ) - cli_put_page(cli_va, cli_pfp, cli_mfn, 1); smp_mb(); return rc; } diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 25ce268..ae6acf3 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -387,8 +387,7 @@ int tmem_compress_from_client(xen_pfn_t, void **, size_t *, int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); -int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset, - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); +int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t); extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len); extern void *tmem_persistent_pool_page_get(unsigned long size); -- 1.7.10.4
Reorg code logic of do_tmem_put() to make it more readable and also drop useless local parameters. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 87 ++++++++++++++++++++++++++++++----------------------- 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index de3559d..1c9dd5a 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -1509,50 +1509,56 @@ static int do_tmem_put(struct tmem_pool *pool, struct oid *oidp, uint32_t index, xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { - struct tmem_object_root *obj = NULL, *objfound = NULL, *objnew = NULL; - struct tmem_page_descriptor *pgp = NULL, *pgpdel = NULL; - struct client *client = pool->client; - int ret = client->frozen ? -EFROZEN : -ENOMEM; + struct tmem_object_root *obj = NULL; + struct tmem_page_descriptor *pgp = NULL; + struct client *client; + int ret, newobj = 0; ASSERT(pool != NULL); + client = pool->client; + ret = client->frozen ? -EFROZEN : -ENOMEM; pool->puts++; /* does page already exist (dup)? 
if so, handle specially */ - if ( (obj = objfound = obj_find(pool,oidp)) != NULL ) + if ( (obj = obj_find(pool,oidp)) != NULL ) { - ASSERT_SPINLOCK(&objfound->obj_spinlock); - if ((pgp = pgp_lookup_in_obj(objfound, index)) != NULL) + if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL) + { return do_tmem_dup_put(pgp, cmfn, clibuf); + } + else + { + /* no puts allowed into a frozen pool (except dup puts) */ + if ( client->frozen ) + goto unlock_obj; + } } - - /* no puts allowed into a frozen pool (except dup puts) */ - if ( client->frozen ) - goto free; - - if ( (objfound == NULL) ) + else { + /* no puts allowed into a frozen pool (except dup puts) */ + if ( client->frozen ) + return ret; tmem_write_lock(&pool->pool_rwlock); - if ( (obj = objnew = obj_new(pool,oidp)) == NULL ) + if ( (obj = obj_new(pool,oidp)) == NULL ) { tmem_write_unlock(&pool->pool_rwlock); return -ENOMEM; } - ASSERT_SPINLOCK(&objnew->obj_spinlock); + newobj= 1; tmem_write_unlock(&pool->pool_rwlock); } - ASSERT((obj != NULL)&&((objnew==obj)||(objfound==obj))&&(objnew!=objfound)); + /* When arrive here, we have a spinlocked obj for use */ ASSERT_SPINLOCK(&obj->obj_spinlock); if ( (pgp = pgp_alloc(obj)) == NULL ) - goto free; + goto unlock_obj; ret = pgp_add_to_obj(obj, index, pgp); if ( ret == -ENOMEM ) /* warning, may result in partially built radix tree ("stump") */ - goto free; - ASSERT(ret != -EEXIST); + goto free_pgp; + pgp->index = index; pgp->size = 0; - if ( client->compress ) { ASSERT(pgp->pfp == NULL); @@ -1562,7 +1568,7 @@ static int do_tmem_put(struct tmem_pool *pool, if ( ret == -ENOMEM ) { client->compress_nomem++; - goto delete_and_free; + goto del_pgp_from_obj; } if ( ret == 0 ) { @@ -1577,15 +1583,16 @@ copy_uncompressed: if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) { ret = -ENOMEM; - goto delete_and_free; + goto del_pgp_from_obj; } ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf); if ( ret < 0 ) goto bad_copy; + if ( tmem_dedup_enabled() && !is_persistent(pool) ) { if ( 
pcd_associate(pgp,NULL,0) == -ENOMEM ) - goto delete_and_free; + goto del_pgp_from_obj; } insert_page: @@ -1601,18 +1608,23 @@ insert_page: if (++client->eph_count > client->eph_count_max) client->eph_count_max = client->eph_count; tmem_spin_unlock(&eph_lists_spinlock); - } else { /* is_persistent */ + } + else + { /* is_persistent */ tmem_spin_lock(&pers_lists_spinlock); list_add_tail(&pgp->us.pool_pers_pages, &pool->persistent_page_list); tmem_spin_unlock(&pers_lists_spinlock); } - ASSERT( ((objnew==obj)||(objfound==obj)) && (objnew!=objfound)); + if ( is_shared(pool) ) obj->last_client = client->cli_id; obj->no_evict = 0; + + /* free the obj spinlock */ tmem_spin_unlock(&obj->obj_spinlock); pool->good_puts++; + if ( is_persistent(pool) ) client->succ_pers_puts++; else @@ -1622,25 +1634,24 @@ insert_page: bad_copy: failed_copies++; -delete_and_free: +del_pgp_from_obj: ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1)); - pgpdel = pgp_delete_from_obj(obj, pgp->index); - ASSERT(pgp == pgpdel); + pgp_delete_from_obj(obj, pgp->index); -free: - if ( pgp ) - pgp_delete(pgp,0); - if ( objfound ) - { - objfound->no_evict = 0; - tmem_spin_unlock(&objfound->obj_spinlock); - } - if ( objnew ) +free_pgp: + pgp_delete(pgp,0); +unlock_obj: + if ( newobj ) { tmem_write_lock(&pool->pool_rwlock); - obj_free(objnew,0); + obj_free(obj,0); tmem_write_unlock(&pool->pool_rwlock); } + else + { + obj->no_evict = 0; + tmem_spin_unlock(&obj->obj_spinlock); + } pool->no_mem_puts++; return ret; } -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 07/16] tmem: drop unneeded is_ephemeral() and is_private()
Can use !is_persistent() and !is_shared() to replace them directly. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 1c9dd5a..f37e054 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -126,9 +126,7 @@ struct tmem_pool { }; #define is_persistent(_p) (_p->persistent) -#define is_ephemeral(_p) (!(_p->persistent)) #define is_shared(_p) (_p->shared) -#define is_private(_p) (!(_p->shared)) struct oid { uint64_t oid[3]; @@ -604,7 +602,7 @@ static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) ASSERT(pgp_lookup_in_obj(pgp->us.obj,pgp->index) == NULL); ASSERT(pgp->us.obj->pool != NULL); pool = pgp->us.obj->pool; - if ( is_ephemeral(pool) ) + if ( !is_persistent(pool) ) { ASSERT(list_empty(&pgp->global_eph_pages)); ASSERT(list_empty(&pgp->us.client_eph_pages)); @@ -643,7 +641,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) ASSERT(pgp->us.obj->pool != NULL); client = pgp->us.obj->pool->client; ASSERT(client != NULL); - if ( is_ephemeral(pgp->us.obj->pool) ) + if ( !is_persistent(pgp->us.obj->pool) ) { if ( !no_eph_lock ) tmem_spin_lock(&eph_lists_spinlock); @@ -1596,7 +1594,7 @@ copy_uncompressed: } insert_page: - if ( is_ephemeral(pool) ) + if ( !is_persistent(pool) ) { tmem_spin_lock(&eph_lists_spinlock); list_add_tail(&pgp->global_eph_pages, @@ -1696,9 +1694,9 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, if ( rc <= 0 ) goto bad_copy; - if ( is_ephemeral(pool) ) + if ( !is_persistent(pool) ) { - if ( is_private(pool) ) + if ( !is_shared(pool) ) { pgp_delete(pgp,0); if ( obj->pgp_count == 0 ) @@ -1724,10 +1722,10 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, tmem_spin_unlock(&obj->obj_spinlock); } pool->found_gets++; - if ( is_ephemeral(pool) ) - client->succ_eph_gets++; - else + if ( 
is_persistent(pool) ) client->succ_pers_gets++; + else + client->succ_eph_gets++; return 1; bad_copy: @@ -2348,7 +2346,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, struct tmem_handle h; unsigned int pagesize; - if ( pool == NULL || is_ephemeral(pool) ) + if ( pool == NULL || !is_persistent(pool) ) return -1; pagesize = 1 << (pool->pageshift + 12); -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 08/16] tmem: cleanup: rm useless EXPORT/FORWARD define
It's meaningless to define EXPORT/FORWARD and nobody uses them. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 16 ++++++---------- xen/common/tmem_xen.c | 38 ++++++++++++++++++-------------------- 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index f37e054..6afbdbc 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -24,9 +24,6 @@ #include <xen/list.h> #include <xen/init.h> -#define EXPORT /* indicates code other modules are dependent upon */ -#define FORWARD - #define TMEM_SPEC_VERSION 1 /* global statistics (none need to be locked) */ @@ -212,8 +209,8 @@ static int tmem_initialized = 0; /************ CONCURRENCY ***********************************************/ -EXPORT DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ -EXPORT DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ +DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ +DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ static DEFINE_SPINLOCK(pers_lists_spinlock); @@ -2538,7 +2535,7 @@ static int do_tmem_control(struct tmem_op *op) /************ EXPORTed FUNCTIONS **************************************/ -EXPORT long do_tmem_op(tmem_cli_op_t uops) +long do_tmem_op(tmem_cli_op_t uops) { struct tmem_op op; struct client *client = tmem_client_from_current(); @@ -2703,7 +2700,7 @@ out: } /* this should be called when the host is destroying a client */ -EXPORT void tmem_destroy(void *v) +void tmem_destroy(void *v) { struct client *client = (struct client *)v; @@ -2732,7 +2729,7 @@ EXPORT void tmem_destroy(void *v) } /* freezing all pools guarantees that no additional memory will be consumed */ -EXPORT void tmem_freeze_all(unsigned char key) +void tmem_freeze_all(unsigned char key) { static int freeze = 0; @@ -2751,8 +2748,7 @@ EXPORT void tmem_freeze_all(unsigned char key) } #define MAX_EVICTS 10 /* should be variable or
set via TMEMC_ ?? */ - -EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) +void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) { struct page_info *pfp; unsigned long evicts_per_relinq = 0; diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index efc2259..fbd1acc 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -14,27 +14,25 @@ #include <xen/cpu.h> #include <xen/init.h> -#define EXPORT /* indicates code other modules are dependent upon */ - -EXPORT bool_t __read_mostly opt_tmem = 0; +bool_t __read_mostly opt_tmem = 0; boolean_param("tmem", opt_tmem); -EXPORT bool_t __read_mostly opt_tmem_compress = 0; +bool_t __read_mostly opt_tmem_compress = 0; boolean_param("tmem_compress", opt_tmem_compress); -EXPORT bool_t __read_mostly opt_tmem_dedup = 0; +bool_t __read_mostly opt_tmem_dedup = 0; boolean_param("tmem_dedup", opt_tmem_dedup); -EXPORT bool_t __read_mostly opt_tmem_tze = 0; +bool_t __read_mostly opt_tmem_tze = 0; boolean_param("tmem_tze", opt_tmem_tze); -EXPORT bool_t __read_mostly opt_tmem_shared_auth = 0; +bool_t __read_mostly opt_tmem_shared_auth = 0; boolean_param("tmem_shared_auth", opt_tmem_shared_auth); -EXPORT int __read_mostly opt_tmem_lock = 0; +int __read_mostly opt_tmem_lock = 0; integer_param("tmem_lock", opt_tmem_lock); -EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0); +atomic_t freeable_page_count = ATOMIC_INIT(0); /* these are a concurrency bottleneck, could be percpu and dynamically * allocated iff opt_tmem_compress */ @@ -99,7 +97,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp, } #endif -EXPORT int tmem_copy_from_client(struct page_info *pfp, +int tmem_copy_from_client(struct page_info *pfp, xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; @@ -131,7 +129,7 @@ EXPORT int tmem_copy_from_client(struct page_info *pfp, return rc; } -EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, +int 
tmem_compress_from_client(xen_pfn_t cmfn, void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf) { int ret = 0; @@ -163,7 +161,7 @@ EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, return 1; } -EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, +int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, tmem_cli_va_param_t clibuf) { unsigned long tmem_mfn, cli_mfn = 0; @@ -192,7 +190,7 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, return rc; } -EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, +int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, size_t size, tmem_cli_va_param_t clibuf) { unsigned long cli_mfn = 0; @@ -221,7 +219,7 @@ EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, return 1; } -EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, +int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len) { void *cli_va; @@ -245,12 +243,12 @@ EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, /****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/ -EXPORT struct xmem_pool *tmem_mempool = 0; -EXPORT unsigned int tmem_mempool_maxalloc = 0; +struct xmem_pool *tmem_mempool = 0; +unsigned int tmem_mempool_maxalloc = 0; -EXPORT DEFINE_SPINLOCK(tmem_page_list_lock); -EXPORT PAGE_LIST_HEAD(tmem_page_list); -EXPORT unsigned long tmem_page_list_pages = 0; +DEFINE_SPINLOCK(tmem_page_list_lock); +PAGE_LIST_HEAD(tmem_page_list); +unsigned long tmem_page_list_pages = 0; static noinline void *tmem_mempool_page_get(unsigned long size) { @@ -352,7 +350,7 @@ static struct notifier_block cpu_nfb = { .notifier_call = cpu_callback }; -EXPORT int __init tmem_init(void) +int __init tmem_init(void) { unsigned int cpu; -- 1.7.10.4
tmemc_list() collects a lot of statistics which are not well defined, unreadable, and mostly useless. This patch drops that messy code and leaves the TMEMC_LIST subops unimplemented temporarily. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 171 +---------------------------------------------------- 1 file changed, 2 insertions(+), 169 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 6afbdbc..48b67bf 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -29,12 +29,10 @@ /* global statistics (none need to be locked) */ static unsigned long total_tmem_ops = 0; static unsigned long errored_tmem_ops = 0; -static unsigned long total_flush_pool = 0; static unsigned long alloc_failed = 0, alloc_page_failed = 0; static unsigned long evicted_pgs = 0, evict_attempts = 0; static unsigned long relinq_pgs = 0, relinq_attempts = 0; static unsigned long max_evicts_per_relinq = 0; -static unsigned long low_on_memory = 0; static unsigned long deduped_puts = 0; static unsigned long tot_good_eph_puts = 0; static int global_obj_count_max = 0; @@ -1980,176 +1978,11 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) return flushed_kb; } -/* - * These tmemc_list* routines output lots of stats in a format that is - * intended to be program-parseable, not human-readable. Further, by - * tying each group of stats to a line format indicator (e.g. G= for - * global stats) and each individual stat to a two-letter specifier - * (e.g. Ec:nnnnn in the G= line says there are nnnnn pages in the - * global ephemeral pool), it should allow the stats reported to be - * forward and backwards compatible as tmem evolves.
- */ -#define BSIZE 1024 - -static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, - int off, uint32_t len, bool_t use_long) -{ - char info[BSIZE]; - int i, n = 0, sum = 0; - struct tmem_pool *p; - bool_t s; - - n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,ca:%d,co:%d,fr:%d," - "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c", - c->cli_id, c->weight, c->cap, c->compress, c->frozen, - c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets, - use_long ? '','' : ''\n''); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n", - c->eph_count, c->eph_count_max, - c->compressed_pages, c->compressed_sum_size, - c->compress_poor, c->compress_nomem); - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); - sum += n; - for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) - { - if ( (p = c->pools[i]) == NULL ) - continue; - s = is_shared(p); - n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d," - "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c", - c->cli_id, p->pool_id, - is_persistent(p) ? ''P'' : ''E'', s ? ''S'' : ''P'', - (uint64_t)(s ? p->uuid[0] : 0), - (uint64_t)(s ? p->uuid[1] : 0LL), - use_long ? 
'','' : ''\n''); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", - _atomic_read(p->pgp_count), p->pgp_count_max, - p->obj_count, p->obj_count_max, - p->objnode_count, p->objnode_count_max, - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, - p->no_mem_puts, - p->found_gets, p->gets, - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); - if ( sum + n >= len ) - return sum; - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); - sum += n; - } - return sum; -} - -static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, - bool_t use_long) -{ - char info[BSIZE]; - int i, n = 0, sum = 0; - struct tmem_pool *p; - struct share_list *sl; - - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) - { - if ( (p = global_shared_pools[i]) == NULL ) - continue; - n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64, - i, is_persistent(p) ? ''P'' : ''E'', - is_shared(p) ? ''S'' : ''P'', - p->uuid[0], p->uuid[1]); - list_for_each_entry(sl,&p->share_list, share_list) - n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id); - n += scnprintf(info+n,BSIZE-n,"%c", use_long ? 
'','' : ''\n''); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", - _atomic_read(p->pgp_count), p->pgp_count_max, - p->obj_count, p->obj_count_max, - p->objnode_count, p->objnode_count_max, - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, - p->no_mem_puts, - p->found_gets, p->gets, - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); - if ( sum + n >= len ) - return sum; - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); - sum += n; - } - return sum; -} - -static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, - uint32_t len, bool_t use_long) -{ - char info[BSIZE]; - int n = 0, sum = 0; - - n = scnprintf(info+n,BSIZE-n,"T="); - n--; /* overwrite trailing comma */ - n += scnprintf(info+n,BSIZE-n,"\n"); - if ( sum + n >= len ) - return sum; - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); - sum += n; - return sum; -} - -static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, - bool_t use_long) -{ - char info[BSIZE]; - int n = 0, sum = off; - - n += scnprintf(info,BSIZE,"G=" - "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," - "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", - total_tmem_ops, errored_tmem_ops, failed_copies, - alloc_failed, alloc_page_failed, tmem_page_list_pages, - low_on_memory, evicted_pgs, - evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq, - total_flush_pool, use_long ? 
'','' : ''\n''); - if (use_long) - n += scnprintf(info+n,BSIZE-n, - "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," - "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", - global_eph_count, global_eph_count_max, - _atomic_read(global_obj_count), global_obj_count_max, - _atomic_read(global_rtree_node_count), global_rtree_node_count_max, - _atomic_read(global_pgp_count), global_pgp_count_max, - _atomic_read(global_page_count), global_page_count_max, - _atomic_read(global_pcd_count), global_pcd_count_max, - tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize); - if ( sum + n >= len ) - return sum; - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); - sum += n; - return sum; -} - static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len, bool_t use_long) { - struct client *client; - int off = 0; - - if ( cli_id == TMEM_CLI_ID_NULL ) { - off = tmemc_list_global(buf,0,len,use_long); - off += tmemc_list_shared(buf,off,len-off,use_long); - list_for_each_entry(client,&global_client_list,client_list) - off += tmemc_list_client(client, buf, off, len-off, use_long); - off += tmemc_list_global_perf(buf,off,len-off,use_long); - } - else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) - return -1; - else - off = tmemc_list_client(client, buf, 0, len, use_long); - - return 0; + tmem_client_info("tmemc_list is not implemented yet!\n"); + return -1; } static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1) -- 1.7.10.4
There are so many runtime statistics which are unreadable and hard to maintain. This patch drops them temporarily for code cleanup, until we have clearly defined which runtime statistics are really needed. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 187 ++++------------------------------------------------- 1 file changed, 13 insertions(+), 174 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 48b67bf..c1b3e21 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -26,27 +26,7 @@ #define TMEM_SPEC_VERSION 1 -/* global statistics (none need to be locked) */ -static unsigned long total_tmem_ops = 0; -static unsigned long errored_tmem_ops = 0; -static unsigned long alloc_failed = 0, alloc_page_failed = 0; -static unsigned long evicted_pgs = 0, evict_attempts = 0; -static unsigned long relinq_pgs = 0, relinq_attempts = 0; -static unsigned long max_evicts_per_relinq = 0; -static unsigned long deduped_puts = 0; -static unsigned long tot_good_eph_puts = 0; -static int global_obj_count_max = 0; -static int global_pgp_count_max = 0; -static int global_pcd_count_max = 0; -static int global_page_count_max = 0; -static int global_rtree_node_count_max = 0; -static long global_eph_count_max = 0; -static unsigned long failed_copies; -static unsigned long pcd_tot_tze_size = 0; -static unsigned long pcd_tot_csize = 0; -/************ CORE DATA STRUCTURES ************************************/ - #define MAX_POOLS_PER_DOMAIN 16 #define MAX_GLOBAL_SHARED_POOLS 16 @@ -59,7 +39,7 @@ struct client { struct domain *domain; struct xmem_pool *persistent_pool; struct list_head ephemeral_page_list; - long eph_count, eph_count_max; + long eph_count; domid_t cli_id; uint32_t weight; uint32_t cap; @@ -71,12 +51,6 @@ struct client { bool_t was_frozen; struct list_head persistent_invalidated_list; struct tmem_page_descriptor *cur_pgp; - /* statistics collection */ - unsigned long compress_poor, compress_nomem; - unsigned long compressed_pages; - uint64_t
compressed_sum_size; - uint64_t total_cycles; - unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets; /* shared pool authentication */ uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2]; }; @@ -107,17 +81,6 @@ struct tmem_pool { struct tmem_page_descriptor *cur_pgp; /* statistics collection */ atomic_t pgp_count; - int pgp_count_max; - long obj_count; /* atomicity depends on pool_rwlock held for write */ - long obj_count_max; - unsigned long objnode_count, objnode_count_max; - uint64_t sum_life_cycles; - uint64_t sum_evicted_cycles; - unsigned long puts, good_puts, no_mem_puts; - unsigned long dup_puts_flushed, dup_puts_replaced; - unsigned long gets, found_gets; - unsigned long flushs, flushs_found; - unsigned long flush_objs, flush_objs_found; }; #define is_persistent(_p) (_p->persistent) @@ -130,7 +93,6 @@ struct oid { struct tmem_object_root { struct oid oid; struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */ - unsigned long objnode_count; /* atomicity depends on obj_spinlock */ long pgp_count; /* atomicity depends on obj_spinlock */ struct radix_tree_root tree_root; /* tree of pages within object */ struct tmem_pool *pool; @@ -196,7 +158,6 @@ struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */ rwlock_t pcd_tree_rwlocks[256]; /* poor man''s concurrency for now */ static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */ - static LIST_HEAD(global_client_list); static LIST_HEAD(global_pool_list); @@ -206,7 +167,6 @@ static atomic_t client_weight_total = ATOMIC_INIT(0); static int tmem_initialized = 0; /************ CONCURRENCY ***********************************************/ - DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ @@ -226,23 +186,6 @@ static DEFINE_SPINLOCK(pers_lists_spinlock); /* global counters (should use long_atomic_t access) */ 
static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */ -static atomic_t global_obj_count = ATOMIC_INIT(0); -static atomic_t global_pgp_count = ATOMIC_INIT(0); -static atomic_t global_pcd_count = ATOMIC_INIT(0); -static atomic_t global_page_count = ATOMIC_INIT(0); -static atomic_t global_rtree_node_count = ATOMIC_INIT(0); - -#define atomic_inc_and_max(_c) do { \ - atomic_inc(&_c); \ - if ( _atomic_read(_c) > _c##_max ) \ - _c##_max = _atomic_read(_c); \ -} while (0) - -#define atomic_dec_and_assert(_c) do { \ - atomic_dec(&_c); \ - ASSERT(_atomic_read(_c) >= 0); \ -} while (0) - /************ MEMORY ALLOCATION INTERFACE *****************************/ static void *tmem_malloc(size_t size, struct tmem_pool *pool) @@ -259,8 +202,6 @@ static void *tmem_malloc(size_t size, struct tmem_pool *pool) ASSERT( tmem_mempool != NULL ); v = xmem_pool_alloc(size, tmem_mempool); } - if ( v == NULL ) - alloc_failed++; return v; } @@ -286,10 +227,6 @@ static struct page_info *tmem_page_alloc(struct tmem_pool *pool) pfp = tmem_alloc_page_thispool(pool->client->domain); else pfp = tmem_alloc_page(pool,0); - if ( pfp == NULL ) - alloc_page_failed++; - else - atomic_inc_and_max(global_page_count); return pfp; } @@ -300,7 +237,6 @@ static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp) tmem_free_page(pfp); else tmem_free_page_thispool(pfp); - atomic_dec_and_assert(global_page_count); } /************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ @@ -339,7 +275,6 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool pagesize_t pcd_size = pcd->size; pagesize_t pgp_size = pgp->size; char *pcd_cdata = pgp->pcd->cdata; - pagesize_t pcd_csize = pgp->pcd->size; ASSERT(tmem_dedup_enabled()); ASSERT(firstbyte != NOT_SHAREABLE); @@ -368,25 +303,18 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); /* now free up the pcd memory */ tmem_free(pcd, 
NULL); - atomic_dec_and_assert(global_pcd_count); if ( pgp_size != 0 && pcd_size < PAGE_SIZE ) { /* compressed data */ tmem_free(pcd_cdata, pool); - pcd_tot_csize -= pcd_csize; } else if ( pcd_size != PAGE_SIZE ) { /* trailing zero data */ - pcd_tot_tze_size -= pcd_size; if ( pcd_size ) tmem_free(pcd_tze, pool); } else { /* real physical page */ - if ( tmem_tze_enabled() ) - pcd_tot_tze_size -= PAGE_SIZE; - if ( tmem_compression_enabled() ) - pcd_tot_csize -= PAGE_SIZE; tmem_page_free(pool,pfp); } tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); @@ -467,7 +395,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize /* but if compressed, data is assumed static so don''t free! */ if ( cdata == NULL ) tmem_page_free(pgp->us.obj->pool,pgp->pfp); - deduped_puts++; goto match; } } @@ -485,7 +412,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize goto unlock; } } - atomic_inc_and_max(global_pcd_count); RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); /* is this necessary */ INIT_LIST_HEAD(&pcd->pgp_list); /* is this necessary */ pcd->pgp_ref_count = 0; @@ -493,7 +419,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize { memcpy(pcd->cdata,cdata,csize); pcd->size = csize; - pcd_tot_csize += csize; } else if ( pfp_size == 0 ) { ASSERT(tmem_tze_enabled()); pcd->size = 0; @@ -502,15 +427,10 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) { tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size); pcd->size = pfp_size; - pcd_tot_tze_size += pfp_size; tmem_page_free(pgp->us.obj->pool,pgp->pfp); } else { pcd->pfp = pgp->pfp; pcd->size = PAGE_SIZE; - if ( tmem_tze_enabled() ) - pcd_tot_tze_size += PAGE_SIZE; - if ( tmem_compression_enabled() ) - pcd_tot_csize += PAGE_SIZE; } rb_link_node(&pcd->pcd_rb_tree_node, parent, new); rb_insert_color(&pcd->pcd_rb_tree_node, root); @@ -553,8 +473,7 @@ static 
struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) pgp->size = -1; pgp->index = -1; pgp->timestamp = get_cycles(); - atomic_inc_and_max(global_pgp_count); - atomic_inc_and_max(pool->pgp_count); + atomic_inc(&pool->pgp_count); return pgp; } @@ -578,11 +497,6 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po tmem_free(pgp->cdata, pool); else tmem_page_free(pgp->us.obj->pool,pgp->pfp); - if ( pool != NULL && pgp_size ) - { - pool->client->compressed_pages--; - pool->client->compressed_sum_size -= pgp_size; - } pgp->pfp = NULL; pgp->size = -1; } @@ -603,8 +517,7 @@ static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) ASSERT(list_empty(&pgp->us.client_eph_pages)); } pgp_free_data(pgp, pool); - atomic_dec_and_assert(global_pgp_count); - atomic_dec_and_assert(pool->pgp_count); + atomic_dec(&pool->pgp_count); pgp->size = -1; if ( is_persistent(pool) && pool->client->live_migrating ) { @@ -676,7 +589,6 @@ static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) ASSERT(pgp->us.obj != NULL); ASSERT(pgp->us.obj->pool != NULL); life = get_cycles() - pgp->timestamp; - pgp->us.obj->pool->sum_life_cycles += life; pgp_delist(pgp, no_eph_lock); pgp_free(pgp,1); } @@ -734,10 +646,6 @@ static struct radix_tree_node *rtn_alloc(void *arg) return NULL; objnode->obj = obj; memset(&objnode->rtn, 0, sizeof(struct radix_tree_node)); - if (++obj->pool->objnode_count > obj->pool->objnode_count_max) - obj->pool->objnode_count_max = obj->pool->objnode_count; - atomic_inc_and_max(global_rtree_node_count); - obj->objnode_count++; return &objnode->rtn; } @@ -753,11 +661,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg) ASSERT_SPINLOCK(&objnode->obj->obj_spinlock); pool = objnode->obj->pool; ASSERT(pool != NULL); - pool->objnode_count--; - objnode->obj->objnode_count--; objnode->obj = NULL; tmem_free(objnode, pool); - atomic_dec_and_assert(global_rtree_node_count); } /************ POOL OBJECT 
COLLECTION MANIPULATION ROUTINES *******************/ @@ -850,15 +755,11 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance) ASSERT_WRITELOCK(&pool->pool_rwlock); if ( obj->tree_root.rnode != NULL ) /* may be a "stump" with no leaves */ radix_tree_destroy(&obj->tree_root, pgp_destroy); - ASSERT((long)obj->objnode_count == 0); ASSERT(obj->tree_root.rnode == NULL); - pool->obj_count--; - ASSERT(pool->obj_count >= 0); obj->pool = NULL; old_oid = obj->oid; oid_set_invalid(&obj->oid); obj->last_client = TMEM_CLI_ID_NULL; - atomic_dec_and_assert(global_obj_count); /* use no_rebalance only if all objects are being destroyed anyway */ if ( !no_rebalance ) rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]); @@ -905,16 +806,11 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid ASSERT_WRITELOCK(&pool->pool_rwlock); if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL ) return NULL; - pool->obj_count++; - if (pool->obj_count > pool->obj_count_max) - pool->obj_count_max = pool->obj_count; - atomic_inc_and_max(global_obj_count); radix_tree_init(&obj->tree_root); radix_tree_set_alloc_callbacks(&obj->tree_root, rtn_alloc, rtn_free, obj); spin_lock_init(&obj->obj_spinlock); obj->pool = pool; obj->oid = *oidp; - obj->objnode_count = 0; obj->pgp_count = 0; obj->last_client = TMEM_CLI_ID_NULL; tmem_spin_lock(&obj->obj_spinlock); @@ -978,16 +874,9 @@ static struct tmem_pool * pool_alloc(void) INIT_LIST_HEAD(&pool->persistent_page_list); pool->cur_pgp = NULL; rwlock_init(&pool->pool_rwlock); - pool->pgp_count_max = pool->obj_count_max = 0; - pool->objnode_count = pool->objnode_count_max = 0; atomic_set(&pool->pgp_count,0); - pool->obj_count = 0; pool->shared_count = 0; + pool->shared_count = 0; pool->pageshift = PAGE_SHIFT - 12; - pool->good_puts = pool->puts = pool->dup_puts_flushed = 0; - pool->dup_puts_replaced = pool->no_mem_puts = 0; - pool->found_gets = pool->gets = 0; - pool->flushs_found = 
pool->flushs = 0; - pool->flush_objs_found = pool->flush_objs = 0; pool->is_dying = 0; return pool; } @@ -1163,9 +1052,7 @@ static struct client *client_create(domid_t cli_id) INIT_LIST_HEAD(&client->ephemeral_page_list); INIT_LIST_HEAD(&client->persistent_invalidated_list); client->cur_pgp = NULL; - client->eph_count = client->eph_count_max = 0; - client->total_cycles = 0; client->succ_pers_puts = 0; - client->succ_eph_gets = 0; client->succ_pers_gets = 0; + client->eph_count = 0; tmem_client_info("ok\n"); return client; @@ -1273,7 +1160,6 @@ static int tmem_evict(void) int ret = 0; bool_t hold_pool_rwlock = 0; - evict_attempts++; tmem_spin_lock(&eph_lists_spinlock); if ( (client != NULL) && client_over_quota(client) && !list_empty(&client->ephemeral_page_list) ) @@ -1318,7 +1204,6 @@ found: tmem_spin_unlock(&obj->obj_spinlock); if ( hold_pool_rwlock ) tmem_write_unlock(&pool->pool_rwlock); - evicted_pgs++; ret = 1; out: @@ -1404,8 +1289,6 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn pgp->cdata = p; } pgp->size = size; - pgp->us.obj->pool->client->compressed_pages++; - pgp->us.obj->pool->client->compressed_sum_size += size; ret = 1; out: @@ -1443,7 +1326,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, else if ( ret == -ENOMEM ) goto failed_dup; else if ( ret == -EFAULT ) - goto bad_copy; + goto cleanup; } copy_uncompressed: @@ -1454,7 +1337,7 @@ copy_uncompressed: pgp->size = 0; ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); if ( ret < 0 ) - goto bad_copy; + goto cleanup; if ( tmem_dedup_enabled() && !is_persistent(pool) ) { if ( pcd_associate(pgp,NULL,0) == -ENOMEM ) @@ -1467,16 +1350,8 @@ done: obj->last_client = client->cli_id; obj->no_evict = 0; tmem_spin_unlock(&obj->obj_spinlock); - pool->dup_puts_replaced++; - pool->good_puts++; - if ( is_persistent(pool) ) - client->succ_pers_puts++; return 1; -bad_copy: - failed_copies++; - goto cleanup; - failed_dup: /* couldn''t 
change out the data, flush the old data and return * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */ @@ -1494,7 +1369,6 @@ cleanup: obj->no_evict = 0; tmem_spin_unlock(&obj->obj_spinlock); } - pool->dup_puts_flushed++; return ret; } @@ -1510,7 +1384,6 @@ static int do_tmem_put(struct tmem_pool *pool, ASSERT(pool != NULL); client = pool->client; ret = client->frozen ? -EFROZEN : -ENOMEM; - pool->puts++; /* does page already exist (dup)? if so, handle specially */ if ( (obj = obj_find(pool,oidp)) != NULL ) { @@ -1560,16 +1433,14 @@ static int do_tmem_put(struct tmem_pool *pool, goto insert_page; if ( ret == -ENOMEM ) { - client->compress_nomem++; goto del_pgp_from_obj; } if ( ret == 0 ) { - client->compress_poor++; goto copy_uncompressed; } if ( ret == -EFAULT ) - goto bad_copy; + goto del_pgp_from_obj; } copy_uncompressed: @@ -1580,7 +1451,7 @@ copy_uncompressed: } ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf); if ( ret < 0 ) - goto bad_copy; + goto del_pgp_from_obj; if ( tmem_dedup_enabled() && !is_persistent(pool) ) { @@ -1594,12 +1465,10 @@ insert_page: tmem_spin_lock(&eph_lists_spinlock); list_add_tail(&pgp->global_eph_pages, &global_ephemeral_page_list); - if (++global_eph_count > global_eph_count_max) - global_eph_count_max = global_eph_count; + ++global_eph_count; list_add_tail(&pgp->us.client_eph_pages, &client->ephemeral_page_list); - if (++client->eph_count > client->eph_count_max) - client->eph_count_max = client->eph_count; + ++client->eph_count; tmem_spin_unlock(&eph_lists_spinlock); } else @@ -1616,17 +1485,8 @@ insert_page: /* free the obj spinlock */ tmem_spin_unlock(&obj->obj_spinlock); - pool->good_puts++; - - if ( is_persistent(pool) ) - client->succ_pers_puts++; - else - tot_good_eph_puts++; return 1; -bad_copy: - failed_copies++; - del_pgp_from_obj: ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1)); pgp_delete_from_obj(obj, pgp->index); @@ -1645,7 +1505,6 @@ unlock_obj: obj->no_evict = 0; 
tmem_spin_unlock(&obj->obj_spinlock); } - pool->no_mem_puts++; return ret; } @@ -1660,7 +1519,6 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, if ( !_atomic_read(pool->pgp_count) ) return -EEMPTY; - pool->gets++; obj = obj_find(pool,oidp); if ( obj == NULL ) return 0; @@ -1687,7 +1545,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, else rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf); if ( rc <= 0 ) - goto bad_copy; + goto out; if ( !is_persistent(pool) ) { @@ -1716,17 +1574,11 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, obj->no_evict = 0; tmem_spin_unlock(&obj->obj_spinlock); } - pool->found_gets++; - if ( is_persistent(pool) ) - client->succ_pers_gets++; - else - client->succ_eph_gets++; return 1; -bad_copy: +out: obj->no_evict = 0; tmem_spin_unlock(&obj->obj_spinlock); - failed_copies++; return rc; } @@ -1735,7 +1587,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t struct tmem_object_root *obj; struct tmem_page_descriptor *pgp; - pool->flushs++; obj = obj_find(pool,oidp); if ( obj == NULL ) goto out; @@ -1756,7 +1607,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t obj->no_evict = 0; tmem_spin_unlock(&obj->obj_spinlock); } - pool->flushs_found++; out: if ( pool->client->frozen ) @@ -1769,13 +1619,11 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp) { struct tmem_object_root *obj; - pool->flush_objs++; obj = obj_find(pool,oidp); if ( obj == NULL ) goto out; tmem_write_lock(&pool->pool_rwlock); obj_destroy(obj,0); - pool->flush_objs_found++; tmem_write_unlock(&pool->pool_rwlock); out: @@ -2386,8 +2234,6 @@ long do_tmem_op(tmem_cli_op_t uops) if ( !tmem_current_permitted() ) return -EPERM; - total_tmem_ops++; - if ( tmem_lock_all ) { if ( tmem_lock_all > 1 ) @@ -2402,7 +2248,6 @@ long do_tmem_op(tmem_cli_op_t uops) if ( tmem_lock_all ) goto out; 
simple_error: - errored_tmem_ops++; return rc; } @@ -2512,8 +2357,6 @@ long do_tmem_op(tmem_cli_op_t uops) } out: - if ( rc < 0 ) - errored_tmem_ops++; if ( tmem_lock_all ) { if ( tmem_lock_all > 1 ) @@ -2590,7 +2433,6 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) if (!tmem_enabled() || !tmem_freeable_pages()) return NULL; - relinq_attempts++; if ( order > 0 ) { #ifndef NDEBUG @@ -2613,13 +2455,10 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) break; evicts_per_relinq++; } - if ( evicts_per_relinq > max_evicts_per_relinq ) - max_evicts_per_relinq = evicts_per_relinq; if ( pfp != NULL ) { if ( !(memflags & MEMF_tmem) ) scrub_one_page(pfp); - relinq_pgs++; } if ( tmem_called_from_tmem(memflags) ) -- 1.7.10.4
tmem_lock_all is used for debug only, remove it from upstream to make tmem source code more readable and easy to maintain. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 267 +++++++++++++++++--------------------------- xen/common/tmem_xen.c | 3 - xen/include/xen/tmem_xen.h | 8 -- 3 files changed, 101 insertions(+), 177 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index c1b3e21..67fa1ee 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -167,22 +167,12 @@ static atomic_t client_weight_total = ATOMIC_INIT(0); static int tmem_initialized = 0; /************ CONCURRENCY ***********************************************/ -DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ -DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ +DEFINE_RWLOCK(tmem_rwlock); static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ static DEFINE_SPINLOCK(pers_lists_spinlock); -#define tmem_spin_lock(_l) do {if (!tmem_lock_all) spin_lock(_l);}while(0) -#define tmem_spin_unlock(_l) do {if (!tmem_lock_all) spin_unlock(_l);}while(0) -#define tmem_read_lock(_l) do {if (!tmem_lock_all) read_lock(_l);}while(0) -#define tmem_read_unlock(_l) do {if (!tmem_lock_all) read_unlock(_l);}while(0) -#define tmem_write_lock(_l) do {if (!tmem_lock_all) write_lock(_l);}while(0) -#define tmem_write_unlock(_l) do {if (!tmem_lock_all) write_unlock(_l);}while(0) -#define tmem_write_trylock(_l) ((tmem_lock_all)?1:write_trylock(_l)) -#define tmem_spin_trylock(_l) (tmem_lock_all?1:spin_trylock(_l)) - -#define ASSERT_SPINLOCK(_l) ASSERT(tmem_lock_all || spin_is_locked(_l)) -#define ASSERT_WRITELOCK(_l) ASSERT(tmem_lock_all || rw_is_write_locked(_l)) +#define ASSERT_SPINLOCK(_l) ASSERT(spin_is_locked(_l)) +#define ASSERT_WRITELOCK(_l) ASSERT(rw_is_write_locked(_l)) /* global counters (should use long_atomic_t access) */ static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */ @@ -250,7 +240,7 @@ static int 
pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) int ret; ASSERT(tmem_dedup_enabled()); - tmem_read_lock(&pcd_tree_rwlocks[firstbyte]); + read_lock(&pcd_tree_rwlocks[firstbyte]); pcd = pgp->pcd; if ( pgp->size < PAGE_SIZE && pgp->size != 0 && pcd->size < PAGE_SIZE && pcd->size != 0 ) @@ -260,7 +250,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size); else ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null); - tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]); + read_unlock(&pcd_tree_rwlocks[firstbyte]); return ret; } @@ -283,14 +273,14 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool if ( have_pcd_rwlock ) ASSERT_WRITELOCK(&pcd_tree_rwlocks[firstbyte]); else - tmem_write_lock(&pcd_tree_rwlocks[firstbyte]); + write_lock(&pcd_tree_rwlocks[firstbyte]); list_del_init(&pgp->pcd_siblings); pgp->pcd = NULL; pgp->firstbyte = NOT_SHAREABLE; pgp->size = -1; if ( --pcd->pgp_ref_count ) { - tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); + write_unlock(&pcd_tree_rwlocks[firstbyte]); return; } @@ -317,7 +307,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool /* real physical page */ tmem_page_free(pool,pfp); } - tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); + write_unlock(&pcd_tree_rwlocks[firstbyte]); } @@ -349,7 +339,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize ASSERT(pfp_size <= PAGE_SIZE); ASSERT(!(pfp_size & (sizeof(uint64_t)-1))); } - tmem_write_lock(&pcd_tree_rwlocks[firstbyte]); + write_lock(&pcd_tree_rwlocks[firstbyte]); /* look for page match */ root = &pcd_tree_roots[firstbyte]; @@ -443,7 +433,7 @@ match: pgp->pcd = pcd; unlock: - tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); + write_unlock(&pcd_tree_rwlocks[firstbyte]); return ret; } @@ -552,7 +542,7 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) if ( 
!is_persistent(pgp->us.obj->pool) ) { if ( !no_eph_lock ) - tmem_spin_lock(&eph_lists_spinlock); + spin_lock(&eph_lists_spinlock); if ( !list_empty(&pgp->us.client_eph_pages) ) client->eph_count--; ASSERT(client->eph_count >= 0); @@ -562,20 +552,20 @@ static void pgp_delist(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) ASSERT(global_eph_count >= 0); list_del_init(&pgp->global_eph_pages); if ( !no_eph_lock ) - tmem_spin_unlock(&eph_lists_spinlock); + spin_unlock(&eph_lists_spinlock); } else { if ( client->live_migrating ) { - tmem_spin_lock(&pers_lists_spinlock); + spin_lock(&pers_lists_spinlock); list_add_tail(&pgp->client_inv_pages, &client->persistent_invalidated_list); if ( pgp != pgp->us.obj->pool->cur_pgp ) list_del_init(&pgp->us.pool_pers_pages); - tmem_spin_unlock(&pers_lists_spinlock); + spin_unlock(&pers_lists_spinlock); } else { - tmem_spin_lock(&pers_lists_spinlock); + spin_lock(&pers_lists_spinlock); list_del_init(&pgp->us.pool_pers_pages); - tmem_spin_unlock(&pers_lists_spinlock); + spin_unlock(&pers_lists_spinlock); } } } @@ -709,7 +699,7 @@ static struct tmem_object_root * obj_find(struct tmem_pool *pool, struct oid *oi struct tmem_object_root *obj; restart_find: - tmem_read_lock(&pool->pool_rwlock); + read_lock(&pool->pool_rwlock); node = pool->obj_rb_root[oid_hash(oidp)].rb_node; while ( node ) { @@ -717,17 +707,12 @@ restart_find: switch ( oid_compare(&obj->oid, oidp) ) { case 0: /* equal */ - if ( tmem_lock_all ) - obj->no_evict = 1; - else + if ( !spin_trylock(&obj->obj_spinlock) ) { - if ( !tmem_spin_trylock(&obj->obj_spinlock) ) - { - tmem_read_unlock(&pool->pool_rwlock); - goto restart_find; - } - tmem_read_unlock(&pool->pool_rwlock); + read_unlock(&pool->pool_rwlock); + goto restart_find; } + read_unlock(&pool->pool_rwlock); return obj; case -1: node = node->rb_left; @@ -736,7 +721,7 @@ restart_find: node = node->rb_right; } } - tmem_read_unlock(&pool->pool_rwlock); + read_unlock(&pool->pool_rwlock); return NULL; } @@ -763,7 +748,7 
@@ static void obj_free(struct tmem_object_root *obj, int no_rebalance) /* use no_rebalance only if all objects are being destroyed anyway */ if ( !no_rebalance ) rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]); - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); tmem_free(obj, pool); } @@ -813,7 +798,7 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid obj->oid = *oidp; obj->pgp_count = 0; obj->last_client = TMEM_CLI_ID_NULL; - tmem_spin_lock(&obj->obj_spinlock); + spin_lock(&obj->obj_spinlock); obj_rb_insert(&pool->obj_rb_root[oid_hash(oidp)], obj); obj->no_evict = 1; ASSERT_SPINLOCK(&obj->obj_spinlock); @@ -835,7 +820,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t struct tmem_object_root *obj; int i; - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); pool->is_dying = 1; for (i = 0; i < OBJ_HASH_BUCKETS; i++) { @@ -843,7 +828,7 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t while ( node != NULL ) { obj = container_of(node, struct tmem_object_root, rb_tree_node); - tmem_spin_lock(&obj->obj_spinlock); + spin_lock(&obj->obj_spinlock); node = rb_next(node); ASSERT(obj->no_evict == 0); if ( !selective ) @@ -852,10 +837,10 @@ static void pool_destroy_objs(struct tmem_pool *pool, bool_t selective, domid_t else if ( obj->last_client == cli_id ) obj_destroy(obj,0); else - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } } - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } @@ -1114,9 +1099,7 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho if ( pool->is_dying ) return 0; - if ( tmem_lock_all && !obj->no_evict ) - return 1; - if ( tmem_spin_trylock(&obj->obj_spinlock) ) + if ( spin_trylock(&obj->obj_spinlock) ) { if ( tmem_dedup_enabled() ) { @@ -1124,7 +1107,7 @@ static bool_t tmem_try_to_evict_pgp(struct 
tmem_page_descriptor *pgp, bool_t *ho if ( firstbyte == NOT_SHAREABLE ) goto obj_unlock; ASSERT(firstbyte < 256); - if ( !tmem_write_trylock(&pcd_tree_rwlocks[firstbyte]) ) + if ( !write_trylock(&pcd_tree_rwlocks[firstbyte]) ) goto obj_unlock; if ( pgp->pcd->pgp_ref_count > 1 && !pgp->eviction_attempted ) { @@ -1138,15 +1121,15 @@ static bool_t tmem_try_to_evict_pgp(struct tmem_page_descriptor *pgp, bool_t *ho } if ( obj->pgp_count > 1 ) return 1; - if ( tmem_write_trylock(&pool->pool_rwlock) ) + if ( write_trylock(&pool->pool_rwlock) ) { *hold_pool_rwlock = 1; return 1; } pcd_unlock: - tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); + write_unlock(&pcd_tree_rwlocks[firstbyte]); obj_unlock: - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } return 0; } @@ -1160,7 +1143,7 @@ static int tmem_evict(void) int ret = 0; bool_t hold_pool_rwlock = 0; - tmem_spin_lock(&eph_lists_spinlock); + spin_lock(&eph_lists_spinlock); if ( (client != NULL) && client_over_quota(client) && !list_empty(&client->ephemeral_page_list) ) { @@ -1201,13 +1184,13 @@ found: obj_free(obj,0); } else - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); if ( hold_pool_rwlock ) - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); ret = 1; out: - tmem_spin_unlock(&eph_lists_spinlock); + spin_unlock(&eph_lists_spinlock); return ret; } @@ -1349,7 +1332,7 @@ done: if ( is_shared(pool) ) obj->last_client = client->cli_id; obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); return 1; failed_dup: @@ -1362,12 +1345,12 @@ cleanup: pgp_delete(pgpfound,0); if ( obj->pgp_count == 0 ) { - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); obj_free(obj,0); - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } else { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } return ret; } @@ -1403,14 +1386,14 @@ static int 
do_tmem_put(struct tmem_pool *pool, /* no puts allowed into a frozen pool (except dup puts) */ if ( client->frozen ) return ret; - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); if ( (obj = obj_new(pool,oidp)) == NULL ) { - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); return -ENOMEM; } newobj= 1; - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } /* When arrive here, we have a spinlocked obj for use */ @@ -1462,21 +1445,21 @@ copy_uncompressed: insert_page: if ( !is_persistent(pool) ) { - tmem_spin_lock(&eph_lists_spinlock); + spin_lock(&eph_lists_spinlock); list_add_tail(&pgp->global_eph_pages, &global_ephemeral_page_list); ++global_eph_count; list_add_tail(&pgp->us.client_eph_pages, &client->ephemeral_page_list); ++client->eph_count; - tmem_spin_unlock(&eph_lists_spinlock); + spin_unlock(&eph_lists_spinlock); } else { /* is_persistent */ - tmem_spin_lock(&pers_lists_spinlock); + spin_lock(&pers_lists_spinlock); list_add_tail(&pgp->us.pool_pers_pages, &pool->persistent_page_list); - tmem_spin_unlock(&pers_lists_spinlock); + spin_unlock(&pers_lists_spinlock); } if ( is_shared(pool) ) @@ -1484,7 +1467,7 @@ insert_page: obj->no_evict = 0; /* free the obj spinlock */ - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); return 1; del_pgp_from_obj: @@ -1496,14 +1479,14 @@ free_pgp: unlock_obj: if ( newobj ) { - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); obj_free(obj,0); - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } else { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } return ret; } @@ -1531,7 +1514,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, if ( pgp == NULL ) { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); return 0; } ASSERT(pgp->size != -1); @@ -1554,31 +1537,31 @@ 
static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, pgp_delete(pgp,0); if ( obj->pgp_count == 0 ) { - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); obj_free(obj,0); obj = NULL; - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } } else { - tmem_spin_lock(&eph_lists_spinlock); + spin_lock(&eph_lists_spinlock); list_del(&pgp->global_eph_pages); list_add_tail(&pgp->global_eph_pages,&global_ephemeral_page_list); list_del(&pgp->us.client_eph_pages); list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); - tmem_spin_unlock(&eph_lists_spinlock); + spin_unlock(&eph_lists_spinlock); obj->last_client = tmem_get_cli_id_from_current(); } } if ( obj != NULL ) { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } return 1; out: obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); return rc; } @@ -1594,18 +1577,18 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t if ( pgp == NULL ) { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); goto out; } pgp_delete(pgp,0); if ( obj->pgp_count == 0 ) { - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); obj_free(obj,0); - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); } else { obj->no_evict = 0; - tmem_spin_unlock(&obj->obj_spinlock); + spin_unlock(&obj->obj_spinlock); } out: @@ -1622,9 +1605,9 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp) obj = obj_find(pool,oidp); if ( obj == NULL ) goto out; - tmem_write_lock(&pool->pool_rwlock); + write_lock(&pool->pool_rwlock); obj_destroy(obj,0); - tmem_write_unlock(&pool->pool_rwlock); + write_unlock(&pool->pool_rwlock); out: if ( pool->client->frozen ) @@ -2031,7 +2014,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, if ( bufsize < pagesize + 
sizeof(struct tmem_handle) ) return -ENOMEM; - tmem_spin_lock(&pers_lists_spinlock); + spin_lock(&pers_lists_spinlock); if ( list_empty(&pool->persistent_page_list) ) { ret = -1; @@ -2063,7 +2046,7 @@ static int tmemc_save_get_next_page(int cli_id, uint32_t pool_id, ret = do_tmem_get(pool, &oid, pgp->index, 0, buf); out: - tmem_spin_unlock(&pers_lists_spinlock); + spin_unlock(&pers_lists_spinlock); return ret; } @@ -2079,7 +2062,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, return 0; if ( bufsize < sizeof(struct tmem_handle) ) return 0; - tmem_spin_lock(&pers_lists_spinlock); + spin_lock(&pers_lists_spinlock); if ( list_empty(&client->persistent_invalidated_list) ) goto out; if ( client->cur_pgp == NULL ) @@ -2105,7 +2088,7 @@ static int tmemc_save_get_next_inv(int cli_id, tmem_cli_va_param_t buf, tmem_copy_to_client_buf(buf, &h, 1); ret = 1; out: - tmem_spin_unlock(&pers_lists_spinlock); + spin_unlock(&pers_lists_spinlock); return ret; } @@ -2226,7 +2209,7 @@ long do_tmem_op(tmem_cli_op_t uops) bool_t succ_get = 0, succ_put = 0; bool_t non_succ_get = 0, non_succ_put = 0; bool_t flush = 0, flush_obj = 0; - bool_t tmem_write_lock_set = 0, tmem_read_lock_set = 0; + bool_t write_lock_set = 0, read_lock_set = 0; if ( !tmem_initialized ) return -ENODEV; @@ -2234,47 +2217,31 @@ long do_tmem_op(tmem_cli_op_t uops) if ( !tmem_current_permitted() ) return -EPERM; - if ( tmem_lock_all ) - { - if ( tmem_lock_all > 1 ) - spin_lock_irq(&tmem_spinlock); - else - spin_lock(&tmem_spinlock); - } - if ( client != NULL && tmem_client_is_dying(client) ) - { - rc = -ENODEV; - if ( tmem_lock_all ) - goto out; - simple_error: - return rc; - } + return -ENODEV; if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) { tmem_client_err("tmem: can''t get tmem struct from %s\n", tmem_client_str); rc = -EFAULT; - if ( !tmem_lock_all ) - goto simple_error; - goto out; + return rc; } if ( op.cmd == TMEM_CONTROL ) { - tmem_write_lock(&tmem_rwlock); - 
tmem_write_lock_set = 1; + write_lock(&tmem_rwlock); + write_lock_set = 1; rc = do_tmem_control(&op); goto out; } else if ( op.cmd == TMEM_AUTH ) { - tmem_write_lock(&tmem_rwlock); - tmem_write_lock_set = 1; + write_lock(&tmem_rwlock); + write_lock_set = 1; rc = tmemc_shared_pool_auth(op.u.creat.arg1,op.u.creat.uuid[0], op.u.creat.uuid[1],op.u.creat.flags); goto out; } else if ( op.cmd == TMEM_RESTORE_NEW ) { - tmem_write_lock(&tmem_rwlock); - tmem_write_lock_set = 1; + write_lock(&tmem_rwlock); + write_lock_set = 1; rc = do_tmem_new_pool(op.u.creat.arg1, op.pool_id, op.u.creat.flags, op.u.creat.uuid[0], op.u.creat.uuid[1]); goto out; @@ -2283,8 +2250,8 @@ long do_tmem_op(tmem_cli_op_t uops) /* create per-client tmem structure dynamically on first use by client */ if ( client == NULL ) { - tmem_write_lock(&tmem_rwlock); - tmem_write_lock_set = 1; + write_lock(&tmem_rwlock); + write_lock_set = 1; if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL ) { tmem_client_err("tmem: can''t create tmem structure for %s\n", @@ -2296,18 +2263,18 @@ long do_tmem_op(tmem_cli_op_t uops) if ( op.cmd == TMEM_NEW_POOL || op.cmd == TMEM_DESTROY_POOL ) { - if ( !tmem_write_lock_set ) + if ( !write_lock_set ) { - tmem_write_lock(&tmem_rwlock); - tmem_write_lock_set = 1; + write_lock(&tmem_rwlock); + write_lock_set = 1; } } else { - if ( !tmem_write_lock_set ) + if ( !write_lock_set ) { - tmem_read_lock(&tmem_rwlock); - tmem_read_lock_set = 1; + read_lock(&tmem_rwlock); + read_lock_set = 1; } if ( ((uint32_t)op.pool_id >= MAX_POOLS_PER_DOMAIN) || ((pool = client->pools[op.pool_id]) == NULL) ) @@ -2357,21 +2324,12 @@ long do_tmem_op(tmem_cli_op_t uops) } out: - if ( tmem_lock_all ) - { - if ( tmem_lock_all > 1 ) - spin_unlock_irq(&tmem_spinlock); - else - spin_unlock(&tmem_spinlock); - } else { - if ( tmem_write_lock_set ) - write_unlock(&tmem_rwlock); - else if ( tmem_read_lock_set ) - read_unlock(&tmem_rwlock); - else - ASSERT(0); - } - + if ( write_lock_set ) + 
write_unlock(&tmem_rwlock); + else if ( read_lock_set ) + read_unlock(&tmem_rwlock); + else + ASSERT(0); return rc; } @@ -2389,38 +2347,26 @@ void tmem_destroy(void *v) return; } - if ( tmem_lock_all ) - spin_lock(&tmem_spinlock); - else - write_lock(&tmem_rwlock); + write_lock(&tmem_rwlock); printk("tmem: flushing tmem pools for %s=%d\n", tmem_cli_id_str, client->cli_id); client_flush(client, 1); - if ( tmem_lock_all ) - spin_unlock(&tmem_spinlock); - else - write_unlock(&tmem_rwlock); + write_unlock(&tmem_rwlock); } /* freezing all pools guarantees that no additional memory will be consumed */ void tmem_freeze_all(unsigned char key) { static int freeze = 0; - - if ( tmem_lock_all ) - spin_lock(&tmem_spinlock); - else - write_lock(&tmem_rwlock); + + write_lock(&tmem_rwlock); freeze = !freeze; tmemc_freeze_pools(TMEM_CLI_ID_NULL,freeze); - if ( tmem_lock_all ) - spin_unlock(&tmem_spinlock); - else - write_unlock(&tmem_rwlock); + write_unlock(&tmem_rwlock); } #define MAX_EVICTS 10 /* should be variable or set via TMEMC_ ?? 
*/ @@ -2442,12 +2388,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) } if ( tmem_called_from_tmem(memflags) ) - { - if ( tmem_lock_all ) - spin_lock(&tmem_spinlock); - else - read_lock(&tmem_rwlock); - } + read_lock(&tmem_rwlock); while ( (pfp = tmem_alloc_page(NULL,1)) == NULL ) { @@ -2462,12 +2403,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) } if ( tmem_called_from_tmem(memflags) ) - { - if ( tmem_lock_all ) - spin_unlock(&tmem_spinlock); - else - read_unlock(&tmem_rwlock); - } + read_unlock(&tmem_rwlock); return pfp; } @@ -2493,9 +2429,8 @@ static int __init init_tmem(void) if ( tmem_init() ) { - printk("tmem: initialized comp=%d dedup=%d tze=%d global-lock=%d\n", - tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled(), - tmem_lock_all); + printk("tmem: initialized comp=%d dedup=%d tze=%d\n", + tmem_compression_enabled(), tmem_dedup_enabled(), tmem_tze_enabled()); if ( tmem_dedup_enabled()&&tmem_compression_enabled()&&tmem_tze_enabled() ) { tmem_tze_disable(); diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index fbd1acc..bc8e249 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -29,9 +29,6 @@ boolean_param("tmem_tze", opt_tmem_tze); bool_t __read_mostly opt_tmem_shared_auth = 0; boolean_param("tmem_shared_auth", opt_tmem_shared_auth); -int __read_mostly opt_tmem_lock = 0; -integer_param("tmem_lock", opt_tmem_lock); - atomic_t freeable_page_count = ATOMIC_INIT(0); /* these are a concurrency bottleneck, could be percpu and dynamically diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index ae6acf3..9907575 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -34,11 +34,6 @@ extern spinlock_t tmem_page_list_lock; extern unsigned long tmem_page_list_pages; extern atomic_t freeable_page_count; -extern spinlock_t tmem_lock; -extern spinlock_t tmem_spinlock; -extern rwlock_t tmem_rwlock; - -extern void tmem_copy_page(char *to, 
char*from); extern int tmem_init(void); #define tmem_hash hash_long @@ -77,8 +72,6 @@ static inline bool_t tmem_enabled(void) return opt_tmem; } -extern int opt_tmem_lock; - /* * Memory free page list management */ @@ -182,7 +175,6 @@ static inline unsigned long tmem_free_mb(void) return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); } -#define tmem_lock_all opt_tmem_lock #define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem) /* "Client" (==domain) abstraction */ -- 1.7.10.4
There are two allocate path for each persistant and ephemeral pool. This path try to refactor those allocate/free functions with better name and more readable call layer. Also added more comment. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 112 +++++++++++++++++++++++++++++++++++++------- xen/common/tmem_xen.c | 63 ------------------------- xen/include/xen/tmem_xen.h | 20 ++------ 3 files changed, 100 insertions(+), 95 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 67fa1ee..ee61899 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -166,6 +166,13 @@ static bool_t global_shared_auth = 0; static atomic_t client_weight_total = ATOMIC_INIT(0); static int tmem_initialized = 0; +struct xmem_pool *tmem_mempool = 0; +unsigned int tmem_mempool_maxalloc = 0; + +DEFINE_SPINLOCK(tmem_page_list_lock); +PAGE_LIST_HEAD(tmem_page_list); +unsigned long tmem_page_list_pages = 0; + /************ CONCURRENCY ***********************************************/ DEFINE_RWLOCK(tmem_rwlock); static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ @@ -177,7 +184,29 @@ static DEFINE_SPINLOCK(pers_lists_spinlock); /* global counters (should use long_atomic_t access) */ static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */ -/************ MEMORY ALLOCATION INTERFACE *****************************/ +/* + * There two types of memory allocation interfaces in tmem. + * One is based on xmem_pool and the other is used for allocate a whole page. + * Both of them are based on the lowlevel function __tmem_alloc_page/_thispool(). + * The call trace of alloc path is like below. 
+ * Persistant pool: + * 1.tmem_malloc() + * > xmem_pool_alloc() + * > tmem_persistent_pool_page_get() + * > __tmem_alloc_page_thispool() + * 2.tmem_alloc_page() + * > __tmem_alloc_page_thispool() + * + * Ephemeral pool: + * 1.tmem_malloc() + * > xmem_pool_alloc() + * > tmem_mempool_page_get() + * > __tmem_alloc_page() + * 2.tmem_alloc_page() + * > __tmem_alloc_page() + * + * The free path is done in the same manner. + */ static void *tmem_malloc(size_t size, struct tmem_pool *pool) { void *v = NULL; @@ -209,28 +238,76 @@ static void tmem_free(void *p, struct tmem_pool *pool) } } -static struct page_info *tmem_page_alloc(struct tmem_pool *pool) +static struct page_info *tmem_alloc_page(struct tmem_pool *pool) { struct page_info *pfp = NULL; if ( pool != NULL && is_persistent(pool) ) - pfp = tmem_alloc_page_thispool(pool->client->domain); + pfp = __tmem_alloc_page_thispool(pool->client->domain); else - pfp = tmem_alloc_page(pool,0); + pfp = __tmem_alloc_page(pool,0); return pfp; } -static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp) +static void tmem_free_page(struct tmem_pool *pool, struct page_info *pfp) { ASSERT(pfp); if ( pool == NULL || !is_persistent(pool) ) - tmem_free_page(pfp); + __tmem_free_page(pfp); else - tmem_free_page_thispool(pfp); + __tmem_free_page_thispool(pfp); } -/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ +static noinline void *tmem_mempool_page_get(unsigned long size) +{ + struct page_info *pi; + ASSERT(size == PAGE_SIZE); + if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) + return NULL; + ASSERT(IS_VALID_PAGE(pi)); + return page_to_virt(pi); +} + +static void tmem_mempool_page_put(void *page_va) +{ + ASSERT(IS_PAGE_ALIGNED(page_va)); + __tmem_free_page(virt_to_page(page_va)); +} + +static int __init tmem_mempool_init(void) +{ + tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get, + tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE); + if ( tmem_mempool ) + tmem_mempool_maxalloc = 
xmem_pool_maxalloc(tmem_mempool); + return tmem_mempool != NULL; +} + +/* persistent pools are per-domain */ +static void *tmem_persistent_pool_page_get(unsigned long size) +{ + struct page_info *pi; + struct domain *d = current->domain; + + ASSERT(size == PAGE_SIZE); + if ( (pi = __tmem_alloc_page_thispool(d)) == NULL ) + return NULL; + ASSERT(IS_VALID_PAGE(pi)); + return page_to_virt(pi); +} + +static void tmem_persistent_pool_page_put(void *page_va) +{ + struct page_info *pi; + + ASSERT(IS_PAGE_ALIGNED(page_va)); + pi = mfn_to_page(virt_to_mfn(page_va)); + ASSERT(IS_VALID_PAGE(pi)); + __tmem_free_page_thispool(pi); +} + +/************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ #define NOT_SHAREABLE ((uint16_t)-1UL) static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) @@ -305,7 +382,7 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool tmem_free(pcd_tze, pool); } else { /* real physical page */ - tmem_page_free(pool,pfp); + tmem_free_page(pool,pfp); } write_unlock(&pcd_tree_rwlocks[firstbyte]); } @@ -384,7 +461,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize /* match! if not compressed, free the no-longer-needed page */ /* but if compressed, data is assumed static so don''t free! 
*/ if ( cdata == NULL ) - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); goto match; } } @@ -417,7 +494,7 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) { tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size); pcd->size = pfp_size; - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); } else { pcd->pfp = pgp->pfp; pcd->size = PAGE_SIZE; @@ -486,7 +563,7 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po else if ( pgp_size ) tmem_free(pgp->cdata, pool); else - tmem_page_free(pgp->us.obj->pool,pgp->pfp); + tmem_free_page(pgp->us.obj->pool,pgp->pfp); pgp->pfp = NULL; pgp->size = -1; } @@ -1258,7 +1335,7 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn ret = tmem_compress_from_client(cmfn, &dst, &size, clibuf); if ( ret <= 0 ) goto out; - else if ( (size == 0) || (size >= tmem_subpage_maxsize()) ) { + else if ( (size == 0) || (size >= tmem_mempool_maxalloc) ) { ret = 0; goto out; } else if ( tmem_dedup_enabled() && !is_persistent(pgp->us.obj->pool) ) { @@ -1315,7 +1392,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, copy_uncompressed: if ( pgp->pfp ) pgp_free_data(pgp, pool); - if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) + if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) goto failed_dup; pgp->size = 0; ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); @@ -1427,7 +1504,7 @@ static int do_tmem_put(struct tmem_pool *pool, } copy_uncompressed: - if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) + if ( ( pgp->pfp = tmem_alloc_page(pool) ) == NULL ) { ret = -ENOMEM; goto del_pgp_from_obj; @@ -2390,7 +2467,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) if ( tmem_called_from_tmem(memflags) ) read_lock(&tmem_rwlock); - while ( (pfp = 
tmem_alloc_page(NULL,1)) == NULL ) + while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL ) { if ( (max_evictions-- <= 0) || !tmem_evict()) break; @@ -2427,6 +2504,9 @@ static int __init init_tmem(void) rwlock_init(&pcd_tree_rwlocks[i]); } + if ( !tmem_mempool_init() ) + return 0; + if ( tmem_init() ) { printk("tmem: initialized comp=%d dedup=%d tze=%d\n", diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index bc8e249..5ef131b 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -238,67 +238,7 @@ int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, return 1; } -/****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/ - -struct xmem_pool *tmem_mempool = 0; -unsigned int tmem_mempool_maxalloc = 0; - -DEFINE_SPINLOCK(tmem_page_list_lock); -PAGE_LIST_HEAD(tmem_page_list); -unsigned long tmem_page_list_pages = 0; - -static noinline void *tmem_mempool_page_get(unsigned long size) -{ - struct page_info *pi; - - ASSERT(size == PAGE_SIZE); - if ( (pi = tmem_alloc_page(NULL,0)) == NULL ) - return NULL; - ASSERT(IS_VALID_PAGE(pi)); - return page_to_virt(pi); -} - -static void tmem_mempool_page_put(void *page_va) -{ - ASSERT(IS_PAGE_ALIGNED(page_va)); - tmem_free_page(virt_to_page(page_va)); -} - -static int __init tmem_mempool_init(void) -{ - tmem_mempool = xmem_pool_create("tmem", tmem_mempool_page_get, - tmem_mempool_page_put, PAGE_SIZE, 0, PAGE_SIZE); - if ( tmem_mempool ) - tmem_mempool_maxalloc = xmem_pool_maxalloc(tmem_mempool); - return tmem_mempool != NULL; -} - -/* persistent pools are per-domain */ - -void *tmem_persistent_pool_page_get(unsigned long size) -{ - struct page_info *pi; - struct domain *d = current->domain; - - ASSERT(size == PAGE_SIZE); - if ( (pi = tmem_alloc_page_thispool(d)) == NULL ) - return NULL; - ASSERT(IS_VALID_PAGE(pi)); - return page_to_virt(pi); -} - -void tmem_persistent_pool_page_put(void *page_va) -{ - struct page_info *pi; - - ASSERT(IS_PAGE_ALIGNED(page_va)); - pi = 
mfn_to_page(virt_to_mfn(page_va)); - ASSERT(IS_VALID_PAGE(pi)); - tmem_free_page_thispool(pi); -} - /****************** XEN-SPECIFIC HOST INITIALIZATION ********************/ - static int dstmem_order, workmem_order; static int cpu_callback( @@ -351,9 +291,6 @@ int __init tmem_init(void) { unsigned int cpu; - if ( !tmem_mempool_init() ) - return 0; - dstmem_order = get_order_from_pages(LZO_DSTMEM_PAGES); workmem_order = get_order_from_bytes(LZO1X_1_MEM_COMPRESS); diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 9907575..7468c28 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -27,8 +27,6 @@ typedef uint32_t pagesize_t; /* like size_t, must handle largest PAGE_SIZE */ ((void *)((((unsigned long)addr + (PAGE_SIZE - 1)) & PAGE_MASK)) == addr) #define IS_VALID_PAGE(_pi) ( mfn_valid(page_to_mfn(_pi)) ) -extern struct xmem_pool *tmem_mempool; -extern unsigned int tmem_mempool_maxalloc; extern struct page_list_head tmem_page_list; extern spinlock_t tmem_page_list_lock; extern unsigned long tmem_page_list_pages; @@ -100,7 +98,7 @@ static inline void tmem_page_list_put(struct page_info *pi) /* * Memory allocation for persistent data */ -static inline struct page_info *tmem_alloc_page_thispool(struct domain *d) +static inline struct page_info *__tmem_alloc_page_thispool(struct domain *d) { struct page_info *pi; @@ -128,7 +126,7 @@ out: return pi; } -static inline void tmem_free_page_thispool(struct page_info *pi) +static inline void __tmem_free_page_thispool(struct page_info *pi) { struct domain *d = page_get_owner(pi); @@ -146,7 +144,7 @@ static inline void tmem_free_page_thispool(struct page_info *pi) /* * Memory allocation for ephemeral (non-persistent) data */ -static inline struct page_info *tmem_alloc_page(void *pool, int no_heap) +static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) { struct page_info *pi = tmem_page_list_get(); @@ -158,18 +156,13 @@ static inline struct page_info 
*tmem_alloc_page(void *pool, int no_heap) return pi; } -static inline void tmem_free_page(struct page_info *pi) +static inline void __tmem_free_page(struct page_info *pi) { ASSERT(IS_VALID_PAGE(pi)); tmem_page_list_put(pi); atomic_dec(&freeable_page_count); } -static inline unsigned int tmem_subpage_maxsize(void) -{ - return tmem_mempool_maxalloc; -} - static inline unsigned long tmem_free_mb(void) { return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); @@ -373,17 +366,12 @@ static inline void tmem_copy_to_client_buf_offset(tmem_cli_va_param_t clibuf, int tmem_decompress_to_client(xen_pfn_t, void *, size_t, tmem_cli_va_param_t); - int tmem_compress_from_client(xen_pfn_t, void **, size_t *, tmem_cli_va_param_t); int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); - int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t); - extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len); -extern void *tmem_persistent_pool_page_get(unsigned long size); -extern void tmem_persistent_pool_page_put(void *page_va); #define tmem_client_err(fmt, args...) printk(XENLOG_G_ERR fmt, ##args) #define tmem_client_warn(fmt, args...) printk(XENLOG_G_WARNING fmt, ##args) -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneeded parameters
The two parameters of __tmem_alloc_page() can be reduced. tmem_called_from_tmem() was also dropped by this patch. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 13 +++++-------- xen/include/xen/tmem_xen.h | 11 +++++------ 2 files changed, 10 insertions(+), 14 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index ee61899..bf0fa1b 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -245,7 +245,7 @@ static struct page_info *tmem_alloc_page(struct tmem_pool *pool) if ( pool != NULL && is_persistent(pool) ) pfp = __tmem_alloc_page_thispool(pool->client->domain); else - pfp = __tmem_alloc_page(pool,0); + pfp = __tmem_alloc_page(); return pfp; } @@ -263,9 +263,8 @@ static noinline void *tmem_mempool_page_get(unsigned long size) struct page_info *pi; ASSERT(size == PAGE_SIZE); - if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) + if ( (pi = __tmem_alloc_page()) == NULL ) return NULL; - ASSERT(IS_VALID_PAGE(pi)); return page_to_virt(pi); } @@ -2450,7 +2449,6 @@ void tmem_freeze_all(unsigned char key) void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) { struct page_info *pfp; - unsigned long evicts_per_relinq = 0; int max_evictions = 10; if (!tmem_enabled() || !tmem_freeable_pages()) @@ -2464,14 +2462,13 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) return NULL; } - if ( tmem_called_from_tmem(memflags) ) + if ( memflags & MEMF_tmem ) read_lock(&tmem_rwlock); - while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL ) + while ( (pfp = tmem_page_list_get()) == NULL ) { if ( (max_evictions-- <= 0) || !tmem_evict()) break; - evicts_per_relinq++; } if ( pfp != NULL ) { @@ -2479,7 +2476,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) scrub_one_page(pfp); } - if ( tmem_called_from_tmem(memflags) ) + if ( memflags & MEMF_tmem ) read_unlock(&tmem_rwlock); return pfp; diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 7468c28..c4b9f5a 100644 --- 
a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -144,15 +144,16 @@ static inline void __tmem_free_page_thispool(struct page_info *pi) /* * Memory allocation for ephemeral (non-persistent) data */ -static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) +static inline struct page_info *__tmem_alloc_page(void) { struct page_info *pi = tmem_page_list_get(); - if ( pi == NULL && !no_heap ) + if ( pi == NULL) pi = alloc_domheap_pages(0,0,MEMF_tmem); - ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); - if ( pi != NULL && !no_heap ) + + if ( pi ) atomic_inc(&freeable_page_count); + ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); return pi; } @@ -168,8 +169,6 @@ static inline unsigned long tmem_free_mb(void) return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); } -#define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem) - /* "Client" (==domain) abstraction */ struct client; -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 14/16] tmem: cleanup: drop useless functions from header file
They are several one line functions in tmem_xen.h which are useless, this patch embeded them into tmem.c directly. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 22 +++++++++++----------- xen/include/xen/tmem_xen.h | 27 --------------------------- 2 files changed, 11 insertions(+), 38 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index bf0fa1b..2e807d4 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -1212,7 +1212,7 @@ obj_unlock: static int tmem_evict(void) { - struct client *client = tmem_client_from_current(); + struct client *client = (struct client *)current->domain->tmem; struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del; struct tmem_object_root *obj; struct tmem_pool *pool; @@ -1625,7 +1625,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, list_del(&pgp->us.client_eph_pages); list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); spin_unlock(&eph_lists_spinlock); - obj->last_client = tmem_get_cli_id_from_current(); + obj->last_client = current->domain->domain_id; } } if ( obj != NULL ) @@ -1694,7 +1694,7 @@ out: static int do_tmem_destroy_pool(uint32_t pool_id) { - struct client *client = tmem_client_from_current(); + struct client *client = (struct client *)current->domain->tmem; struct tmem_pool *pool; if ( client->pools == NULL ) @@ -1725,7 +1725,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, int i; if ( this_cli_id == TMEM_CLI_ID_NULL ) - cli_id = tmem_get_cli_id_from_current(); + cli_id = current->domain->domain_id; else cli_id = this_cli_id; tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...", @@ -1766,7 +1766,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, } else { - client = tmem_client_from_current(); + client = (struct client *)current->domain->tmem; ASSERT(client != NULL); for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ ) if ( client->pools[d_poolid] == NULL ) @@ -2206,7 +2206,7 @@ static int 
do_tmem_control(struct tmem_op *op) uint32_t subop = op->u.ctrl.subop; struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]); - if (!tmem_current_is_privileged()) + if ( xsm_tmem_control(XSM_PRIV) ) return -EPERM; switch(subop) @@ -2278,7 +2278,7 @@ static int do_tmem_control(struct tmem_op *op) long do_tmem_op(tmem_cli_op_t uops) { struct tmem_op op; - struct client *client = tmem_client_from_current(); + struct client *client = (struct client *)current->domain->tmem; struct tmem_pool *pool = NULL; struct oid *oidp; int rc = 0; @@ -2290,10 +2290,10 @@ long do_tmem_op(tmem_cli_op_t uops) if ( !tmem_initialized ) return -ENODEV; - if ( !tmem_current_permitted() ) + if ( xsm_tmem_op(XSM_HOOK) ) return -EPERM; - if ( client != NULL && tmem_client_is_dying(client) ) + if ( client != NULL && client->domain->is_dying ) return -ENODEV; if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) @@ -2328,7 +2328,7 @@ long do_tmem_op(tmem_cli_op_t uops) { write_lock(&tmem_rwlock); write_lock_set = 1; - if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL ) + if ( (client = client_create(current->domain->domain_id)) == NULL ) { tmem_client_err("tmem: can''t create tmem structure for %s\n", tmem_client_str); @@ -2417,7 +2417,7 @@ void tmem_destroy(void *v) if ( client == NULL ) return; - if ( !tmem_client_is_dying(client) ) + if ( !client->domain->is_dying ) { printk("tmem: tmem_destroy can only destroy dying client\n"); return; diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index c4b9f5a..bf3be62 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -183,33 +183,6 @@ static inline struct client *tmem_client_from_cli_id(domid_t cli_id) return c; } -static inline struct client *tmem_client_from_current(void) -{ - return (struct client *)(current->domain->tmem); -} - -#define tmem_client_is_dying(_client) (!!_client->domain->is_dying) - -static inline domid_t tmem_get_cli_id_from_current(void) -{ - return 
current->domain->domain_id; -} - -static inline struct domain *tmem_get_cli_ptr_from_current(void) -{ - return current->domain; -} - -static inline bool_t tmem_current_permitted(void) -{ - return !xsm_tmem_op(XSM_HOOK); -} - -static inline bool_t tmem_current_is_privileged(void) -{ - return !xsm_tmem_control(XSM_PRIV); -} - static inline uint8_t tmem_get_first_byte(struct page_info *pfp) { void *p = __map_domain_page(pfp); -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 15/16] tmem: refactor function tmem_ensure_avail_pages()
tmem_ensure_avail_pages() doesn''t return a value which is incorrect because the caller need to confirm there is enough memory. Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 43 +++++++++++++++++++------------------------ xen/include/xen/tmem_xen.h | 6 ------ 2 files changed, 19 insertions(+), 30 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 2e807d4..0bc33ee 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -1296,22 +1296,28 @@ static unsigned long tmem_relinquish_npages(unsigned long n) return avail_pages; } -/* Under certain conditions (e.g. if each client is putting pages for exactly +/* + * Under certain conditions (e.g. if each client is putting pages for exactly * one object), once locks are held, freeing up memory may * result in livelocks and very long "put" times, so we try to ensure there * is a minimum amount of memory (1MB) available BEFORE any data structure - * locks are held */ -static inline void tmem_ensure_avail_pages(void) + * locks are held. 
+ */ +static inline bool_t tmem_ensure_avail_pages(void) { int failed_evict = 10; + unsigned long free_mem; - while ( !tmem_free_mb() ) - { - if ( tmem_evict() ) - continue; - else if ( failed_evict-- <= 0 ) - break; - } + do { + free_mem = (tmem_page_list_pages + total_free_pages()) + >> (20 - PAGE_SHIFT); + if ( free_mem ) + return 1; + if ( !tmem_evict() ) + failed_evict--; + } while ( failed_evict > 0 ); + + return 0; } /************ TMEM CORE OPERATIONS ************************************/ @@ -2282,9 +2288,6 @@ long do_tmem_op(tmem_cli_op_t uops) struct tmem_pool *pool = NULL; struct oid *oidp; int rc = 0; - bool_t succ_get = 0, succ_put = 0; - bool_t non_succ_get = 0, non_succ_put = 0; - bool_t flush = 0, flush_obj = 0; bool_t write_lock_set = 0, read_lock_set = 0; if ( !tmem_initialized ) @@ -2299,8 +2302,7 @@ long do_tmem_op(tmem_cli_op_t uops) if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) { tmem_client_err("tmem: can''t get tmem struct from %s\n", tmem_client_str); - rc = -EFAULT; - return rc; + return -EFAULT; } if ( op.cmd == TMEM_CONTROL ) @@ -2369,28 +2371,21 @@ long do_tmem_op(tmem_cli_op_t uops) op.u.creat.uuid[0], op.u.creat.uuid[1]); break; case TMEM_PUT_PAGE: - tmem_ensure_avail_pages(); - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, + if ( tmem_ensure_avail_pages() ) + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); - if (rc == 1) succ_put = 1; - else non_succ_put = 1; break; case TMEM_GET_PAGE: rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); - if (rc == 1) succ_get = 1; - else non_succ_get = 1; break; case TMEM_FLUSH_PAGE: - flush = 1; rc = do_tmem_flush_page(pool, oidp, op.u.gen.index); break; case TMEM_FLUSH_OBJECT: rc = do_tmem_flush_object(pool, oidp); - flush_obj = 1; break; case TMEM_DESTROY_POOL: - flush = 1; rc = do_tmem_destroy_pool(op.pool_id); break; default: diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h index 
bf3be62..258e8e7 100644 --- a/xen/include/xen/tmem_xen.h +++ b/xen/include/xen/tmem_xen.h @@ -164,13 +164,7 @@ static inline void __tmem_free_page(struct page_info *pi) atomic_dec(&freeable_page_count); } -static inline unsigned long tmem_free_mb(void) -{ - return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); -} - /* "Client" (==domain) abstraction */ - struct client; static inline struct client *tmem_client_from_cli_id(domid_t cli_id) { -- 1.7.10.4
Bob Liu
2013-Nov-20 08:46 UTC
[PATCH 16/16] tmem: cleanup: rename tmem_relinquish_npages()
Rename tmem_relinquish_npages() to tmem_flush_npages() to distinguish it from tmem_relinquish_pages(). Signed-off-by: Bob Liu <bob.liu@oracle.com> --- xen/common/tmem.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xen/common/tmem.c b/xen/common/tmem.c index 0bc33ee..a09e20f 100644 --- a/xen/common/tmem.c +++ b/xen/common/tmem.c @@ -1270,7 +1270,7 @@ out: return ret; } -static unsigned long tmem_relinquish_npages(unsigned long n) +static unsigned long tmem_flush_npages(unsigned long n) { unsigned long avail_pages = 0; @@ -1886,7 +1886,7 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) } /* convert kb to pages, rounding up if necessary */ npages = (kb + ((1 << (PAGE_SHIFT-10))-1)) >> (PAGE_SHIFT-10); - flushed_pages = tmem_relinquish_npages(npages); + flushed_pages = tmem_flush_npages(npages); flushed_kb = flushed_pages << (PAGE_SHIFT-10); return flushed_kb; } -- 1.7.10.4
>>> On 20.11.13 at 09:46, Bob Liu <lliubbo@gmail.com> wrote: (There's no overview mail [i.e. 00/16] here, so in its absence I'm replying here.) Isn't this a re-send of something you sent before, and that Konrad had sent a pull request for? This should be made clear, either by using "RESEND" after the PATCH tag in the subject, or by tagging on a v2 there. Also, didn't Konrad ack that whole series (in which case your resend should include these acks)? With the way it is now, I don't know what to do with it. Jan
On 11/20/2013 05:08 PM, Jan Beulich wrote: >>>> On 20.11.13 at 09:46, Bob Liu <lliubbo@gmail.com> wrote: > > (There's no overview mail [i.e. 00/16] here, so in its absence I'm > replying here.) I'm sorry for forgetting that! > > Isn't this a re-send of something you sent before, and that Konrad No, this is not a re-send. This series contains further cleanups based on my previous [00/11]. And I'm afraid I'll continue to send out many patches because there is still much work needed to finish in order to make tmem readable and maintainable. -- Thanks, -Bob
>>> On 20.11.13 at 10:19, Bob Liu <bob.liu@oracle.com> wrote: > On 11/20/2013 05:08 PM, Jan Beulich wrote: >> Isn't this a re-send of something you sent before, and that Konrad > > No, this is not re-send. > This series are further cleanups based on my previous [00/11]. In which case you should have made clear its relation to the earlier series (i.e. I assume this new one goes on top of the previous one). > And I'm afraid I'll continue to send out many patches because there are > still many work needed to finish in order to make tmem readable and > maintainable. Fine with me, albeit applying all those will (if I'm the one to do it) be handled with relatively low priority. Jan
Konrad Rzeszutek Wilk
2013-Nov-20 13:51 UTC
Re: [PATCH 01/16] tmem: cleanup: drop some debug code
On Wed, Nov 20, 2013 at 09:25:58AM +0000, Jan Beulich wrote:> >>> On 20.11.13 at 10:19, Bob Liu <bob.liu@oracle.com> wrote: > > On 11/20/2013 05:08 PM, Jan Beulich wrote: > >> Isn''t this a re-send of something you sent before, and that Konrad > > > > No, this is not re-send. > > This series are further cleanups based on my previous [00/11]. > > In which case you should have made clear that its relation to > the earlier series (i.e. I assume this new one goes on top of the > previous one). > > > And I''m afraid I''ll continue to send out many patches because there are > > still many work needed to finish in order to make tmem readable and > > maintainable. > > Fine with me, albeit applying all those will (if I''m the one to do it) > be handled with relatively low priority.Jan, if you are the one that will commit them - could you please wait for: a) either my Ack on them or, b) when I send a GIT PULL? Thank you!
>>> On 20.11.13 at 14:51, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > On Wed, Nov 20, 2013 at 09:25:58AM +0000, Jan Beulich wrote: >> >>> On 20.11.13 at 10:19, Bob Liu <bob.liu@oracle.com> wrote: >> > On 11/20/2013 05:08 PM, Jan Beulich wrote: >> >> Isn't this a re-send of something you sent before, and that Konrad >> > >> > No, this is not re-send. >> > This series are further cleanups based on my previous [00/11]. >> >> In which case you should have made clear that its relation to >> the earlier series (i.e. I assume this new one goes on top of the >> previous one). >> >> > And I'm afraid I'll continue to send out many patches because there are >> > still many work needed to finish in order to make tmem readable and >> > maintainable. >> >> Fine with me, albeit applying all those will (if I'm the one to do it) >> be handled with relatively low priority. > > Jan, if you are the one that will commit them - could you please wait for: > a) either my Ack on them or, b) when I send a GIT PULL? Of course - did my reply sound as if I meant to skip you? Jan
Konrad Rzeszutek Wilk
2013-Nov-20 18:46 UTC
Re: [PATCH 01/16] tmem: cleanup: drop some debug code
On Wed, Nov 20, 2013 at 02:21:49PM +0000, Jan Beulich wrote: > >>> On 20.11.13 at 14:51, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > On Wed, Nov 20, 2013 at 09:25:58AM +0000, Jan Beulich wrote: > >> >>> On 20.11.13 at 10:19, Bob Liu <bob.liu@oracle.com> wrote: > >> > On 11/20/2013 05:08 PM, Jan Beulich wrote: > >> >> Isn't this a re-send of something you sent before, and that Konrad > >> > > >> > No, this is not re-send. > >> > This series are further cleanups based on my previous [00/11]. > >> > >> In which case you should have made clear that its relation to > >> the earlier series (i.e. I assume this new one goes on top of the > >> previous one). > >> > >> > And I'm afraid I'll continue to send out many patches because there are > >> > still many work needed to finish in order to make tmem readable and > >> > maintainable. > >> > >> Fine with me, albeit applying all those will (if I'm the one to do it) > >> be handled with relatively low priority. > > > > Jan, if you are the one that will commit them - could you please wait for: > > a) either my Ack on them or, b) when I send a GIT PULL? > > Of course - did my reply sound as if I meant to skip you? You did not mention whether you would or would not. Hence the extra precaution on my side. Thanks!
Konrad Rzeszutek Wilk
2013-Nov-22 17:38 UTC
Re: [PATCH 03/16] tmem: cleanup: rm unused tmem_op
On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote:> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used.You also need to delete them in include/public/tmem.h and rev up the TMEM_SPEC_VERSION I think?> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 21 --------------------- > 1 file changed, 21 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index 081772e..c272930 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -2646,11 +2646,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) > rc = do_tmem_new_pool(TMEM_CLI_ID_NULL, 0, op.u.creat.flags, > op.u.creat.uuid[0], op.u.creat.uuid[1]); > break; > - case TMEM_NEW_PAGE: > - tmem_ensure_avail_pages(); > - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, 0, > - tmem_cli_buf_null); > - break; > case TMEM_PUT_PAGE: > tmem_ensure_avail_pages(); > rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, > @@ -2676,22 +2671,6 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) > flush = 1; > rc = do_tmem_destroy_pool(op.pool_id); > break; > - case TMEM_READ: > - rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > - op.u.gen.tmem_offset, op.u.gen.pfn_offset, > - op.u.gen.len, tmem_cli_buf_null); > - break; > - case TMEM_WRITE: > - rc = do_tmem_put(pool, oidp, > - op.u.gen.index, op.u.gen.cmfn, > - op.u.gen.tmem_offset, op.u.gen.pfn_offset, > - op.u.gen.len, tmem_cli_buf_null); > - break; > - case TMEM_XCHG: > - /* need to hold global lock to ensure xchg is atomic */ > - tmem_client_warn("tmem_xchg op not implemented yet\n"); > - rc = 0; > - break;Hm, they end up at the default which has ''rc=0''. That is incorrect. It should actually be -ENOSYS.> default: > tmem_client_warn("tmem: op %d not implemented\n", op.cmd); > rc = 0; > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 17:54 UTC
Re: [PATCH 04/16] tmem: cleanup: rm unneeded parameters from put path
On Wed, Nov 20, 2013 at 04:46:13PM +0800, Bob Liu wrote:> tmem only takes a page as unit, so parameters tmem_offset, pfn_offset and len > are meanless, this patch remove those parameters from do_tmem_put() related path^^^^^^^^ ^-s Not exactly right. They were used by TMEM_WRITE and TMEM_READ, but your other patch "tmem: cleanup: rm unused tmem_op" removed them. Looking a bit on Google I see this thread: http://marc.info/?l=xen-devel&m=136975816209598 which implies that the Windows GPL driver is using it? Did you check its code? (See http://xenbits.xen.org/ext/win-pvdrivers/) If it is not in the code, can you also CC James Harper on it so he knows not use those ops?> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 32 +++++++++++++++----------------- > xen/common/tmem_xen.c | 27 +++++---------------------- > xen/include/xen/tmem_xen.h | 3 +-- > 3 files changed, 21 insertions(+), 41 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index c272930..e2e69bf 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -1421,7 +1421,6 @@ out: > } > > static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, > - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, > tmem_cli_va_param_t clibuf) > { > struct tmem_pool *pool; > @@ -1442,7 +1441,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, > if ( client->live_migrating ) > goto failed_dup; /* no dups allowed when migrating */ > /* can we successfully manipulate pgp to change out the data? 
*/ > - if ( len != 0 && client->compress && pgp->size != 0 ) > + if ( client->compress && pgp->size != 0 ) > { > ret = do_tmem_put_compress(pgp, cmfn, clibuf); > if ( ret == 1 ) > @@ -1461,9 +1460,7 @@ copy_uncompressed: > if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) > goto failed_dup; > pgp->size = 0; > - /* tmem_copy_from_client properly handles len==0 and offsets != 0 */ > - ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, > - tmem_cli_buf_null); > + ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); > if ( ret < 0 ) > goto bad_copy; > if ( tmem_dedup_enabled() && !is_persistent(pool) ) > @@ -1509,11 +1506,9 @@ cleanup: > return ret; > } > > - > static int do_tmem_put(struct tmem_pool *pool, > struct oid *oidp, uint32_t index, > - xen_pfn_t cmfn, pagesize_t tmem_offset, > - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) > + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) > { > struct tmem_object_root *obj = NULL, *objfound = NULL, *objnew = NULL; > struct tmem_page_descriptor *pgp = NULL, *pgpdel = NULL; > @@ -1527,8 +1522,7 @@ static int do_tmem_put(struct tmem_pool *pool, > { > ASSERT_SPINLOCK(&objfound->obj_spinlock); > if ((pgp = pgp_lookup_in_obj(objfound, index)) != NULL) > - return do_tmem_dup_put(pgp, cmfn, tmem_offset, pfn_offset, len, > - clibuf); > + return do_tmem_dup_put(pgp, cmfn, clibuf); > } > > /* no puts allowed into a frozen pool (except dup puts) */ > @@ -1560,7 +1554,7 @@ static int do_tmem_put(struct tmem_pool *pool, > pgp->index = index; > pgp->size = 0; > > - if ( len != 0 && client->compress ) > + if ( client->compress ) > { > ASSERT(pgp->pfp == NULL); > ret = do_tmem_put_compress(pgp, cmfn, clibuf); > @@ -1586,9 +1580,7 @@ copy_uncompressed: > ret = -ENOMEM; > goto delete_and_free; > } > - /* tmem_copy_from_client properly handles len==0 (TMEM_NEW_PAGE) */ > - ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_offset, pfn_offset, len, > - clibuf); > + ret = 
tmem_copy_from_client(pgp->pfp, cmfn, clibuf); > if ( ret < 0 ) > goto bad_copy; > if ( tmem_dedup_enabled() && !is_persistent(pool) ) > @@ -2440,10 +2432,17 @@ static int tmemc_restore_put_page(int cli_id, uint32_t pool_id, struct oid *oidp > struct client *client = tmem_client_from_cli_id(cli_id); > struct tmem_pool *pool = (client == NULL || pool_id >= MAX_POOLS_PER_DOMAIN) > ? NULL : client->pools[pool_id]; > + unsigned int pagesize; > > if ( pool == NULL ) > return -1; > - return do_tmem_put(pool, oidp, index, 0, 0, 0, bufsize, buf); > + pagesize = 1 << (pool->pageshift + 12);Ahem! 12? #define please.> + if (bufsize != pagesize) { > + tmem_client_err("tmem: %s bufsize %d not page aligend(%d)\n", __func__, > + bufsize, pagesize); > + return -1; > + } > + return do_tmem_put(pool, oidp, index, 0, buf); > } > > static int tmemc_restore_flush_page(int cli_id, uint32_t pool_id, struct oid *oidp, > @@ -2648,8 +2647,7 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) > break; > case TMEM_PUT_PAGE: > tmem_ensure_avail_pages(); > - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, 0, 0, > - PAGE_SIZE, tmem_cli_buf_null); > + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); > if (rc == 1) succ_put = 1; > else non_succ_put = 1; > break; > diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c > index af67703..165c7cf 100644 > --- a/xen/common/tmem_xen.c > +++ b/xen/common/tmem_xen.c > @@ -100,25 +100,16 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp, > #endif > > EXPORT int tmem_copy_from_client(struct page_info *pfp, > - xen_pfn_t cmfn, pagesize_t tmem_offset, > - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) > + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) > { > unsigned long tmem_mfn, cli_mfn = 0; > char *tmem_va, *cli_va = NULL; > struct page_info *cli_pfp = NULL; > int rc = 1; > > - if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE ) > - return -EINVAL; > 
ASSERT(pfp != NULL); > tmem_mfn = page_to_mfn(pfp); > tmem_va = map_domain_page(tmem_mfn); > - if ( tmem_offset == 0 && pfn_offset == 0 && len == 0 ) > - { > - memset(tmem_va, 0, PAGE_SIZE); > - unmap_domain_page(tmem_va); > - return 1; > - } > if ( guest_handle_is_null(clibuf) ) > { > cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0); > @@ -129,21 +120,13 @@ EXPORT int tmem_copy_from_client(struct page_info *pfp, > } > } > smp_mb(); > - if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) > - memcpy(tmem_va, cli_va, PAGE_SIZE); > - else if ( (tmem_offset+len <= PAGE_SIZE) && > - (pfn_offset+len <= PAGE_SIZE) ) > + if ( cli_va ) > { > - if ( cli_va ) > - memcpy(tmem_va + tmem_offset, cli_va + pfn_offset, len); > - else if ( copy_from_guest_offset(tmem_va + tmem_offset, clibuf, > - pfn_offset, len) ) > - rc = -EFAULT; > + memcpy(tmem_va, cli_va, PAGE_SIZE); > + cli_put_page(cli_va, cli_pfp, cli_mfn, 0); > } > - else if ( len ) > + else > rc = -EINVAL; > - if ( cli_va ) > - cli_put_page(cli_va, cli_pfp, cli_mfn, 0); > unmap_domain_page(tmem_va); > return rc; > } > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > index 4e4c227..25ce268 100644 > --- a/xen/include/xen/tmem_xen.h > +++ b/xen/include/xen/tmem_xen.h > @@ -385,8 +385,7 @@ int tmem_decompress_to_client(xen_pfn_t, void *, size_t, > int tmem_compress_from_client(xen_pfn_t, void **, size_t *, > tmem_cli_va_param_t); > > -int tmem_copy_from_client(struct page_info *, xen_pfn_t, pagesize_t tmem_offset, > - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); > +int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); > > int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset, > pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 17:55 UTC
Re: [PATCH 05/16] tmem: cleanup: rm unneeded parameters from get path
On Wed, Nov 20, 2013 at 04:46:14PM +0800, Bob Liu wrote:> tmem only takes a page as unit, so parameters tmem_offset, pfn_offset and len > are meanless, this patch remove them from do_tmem_get() related path.Could you just squash this and the previous one please?> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 19 ++++++++----------- > xen/common/tmem_xen.c | 18 ++++-------------- > xen/include/xen/tmem_xen.h | 3 +-- > 3 files changed, 13 insertions(+), 27 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index e2e69bf..de3559d 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -330,8 +330,7 @@ static int pcd_copy_to_client(xen_pfn_t cmfn, struct tmem_page_descriptor *pgp) > else if ( tmem_tze_enabled() && pcd->size < PAGE_SIZE ) > ret = tmem_copy_tze_to_client(cmfn, pcd->tze, pcd->size); > else > - ret = tmem_copy_to_client(cmfn, pcd->pfp, 0, 0, PAGE_SIZE, > - tmem_cli_buf_null); > + ret = tmem_copy_to_client(cmfn, pcd->pfp, tmem_cli_buf_null); > tmem_read_unlock(&pcd_tree_rwlocks[firstbyte]); > return ret; > } > @@ -1647,8 +1646,7 @@ free: > } > > static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > - xen_pfn_t cmfn, pagesize_t tmem_offset, > - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t clibuf) > + xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) > { > struct tmem_object_root *obj; > struct tmem_page_descriptor *pgp; > @@ -1680,12 +1678,10 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > rc = pcd_copy_to_client(cmfn, pgp); > else if ( pgp->size != 0 ) > { > - rc = tmem_decompress_to_client(cmfn, pgp->cdata, > - pgp->size, clibuf); > + rc = tmem_decompress_to_client(cmfn, pgp->cdata, pgp->size, clibuf); > } > else > - rc = tmem_copy_to_client(cmfn, pgp->pfp, tmem_offset, > - pfn_offset, len, clibuf); > + rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf); > if ( rc <= 0 ) > goto bad_copy; > > @@ -2377,7 +2373,7 @@ static int 
tmemc_save_get_next_page(int cli_id, uint32_t pool_id, > h.index = pgp->index; > tmem_copy_to_client_buf(buf, &h, 1); > tmem_client_buf_add(buf, sizeof(h)); > - ret = do_tmem_get(pool, &oid, pgp->index, 0, 0, 0, pagesize, buf); > + ret = do_tmem_get(pool, &oid, pgp->index, 0, buf); > > out: > tmem_spin_unlock(&pers_lists_spinlock); > @@ -2647,13 +2643,14 @@ EXPORT long do_tmem_op(tmem_cli_op_t uops) > break; > case TMEM_PUT_PAGE: > tmem_ensure_avail_pages(); > - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, tmem_cli_buf_null); > + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > + tmem_cli_buf_null); > if (rc == 1) succ_put = 1; > else non_succ_put = 1; > break; > case TMEM_GET_PAGE: > rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > - 0, 0, PAGE_SIZE, tmem_cli_buf_null); > + tmem_cli_buf_null); > if (rc == 1) succ_get = 1; > else non_succ_get = 1; > break; > diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c > index 165c7cf..efc2259 100644 > --- a/xen/common/tmem_xen.c > +++ b/xen/common/tmem_xen.c > @@ -164,7 +164,6 @@ EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, > } > > EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > - pagesize_t tmem_offset, pagesize_t pfn_offset, pagesize_t len, > tmem_cli_va_param_t clibuf) > { > unsigned long tmem_mfn, cli_mfn = 0; > @@ -172,8 +171,6 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > struct page_info *cli_pfp = NULL; > int rc = 1; > > - if ( tmem_offset > PAGE_SIZE || pfn_offset > PAGE_SIZE || len > PAGE_SIZE ) > - return -EINVAL; > ASSERT(pfp != NULL); > if ( guest_handle_is_null(clibuf) ) > { > @@ -183,21 +180,14 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > } > tmem_mfn = page_to_mfn(pfp); > tmem_va = map_domain_page(tmem_mfn); > - if ( len == PAGE_SIZE && !tmem_offset && !pfn_offset && cli_va ) > - memcpy(cli_va, tmem_va, PAGE_SIZE); > - else if ( (tmem_offset+len <= PAGE_SIZE) && 
(pfn_offset+len <= PAGE_SIZE) ) > + if ( cli_va ) > { > - if ( cli_va ) > - memcpy(cli_va + pfn_offset, tmem_va + tmem_offset, len); > - else if ( copy_to_guest_offset(clibuf, pfn_offset, > - tmem_va + tmem_offset, len) ) > - rc = -EFAULT; > + memcpy(cli_va, tmem_va, PAGE_SIZE); > + cli_put_page(cli_va, cli_pfp, cli_mfn, 1); > } > - else if ( len ) > + else > rc = -EINVAL; > unmap_domain_page(tmem_va); > - if ( cli_va ) > - cli_put_page(cli_va, cli_pfp, cli_mfn, 1); > smp_mb(); > return rc; > } > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > index 25ce268..ae6acf3 100644 > --- a/xen/include/xen/tmem_xen.h > +++ b/xen/include/xen/tmem_xen.h > @@ -387,8 +387,7 @@ int tmem_compress_from_client(xen_pfn_t, void **, size_t *, > > int tmem_copy_from_client(struct page_info *, xen_pfn_t, tmem_cli_va_param_t); > > -int tmem_copy_to_client(xen_pfn_t, struct page_info *, pagesize_t tmem_offset, > - pagesize_t pfn_offset, pagesize_t len, tmem_cli_va_param_t); > +int tmem_copy_to_client(xen_pfn_t, struct page_info *, tmem_cli_va_param_t); > > extern int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, pagesize_t len); > extern void *tmem_persistent_pool_page_get(unsigned long size); > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:04 UTC
Re: [PATCH 06/16] tmem: cleanup: reorg do_tmem_put()
On Wed, Nov 20, 2013 at 04:46:15PM +0800, Bob Liu wrote:> Reorg code logic of do_tmem_put() to make it more readable and also drop useless > local parameters. > > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 87 ++++++++++++++++++++++++++++++----------------------- > 1 file changed, 49 insertions(+), 38 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index de3559d..1c9dd5a 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -1509,50 +1509,56 @@ static int do_tmem_put(struct tmem_pool *pool, > struct oid *oidp, uint32_t index, > xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) > { > - struct tmem_object_root *obj = NULL, *objfound = NULL, *objnew = NULL; > - struct tmem_page_descriptor *pgp = NULL, *pgpdel = NULL; > - struct client *client = pool->client; > - int ret = client->frozen ? -EFROZEN : -ENOMEM; > + struct tmem_object_root *obj = NULL; > + struct tmem_page_descriptor *pgp = NULL; > + struct client *client; > + int ret, newobj = 0; > > ASSERT(pool != NULL); > + client = pool->client; > + ret = client->frozen ? -EFROZEN : -ENOMEM; > pool->puts++; > /* does page already exist (dup)? 
if so, handle specially */ > - if ( (obj = objfound = obj_find(pool,oidp)) != NULL ) > + if ( (obj = obj_find(pool,oidp)) != NULL ) > { > - ASSERT_SPINLOCK(&objfound->obj_spinlock); > - if ((pgp = pgp_lookup_in_obj(objfound, index)) != NULL) > + if ((pgp = pgp_lookup_in_obj(obj, index)) != NULL) > + { > return do_tmem_dup_put(pgp, cmfn, clibuf); > + } > + else > + { > + /* no puts allowed into a frozen pool (except dup puts) */ > + if ( client->frozen ) > + goto unlock_obj; > + } > } > - > - /* no puts allowed into a frozen pool (except dup puts) */ > - if ( client->frozen ) > - goto free; > - > - if ( (objfound == NULL) ) > + else > { > + /* no puts allowed into a frozen pool (except dup puts) */ > + if ( client->frozen ) > + return ret; > tmem_write_lock(&pool->pool_rwlock); > - if ( (obj = objnew = obj_new(pool,oidp)) == NULL ) > + if ( (obj = obj_new(pool,oidp)) == NULL ) > { > tmem_write_unlock(&pool->pool_rwlock); > return -ENOMEM; > } > - ASSERT_SPINLOCK(&objnew->obj_spinlock); > + newobj= 1;Space before ''=''> tmem_write_unlock(&pool->pool_rwlock); > } > > - ASSERT((obj != NULL)&&((objnew==obj)||(objfound==obj))&&(objnew!=objfound)); > + /* When arrive here, we have a spinlocked obj for use */ > ASSERT_SPINLOCK(&obj->obj_spinlock); > if ( (pgp = pgp_alloc(obj)) == NULL ) > - goto free; > + goto unlock_obj; > > ret = pgp_add_to_obj(obj, index, pgp); > if ( ret == -ENOMEM ) > /* warning, may result in partially built radix tree ("stump") */ > - goto free; > - ASSERT(ret != -EEXIST); > + goto free_pgp; > + > pgp->index = index; > pgp->size = 0; > -Stray. 
But that might not really matter as this is a cleanup patch after all.> if ( client->compress ) > { > ASSERT(pgp->pfp == NULL); > @@ -1562,7 +1568,7 @@ static int do_tmem_put(struct tmem_pool *pool, > if ( ret == -ENOMEM ) > { > client->compress_nomem++; > - goto delete_and_free; > + goto del_pgp_from_obj; > } > if ( ret == 0 ) > { > @@ -1577,15 +1583,16 @@ copy_uncompressed: > if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL ) > { > ret = -ENOMEM; > - goto delete_and_free; > + goto del_pgp_from_obj; > } > ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf); > if ( ret < 0 ) > goto bad_copy; > + > if ( tmem_dedup_enabled() && !is_persistent(pool) ) > { > if ( pcd_associate(pgp,NULL,0) == -ENOMEM ) > - goto delete_and_free; > + goto del_pgp_from_obj; > } > > insert_page: > @@ -1601,18 +1608,23 @@ insert_page: > if (++client->eph_count > client->eph_count_max) > client->eph_count_max = client->eph_count; > tmem_spin_unlock(&eph_lists_spinlock); > - } else { /* is_persistent */ > + } > + else > + { /* is_persistent */So StyleGuide fixes too, in which case you might as well:> tmem_spin_lock(&pers_lists_spinlock); > list_add_tail(&pgp->us.pool_pers_pages, > &pool->persistent_page_list); > tmem_spin_unlock(&pers_lists_spinlock); > } > - ASSERT( ((objnew==obj)||(objfound==obj)) && (objnew!=objfound)); > + > if ( is_shared(pool) ) > obj->last_client = client->cli_id; > obj->no_evict = 0; > + > + /* free the obj spinlock */ > tmem_spin_unlock(&obj->obj_spinlock); > pool->good_puts++; > + > if ( is_persistent(pool) ) > client->succ_pers_puts++; > else > @@ -1622,25 +1634,24 @@ insert_page: > bad_copy: > failed_copies++; > > -delete_and_free: > +del_pgp_from_obj: > ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1)); > - pgpdel = pgp_delete_from_obj(obj, pgp->index); > - ASSERT(pgp == pgpdel); > + pgp_delete_from_obj(obj, pgp->index); > > -free: > - if ( pgp ) > - pgp_delete(pgp,0); > - if ( objfound ) > - { > - objfound->no_evict = 0; > - 
tmem_spin_unlock(&objfound->obj_spinlock); > - } > - if ( objnew ) > +free_pgp: > + pgp_delete(pgp,0);.. fix that.> +unlock_obj: > + if ( newobj ) > { > tmem_write_lock(&pool->pool_rwlock); > - obj_free(objnew,0); > + obj_free(obj,0);.. and that.> tmem_write_unlock(&pool->pool_rwlock); > } > + else > + { > + obj->no_evict = 0; > + tmem_spin_unlock(&obj->obj_spinlock); > + } > pool->no_mem_puts++; > return ret; > } > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:05 UTC
Re: [PATCH 08/16] tmem: cleanup: rm useless EXPORT/FORWARD define
On Wed, Nov 20, 2013 at 04:46:17PM +0800, Bob Liu wrote:> It''s meanless to define EXPORT/FORWARD and nobody use them.^^^^^^^^ meaningless> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 16 ++++++---------- > xen/common/tmem_xen.c | 38 ++++++++++++++++++-------------------- > 2 files changed, 24 insertions(+), 30 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index f37e054..6afbdbc 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -24,9 +24,6 @@ > #include <xen/list.h> > #include <xen/init.h> > > -#define EXPORT /* indicates code other modules are dependent upon */ > -#define FORWARD > - > #define TMEM_SPEC_VERSION 1 > > /* global statistics (none need to be locked) */ > @@ -212,8 +209,8 @@ static int tmem_initialized = 0; > > /************ CONCURRENCY ***********************************************/ > > -EXPORT DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ > -EXPORT DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ > +DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ > +DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ > static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ > static DEFINE_SPINLOCK(pers_lists_spinlock); > > @@ -2538,7 +2535,7 @@ static int do_tmem_control(struct tmem_op *op) > > /************ EXPORTed FUNCTIONS **************************************/ > > -EXPORT long do_tmem_op(tmem_cli_op_t uops) > +long do_tmem_op(tmem_cli_op_t uops) > { > struct tmem_op op; > struct client *client = tmem_client_from_current(); > @@ -2703,7 +2700,7 @@ out: > } > > /* this should be called when the host is destroying a client */ > -EXPORT void tmem_destroy(void *v) > +void tmem_destroy(void *v) > { > struct client *client = (struct client *)v; > > @@ -2732,7 +2729,7 @@ EXPORT void tmem_destroy(void *v) > } > > /* freezing all pools guarantees that no additional memory will be consumed */ > -EXPORT void tmem_freeze_all(unsigned char 
key) > +void tmem_freeze_all(unsigned char key) > { > static int freeze = 0; > > @@ -2751,8 +2748,7 @@ EXPORT void tmem_freeze_all(unsigned char key) > } > > #define MAX_EVICTS 10 /* should be variable or set via TMEMC_ ?? */ > - > -EXPORT void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > +void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > { > struct page_info *pfp; > unsigned long evicts_per_relinq = 0; > diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c > index efc2259..fbd1acc 100644 > --- a/xen/common/tmem_xen.c > +++ b/xen/common/tmem_xen.c > @@ -14,27 +14,25 @@ > #include <xen/cpu.h> > #include <xen/init.h> > > -#define EXPORT /* indicates code other modules are dependent upon */ > - > -EXPORT bool_t __read_mostly opt_tmem = 0; > +bool_t __read_mostly opt_tmem = 0; > boolean_param("tmem", opt_tmem); > > -EXPORT bool_t __read_mostly opt_tmem_compress = 0; > +bool_t __read_mostly opt_tmem_compress = 0; > boolean_param("tmem_compress", opt_tmem_compress); > > -EXPORT bool_t __read_mostly opt_tmem_dedup = 0; > +bool_t __read_mostly opt_tmem_dedup = 0; > boolean_param("tmem_dedup", opt_tmem_dedup); > > -EXPORT bool_t __read_mostly opt_tmem_tze = 0; > +bool_t __read_mostly opt_tmem_tze = 0; > boolean_param("tmem_tze", opt_tmem_tze); > > -EXPORT bool_t __read_mostly opt_tmem_shared_auth = 0; > +bool_t __read_mostly opt_tmem_shared_auth = 0; > boolean_param("tmem_shared_auth", opt_tmem_shared_auth); > > -EXPORT int __read_mostly opt_tmem_lock = 0; > +int __read_mostly opt_tmem_lock = 0; > integer_param("tmem_lock", opt_tmem_lock); > > -EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0); > +atomic_t freeable_page_count = ATOMIC_INIT(0); > > /* these are a concurrency bottleneck, could be percpu and dynamically > * allocated iff opt_tmem_compress */ > @@ -99,7 +97,7 @@ static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp, > } > #endif > > -EXPORT int tmem_copy_from_client(struct page_info *pfp, 
> +int tmem_copy_from_client(struct page_info *pfp, > xen_pfn_t cmfn, tmem_cli_va_param_t clibuf) > { > unsigned long tmem_mfn, cli_mfn = 0; > @@ -131,7 +129,7 @@ EXPORT int tmem_copy_from_client(struct page_info *pfp, > return rc; > } > > -EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, > +int tmem_compress_from_client(xen_pfn_t cmfn, > void **out_va, size_t *out_len, tmem_cli_va_param_t clibuf) > { > int ret = 0; > @@ -163,7 +161,7 @@ EXPORT int tmem_compress_from_client(xen_pfn_t cmfn, > return 1; > } > > -EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > +int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > tmem_cli_va_param_t clibuf) > { > unsigned long tmem_mfn, cli_mfn = 0; > @@ -192,7 +190,7 @@ EXPORT int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp, > return rc; > } > > -EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, > +int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, > size_t size, tmem_cli_va_param_t clibuf) > { > unsigned long cli_mfn = 0; > @@ -221,7 +219,7 @@ EXPORT int tmem_decompress_to_client(xen_pfn_t cmfn, void *tmem_va, > return 1; > } > > -EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, > +int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, > pagesize_t len) > { > void *cli_va; > @@ -245,12 +243,12 @@ EXPORT int tmem_copy_tze_to_client(xen_pfn_t cmfn, void *tmem_va, > > /****************** XEN-SPECIFIC MEMORY ALLOCATION ********************/ > > -EXPORT struct xmem_pool *tmem_mempool = 0; > -EXPORT unsigned int tmem_mempool_maxalloc = 0; > +struct xmem_pool *tmem_mempool = 0; > +unsigned int tmem_mempool_maxalloc = 0; > > -EXPORT DEFINE_SPINLOCK(tmem_page_list_lock); > -EXPORT PAGE_LIST_HEAD(tmem_page_list); > -EXPORT unsigned long tmem_page_list_pages = 0; > +DEFINE_SPINLOCK(tmem_page_list_lock); > +PAGE_LIST_HEAD(tmem_page_list); > +unsigned long tmem_page_list_pages = 0; > > static noinline void *tmem_mempool_page_get(unsigned long 
size) > { > @@ -352,7 +350,7 @@ static struct notifier_block cpu_nfb = { > .notifier_call = cpu_callback > }; > > -EXPORT int __init tmem_init(void) > +int __init tmem_init(void) > { > unsigned int cpu; > > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:07 UTC
Re: [PATCH 09/16] tmem: cleanup: drop tmemc_list() temporary
On Wed, Nov 20, 2013 at 04:46:18PM +0800, Bob Liu wrote:> tmemc_list() collects a lot of statistics which are not well defined, > unreadable and most of them are useless. > > This patch drop those mess code and leave TMEMC_LIST subops unimplemented > temporary.Could you mention the title of which patch puts it back? ...and> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 171 +---------------------------------------------------- > 1 file changed, 2 insertions(+), 169 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index 6afbdbc..48b67bf 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -29,12 +29,10 @@ > /* global statistics (none need to be locked) */ > static unsigned long total_tmem_ops = 0; > static unsigned long errored_tmem_ops = 0; > -static unsigned long total_flush_pool = 0; > static unsigned long alloc_failed = 0, alloc_page_failed = 0; > static unsigned long evicted_pgs = 0, evict_attempts = 0; > static unsigned long relinq_pgs = 0, relinq_attempts = 0; > static unsigned long max_evicts_per_relinq = 0; > -static unsigned long low_on_memory = 0; > static unsigned long deduped_puts = 0; > static unsigned long tot_good_eph_puts = 0; > static int global_obj_count_max = 0; > @@ -1980,176 +1978,11 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) > return flushed_kb; > } > > -/* > - * These tmemc_list* routines output lots of stats in a format that is > - * intended to be program-parseable, not human-readable. Further, by > - * tying each group of stats to a line format indicator (e.g. G= for > - * global stats) and each individual stat to a two-letter specifier > - * (e.g. Ec:nnnnn in the G= line says there are nnnnn pages in the > - * global ephemeral pool), it should allow the stats reported to be > - * forward and backwards compatible as tmem evolves. 
> - */ > -#define BSIZE 1024 > - > -static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, > - int off, uint32_t len, bool_t use_long) > -{ > - char info[BSIZE]; > - int i, n = 0, sum = 0; > - struct tmem_pool *p; > - bool_t s; > - > - n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,ca:%d,co:%d,fr:%d," > - "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c", > - c->cli_id, c->weight, c->cap, c->compress, c->frozen, > - c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets, > - use_long ? '','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n", > - c->eph_count, c->eph_count_max, > - c->compressed_pages, c->compressed_sum_size, > - c->compress_poor, c->compress_nomem); > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) > - { > - if ( (p = c->pools[i]) == NULL ) > - continue; > - s = is_shared(p); > - n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d," > - "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c", > - c->cli_id, p->pool_id, > - is_persistent(p) ? ''P'' : ''E'', s ? ''S'' : ''P'', > - (uint64_t)(s ? p->uuid[0] : 0), > - (uint64_t)(s ? p->uuid[1] : 0LL), > - use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," > - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," > - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", > - _atomic_read(p->pgp_count), p->pgp_count_max, > - p->obj_count, p->obj_count_max, > - p->objnode_count, p->objnode_count_max, > - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, > - p->no_mem_puts, > - p->found_gets, p->gets, > - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - } > - return sum; > -} > - > -static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, > - bool_t use_long) > -{ > - char info[BSIZE]; > - int i, n = 0, sum = 0; > - struct tmem_pool *p; > - struct share_list *sl; > - > - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) > - { > - if ( (p = global_shared_pools[i]) == NULL ) > - continue; > - n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64, > - i, is_persistent(p) ? ''P'' : ''E'', > - is_shared(p) ? ''S'' : ''P'', > - p->uuid[0], p->uuid[1]); > - list_for_each_entry(sl,&p->share_list, share_list) > - n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id); > - n += scnprintf(info+n,BSIZE-n,"%c", use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," > - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," > - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", > - _atomic_read(p->pgp_count), p->pgp_count_max, > - p->obj_count, p->obj_count_max, > - p->objnode_count, p->objnode_count_max, > - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, > - p->no_mem_puts, > - p->found_gets, p->gets, > - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - } > - return sum; > -} > - > -static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, > - uint32_t len, bool_t use_long) > -{ > - char info[BSIZE]; > - int n = 0, sum = 0; > - > - n = scnprintf(info+n,BSIZE-n,"T="); > - n--; /* overwrite trailing comma */ > - n += scnprintf(info+n,BSIZE-n,"\n"); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - return sum; > -} > - > -static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, > - bool_t use_long) > -{ > - char info[BSIZE]; > - int n = 0, sum = off; > - > - n += scnprintf(info,BSIZE,"G=" > - "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," > - "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", > - total_tmem_ops, errored_tmem_ops, failed_copies, > - alloc_failed, alloc_page_failed, tmem_page_list_pages, > - low_on_memory, evicted_pgs, > - evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq, > - total_flush_pool, use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," > - "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", > - global_eph_count, global_eph_count_max, > - _atomic_read(global_obj_count), global_obj_count_max, > - _atomic_read(global_rtree_node_count), global_rtree_node_count_max, > - _atomic_read(global_pgp_count), global_pgp_count_max, > - _atomic_read(global_page_count), global_page_count_max, > - _atomic_read(global_pcd_count), global_pcd_count_max, > - tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - return sum; > -} > - > static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len, > bool_t use_long) > { > - struct client *client; > - int off = 0; > - > - if ( cli_id == TMEM_CLI_ID_NULL ) { > - off = tmemc_list_global(buf,0,len,use_long); > - off += tmemc_list_shared(buf,off,len-off,use_long); > - list_for_each_entry(client,&global_client_list,client_list) > - off += tmemc_list_client(client, buf, off, len-off, use_long); > - off += tmemc_list_global_perf(buf,off,len-off,use_long); > - } > - else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) > - return -1; > - else > - off = tmemc_list_client(client, buf, 0, len, use_long); > - > - return 0; > + tmem_client_info("tmemc_list is not implemented yet!\n"); > + return -1;-ENOSYS.> } > > static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1) > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:08 UTC
Re: [PATCH 10/16] tmem: cleanup: drop runtime statistics
On Wed, Nov 20, 2013 at 04:46:19PM +0800, Bob Liu wrote:> They are so many runtime statistics which are unreadable and hard to maintain. > This patch drop them temporary for code cleanup until we have clear define what > runtime statistics are really needed.We should really also have a TODO so that it is not forgotton.> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 187 ++++------------------------------------------------- > 1 file changed, 13 insertions(+), 174 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index 48b67bf..c1b3e21 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -26,27 +26,7 @@ > > #define TMEM_SPEC_VERSION 1 > > -/* global statistics (none need to be locked) */ > -static unsigned long total_tmem_ops = 0; > -static unsigned long errored_tmem_ops = 0; > -static unsigned long alloc_failed = 0, alloc_page_failed = 0; > -static unsigned long evicted_pgs = 0, evict_attempts = 0; > -static unsigned long relinq_pgs = 0, relinq_attempts = 0; > -static unsigned long max_evicts_per_relinq = 0; > -static unsigned long deduped_puts = 0; > -static unsigned long tot_good_eph_puts = 0; > -static int global_obj_count_max = 0; > -static int global_pgp_count_max = 0; > -static int global_pcd_count_max = 0; > -static int global_page_count_max = 0; > -static int global_rtree_node_count_max = 0; > -static long global_eph_count_max = 0; > -static unsigned long failed_copies; > -static unsigned long pcd_tot_tze_size = 0; > -static unsigned long pcd_tot_csize = 0; > - > /************ CORE DATA STRUCTURES ************************************/ > - > #define MAX_POOLS_PER_DOMAIN 16 > #define MAX_GLOBAL_SHARED_POOLS 16 > > @@ -59,7 +39,7 @@ struct client { > struct domain *domain; > struct xmem_pool *persistent_pool; > struct list_head ephemeral_page_list; > - long eph_count, eph_count_max; > + long eph_count; > domid_t cli_id; > uint32_t weight; > uint32_t cap; > @@ -71,12 +51,6 @@ struct client { > bool_t 
was_frozen; > struct list_head persistent_invalidated_list; > struct tmem_page_descriptor *cur_pgp; > - /* statistics collection */ > - unsigned long compress_poor, compress_nomem; > - unsigned long compressed_pages; > - uint64_t compressed_sum_size; > - uint64_t total_cycles; > - unsigned long succ_pers_puts, succ_eph_gets, succ_pers_gets; > /* shared pool authentication */ > uint64_t shared_auth_uuid[MAX_GLOBAL_SHARED_POOLS][2]; > }; > @@ -107,17 +81,6 @@ struct tmem_pool { > struct tmem_page_descriptor *cur_pgp; > /* statistics collection */ > atomic_t pgp_count; > - int pgp_count_max; > - long obj_count; /* atomicity depends on pool_rwlock held for write */ > - long obj_count_max; > - unsigned long objnode_count, objnode_count_max; > - uint64_t sum_life_cycles; > - uint64_t sum_evicted_cycles; > - unsigned long puts, good_puts, no_mem_puts; > - unsigned long dup_puts_flushed, dup_puts_replaced; > - unsigned long gets, found_gets; > - unsigned long flushs, flushs_found; > - unsigned long flush_objs, flush_objs_found; > }; > > #define is_persistent(_p) (_p->persistent) > @@ -130,7 +93,6 @@ struct oid { > struct tmem_object_root { > struct oid oid; > struct rb_node rb_tree_node; /* protected by pool->pool_rwlock */ > - unsigned long objnode_count; /* atomicity depends on obj_spinlock */ > long pgp_count; /* atomicity depends on obj_spinlock */ > struct radix_tree_root tree_root; /* tree of pages within object */ > struct tmem_pool *pool; > @@ -196,7 +158,6 @@ struct rb_root pcd_tree_roots[256]; /* choose based on first byte of page */ > rwlock_t pcd_tree_rwlocks[256]; /* poor man''s concurrency for now */ > > static LIST_HEAD(global_ephemeral_page_list); /* all pages in ephemeral pools */ > - > static LIST_HEAD(global_client_list); > static LIST_HEAD(global_pool_list); > > @@ -206,7 +167,6 @@ static atomic_t client_weight_total = ATOMIC_INIT(0); > static int tmem_initialized = 0; > > /************ CONCURRENCY ***********************************************/ > - > 
DEFINE_SPINLOCK(tmem_spinlock); /* used iff tmem_lock_all */ > DEFINE_RWLOCK(tmem_rwlock); /* used iff !tmem_lock_all */ > static DEFINE_SPINLOCK(eph_lists_spinlock); /* protects global AND clients */ > @@ -226,23 +186,6 @@ static DEFINE_SPINLOCK(pers_lists_spinlock); > > /* global counters (should use long_atomic_t access) */ > static long global_eph_count = 0; /* atomicity depends on eph_lists_spinlock */ > -static atomic_t global_obj_count = ATOMIC_INIT(0); > -static atomic_t global_pgp_count = ATOMIC_INIT(0); > -static atomic_t global_pcd_count = ATOMIC_INIT(0); > -static atomic_t global_page_count = ATOMIC_INIT(0); > -static atomic_t global_rtree_node_count = ATOMIC_INIT(0); > - > -#define atomic_inc_and_max(_c) do { \ > - atomic_inc(&_c); \ > - if ( _atomic_read(_c) > _c##_max ) \ > - _c##_max = _atomic_read(_c); \ > -} while (0) > - > -#define atomic_dec_and_assert(_c) do { \ > - atomic_dec(&_c); \ > - ASSERT(_atomic_read(_c) >= 0); \ > -} while (0) > - > > /************ MEMORY ALLOCATION INTERFACE *****************************/ > static void *tmem_malloc(size_t size, struct tmem_pool *pool) > @@ -259,8 +202,6 @@ static void *tmem_malloc(size_t size, struct tmem_pool *pool) > ASSERT( tmem_mempool != NULL ); > v = xmem_pool_alloc(size, tmem_mempool); > } > - if ( v == NULL ) > - alloc_failed++; > return v; > } > > @@ -286,10 +227,6 @@ static struct page_info *tmem_page_alloc(struct tmem_pool *pool) > pfp = tmem_alloc_page_thispool(pool->client->domain); > else > pfp = tmem_alloc_page(pool,0); > - if ( pfp == NULL ) > - alloc_page_failed++; > - else > - atomic_inc_and_max(global_page_count); > return pfp; > } > > @@ -300,7 +237,6 @@ static void tmem_page_free(struct tmem_pool *pool, struct page_info *pfp) > tmem_free_page(pfp); > else > tmem_free_page_thispool(pfp); > - atomic_dec_and_assert(global_page_count); > } > > /************ PAGE CONTENT DESCRIPTOR MANIPULATION ROUTINES ***********/ > @@ -339,7 +275,6 @@ static void pcd_disassociate(struct 
tmem_page_descriptor *pgp, struct tmem_pool > pagesize_t pcd_size = pcd->size; > pagesize_t pgp_size = pgp->size; > char *pcd_cdata = pgp->pcd->cdata; > - pagesize_t pcd_csize = pgp->pcd->size; > > ASSERT(tmem_dedup_enabled()); > ASSERT(firstbyte != NOT_SHAREABLE); > @@ -368,25 +303,18 @@ static void pcd_disassociate(struct tmem_page_descriptor *pgp, struct tmem_pool > RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); > /* now free up the pcd memory */ > tmem_free(pcd, NULL); > - atomic_dec_and_assert(global_pcd_count); > if ( pgp_size != 0 && pcd_size < PAGE_SIZE ) > { > /* compressed data */ > tmem_free(pcd_cdata, pool); > - pcd_tot_csize -= pcd_csize; > } > else if ( pcd_size != PAGE_SIZE ) > { > /* trailing zero data */ > - pcd_tot_tze_size -= pcd_size; > if ( pcd_size ) > tmem_free(pcd_tze, pool); > } else { > /* real physical page */ > - if ( tmem_tze_enabled() ) > - pcd_tot_tze_size -= PAGE_SIZE; > - if ( tmem_compression_enabled() ) > - pcd_tot_csize -= PAGE_SIZE; > tmem_page_free(pool,pfp); > } > tmem_write_unlock(&pcd_tree_rwlocks[firstbyte]); > @@ -467,7 +395,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize > /* but if compressed, data is assumed static so don''t free! 
*/ > if ( cdata == NULL ) > tmem_page_free(pgp->us.obj->pool,pgp->pfp); > - deduped_puts++; > goto match; > } > } > @@ -485,7 +412,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize > goto unlock; > } > } > - atomic_inc_and_max(global_pcd_count); > RB_CLEAR_NODE(&pcd->pcd_rb_tree_node); /* is this necessary */ > INIT_LIST_HEAD(&pcd->pgp_list); /* is this necessary */ > pcd->pgp_ref_count = 0; > @@ -493,7 +419,6 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize > { > memcpy(pcd->cdata,cdata,csize); > pcd->size = csize; > - pcd_tot_csize += csize; > } else if ( pfp_size == 0 ) { > ASSERT(tmem_tze_enabled()); > pcd->size = 0; > @@ -502,15 +427,10 @@ static int pcd_associate(struct tmem_page_descriptor *pgp, char *cdata, pagesize > ((pcd->tze = tmem_malloc(pfp_size,pgp->us.obj->pool)) != NULL) ) { > tmem_tze_copy_from_pfp(pcd->tze,pgp->pfp,pfp_size); > pcd->size = pfp_size; > - pcd_tot_tze_size += pfp_size; > tmem_page_free(pgp->us.obj->pool,pgp->pfp); > } else { > pcd->pfp = pgp->pfp; > pcd->size = PAGE_SIZE; > - if ( tmem_tze_enabled() ) > - pcd_tot_tze_size += PAGE_SIZE; > - if ( tmem_compression_enabled() ) > - pcd_tot_csize += PAGE_SIZE; > } > rb_link_node(&pcd->pcd_rb_tree_node, parent, new); > rb_insert_color(&pcd->pcd_rb_tree_node, root); > @@ -553,8 +473,7 @@ static struct tmem_page_descriptor *pgp_alloc(struct tmem_object_root *obj) > pgp->size = -1; > pgp->index = -1; > pgp->timestamp = get_cycles(); > - atomic_inc_and_max(global_pgp_count); > - atomic_inc_and_max(pool->pgp_count); > + atomic_inc(&pool->pgp_count); > return pgp; > } > > @@ -578,11 +497,6 @@ static void pgp_free_data(struct tmem_page_descriptor *pgp, struct tmem_pool *po > tmem_free(pgp->cdata, pool); > else > tmem_page_free(pgp->us.obj->pool,pgp->pfp); > - if ( pool != NULL && pgp_size ) > - { > - pool->client->compressed_pages--; > - pool->client->compressed_sum_size -= pgp_size; > - } > pgp->pfp = NULL; > pgp->size = -1; 
> } > @@ -603,8 +517,7 @@ static void pgp_free(struct tmem_page_descriptor *pgp, int from_delete) > ASSERT(list_empty(&pgp->us.client_eph_pages)); > } > pgp_free_data(pgp, pool); > - atomic_dec_and_assert(global_pgp_count); > - atomic_dec_and_assert(pool->pgp_count); > + atomic_dec(&pool->pgp_count); > pgp->size = -1; > if ( is_persistent(pool) && pool->client->live_migrating ) > { > @@ -676,7 +589,6 @@ static void pgp_delete(struct tmem_page_descriptor *pgp, bool_t no_eph_lock) > ASSERT(pgp->us.obj != NULL); > ASSERT(pgp->us.obj->pool != NULL); > life = get_cycles() - pgp->timestamp; > - pgp->us.obj->pool->sum_life_cycles += life; > pgp_delist(pgp, no_eph_lock); > pgp_free(pgp,1); > } > @@ -734,10 +646,6 @@ static struct radix_tree_node *rtn_alloc(void *arg) > return NULL; > objnode->obj = obj; > memset(&objnode->rtn, 0, sizeof(struct radix_tree_node)); > - if (++obj->pool->objnode_count > obj->pool->objnode_count_max) > - obj->pool->objnode_count_max = obj->pool->objnode_count; > - atomic_inc_and_max(global_rtree_node_count); > - obj->objnode_count++; > return &objnode->rtn; > } > > @@ -753,11 +661,8 @@ static void rtn_free(struct radix_tree_node *rtn, void *arg) > ASSERT_SPINLOCK(&objnode->obj->obj_spinlock); > pool = objnode->obj->pool; > ASSERT(pool != NULL); > - pool->objnode_count--; > - objnode->obj->objnode_count--; > objnode->obj = NULL; > tmem_free(objnode, pool); > - atomic_dec_and_assert(global_rtree_node_count); > } > > /************ POOL OBJECT COLLECTION MANIPULATION ROUTINES *******************/ > @@ -850,15 +755,11 @@ static void obj_free(struct tmem_object_root *obj, int no_rebalance) > ASSERT_WRITELOCK(&pool->pool_rwlock); > if ( obj->tree_root.rnode != NULL ) /* may be a "stump" with no leaves */ > radix_tree_destroy(&obj->tree_root, pgp_destroy); > - ASSERT((long)obj->objnode_count == 0); > ASSERT(obj->tree_root.rnode == NULL); > - pool->obj_count--; > - ASSERT(pool->obj_count >= 0); > obj->pool = NULL; > old_oid = obj->oid; > 
oid_set_invalid(&obj->oid); > obj->last_client = TMEM_CLI_ID_NULL; > - atomic_dec_and_assert(global_obj_count); > /* use no_rebalance only if all objects are being destroyed anyway */ > if ( !no_rebalance ) > rb_erase(&obj->rb_tree_node,&pool->obj_rb_root[oid_hash(&old_oid)]); > @@ -905,16 +806,11 @@ static struct tmem_object_root * obj_new(struct tmem_pool *pool, struct oid *oid > ASSERT_WRITELOCK(&pool->pool_rwlock); > if ( (obj = tmem_malloc(sizeof(struct tmem_object_root), pool)) == NULL ) > return NULL; > - pool->obj_count++; > - if (pool->obj_count > pool->obj_count_max) > - pool->obj_count_max = pool->obj_count; > - atomic_inc_and_max(global_obj_count); > radix_tree_init(&obj->tree_root); > radix_tree_set_alloc_callbacks(&obj->tree_root, rtn_alloc, rtn_free, obj); > spin_lock_init(&obj->obj_spinlock); > obj->pool = pool; > obj->oid = *oidp; > - obj->objnode_count = 0; > obj->pgp_count = 0; > obj->last_client = TMEM_CLI_ID_NULL; > tmem_spin_lock(&obj->obj_spinlock); > @@ -978,16 +874,9 @@ static struct tmem_pool * pool_alloc(void) > INIT_LIST_HEAD(&pool->persistent_page_list); > pool->cur_pgp = NULL; > rwlock_init(&pool->pool_rwlock); > - pool->pgp_count_max = pool->obj_count_max = 0; > - pool->objnode_count = pool->objnode_count_max = 0; > atomic_set(&pool->pgp_count,0); > - pool->obj_count = 0; pool->shared_count = 0; > + pool->shared_count = 0; > pool->pageshift = PAGE_SHIFT - 12; > - pool->good_puts = pool->puts = pool->dup_puts_flushed = 0; > - pool->dup_puts_replaced = pool->no_mem_puts = 0; > - pool->found_gets = pool->gets = 0; > - pool->flushs_found = pool->flushs = 0; > - pool->flush_objs_found = pool->flush_objs = 0; > pool->is_dying = 0; > return pool; > } > @@ -1163,9 +1052,7 @@ static struct client *client_create(domid_t cli_id) > INIT_LIST_HEAD(&client->ephemeral_page_list); > INIT_LIST_HEAD(&client->persistent_invalidated_list); > client->cur_pgp = NULL; > - client->eph_count = client->eph_count_max = 0; > - client->total_cycles = 0; 
client->succ_pers_puts = 0; > - client->succ_eph_gets = 0; client->succ_pers_gets = 0; > + client->eph_count = 0; > tmem_client_info("ok\n"); > return client; > > @@ -1273,7 +1160,6 @@ static int tmem_evict(void) > int ret = 0; > bool_t hold_pool_rwlock = 0; > > - evict_attempts++; > tmem_spin_lock(&eph_lists_spinlock); > if ( (client != NULL) && client_over_quota(client) && > !list_empty(&client->ephemeral_page_list) ) > @@ -1318,7 +1204,6 @@ found: > tmem_spin_unlock(&obj->obj_spinlock); > if ( hold_pool_rwlock ) > tmem_write_unlock(&pool->pool_rwlock); > - evicted_pgs++; > ret = 1; > > out: > @@ -1404,8 +1289,6 @@ static int do_tmem_put_compress(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn > pgp->cdata = p; > } > pgp->size = size; > - pgp->us.obj->pool->client->compressed_pages++; > - pgp->us.obj->pool->client->compressed_sum_size += size; > ret = 1; > > out: > @@ -1443,7 +1326,7 @@ static int do_tmem_dup_put(struct tmem_page_descriptor *pgp, xen_pfn_t cmfn, > else if ( ret == -ENOMEM ) > goto failed_dup; > else if ( ret == -EFAULT ) > - goto bad_copy; > + goto cleanup; > } > > copy_uncompressed: > @@ -1454,7 +1337,7 @@ copy_uncompressed: > pgp->size = 0; > ret = tmem_copy_from_client(pgp->pfp, cmfn, tmem_cli_buf_null); > if ( ret < 0 ) > - goto bad_copy; > + goto cleanup; > if ( tmem_dedup_enabled() && !is_persistent(pool) ) > { > if ( pcd_associate(pgp,NULL,0) == -ENOMEM ) > @@ -1467,16 +1350,8 @@ done: > obj->last_client = client->cli_id; > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > - pool->dup_puts_replaced++; > - pool->good_puts++; > - if ( is_persistent(pool) ) > - client->succ_pers_puts++; > return 1; > > -bad_copy: > - failed_copies++; > - goto cleanup; > - > failed_dup: > /* couldn''t change out the data, flush the old data and return > * -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */ > @@ -1494,7 +1369,6 @@ cleanup: > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > } > - pool->dup_puts_flushed++; > 
return ret; > } > > @@ -1510,7 +1384,6 @@ static int do_tmem_put(struct tmem_pool *pool, > ASSERT(pool != NULL); > client = pool->client; > ret = client->frozen ? -EFROZEN : -ENOMEM; > - pool->puts++; > /* does page already exist (dup)? if so, handle specially */ > if ( (obj = obj_find(pool,oidp)) != NULL ) > { > @@ -1560,16 +1433,14 @@ static int do_tmem_put(struct tmem_pool *pool, > goto insert_page; > if ( ret == -ENOMEM ) > { > - client->compress_nomem++; > goto del_pgp_from_obj; > } > if ( ret == 0 ) > { > - client->compress_poor++; > goto copy_uncompressed; > } > if ( ret == -EFAULT ) > - goto bad_copy; > + goto del_pgp_from_obj; > } > > copy_uncompressed: > @@ -1580,7 +1451,7 @@ copy_uncompressed: > } > ret = tmem_copy_from_client(pgp->pfp, cmfn, clibuf); > if ( ret < 0 ) > - goto bad_copy; > + goto del_pgp_from_obj; > > if ( tmem_dedup_enabled() && !is_persistent(pool) ) > { > @@ -1594,12 +1465,10 @@ insert_page: > tmem_spin_lock(&eph_lists_spinlock); > list_add_tail(&pgp->global_eph_pages, > &global_ephemeral_page_list); > - if (++global_eph_count > global_eph_count_max) > - global_eph_count_max = global_eph_count; > + ++global_eph_count; > list_add_tail(&pgp->us.client_eph_pages, > &client->ephemeral_page_list); > - if (++client->eph_count > client->eph_count_max) > - client->eph_count_max = client->eph_count; > + ++client->eph_count; > tmem_spin_unlock(&eph_lists_spinlock); > } > else > @@ -1616,17 +1485,8 @@ insert_page: > > /* free the obj spinlock */ > tmem_spin_unlock(&obj->obj_spinlock); > - pool->good_puts++; > - > - if ( is_persistent(pool) ) > - client->succ_pers_puts++; > - else > - tot_good_eph_puts++; > return 1; > > -bad_copy: > - failed_copies++; > - > del_pgp_from_obj: > ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1)); > pgp_delete_from_obj(obj, pgp->index); > @@ -1645,7 +1505,6 @@ unlock_obj: > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > } > - pool->no_mem_puts++; > return ret; > } > > @@ -1660,7 +1519,6 @@ 
static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > if ( !_atomic_read(pool->pgp_count) ) > return -EEMPTY; > > - pool->gets++; > obj = obj_find(pool,oidp); > if ( obj == NULL ) > return 0; > @@ -1687,7 +1545,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > else > rc = tmem_copy_to_client(cmfn, pgp->pfp, clibuf); > if ( rc <= 0 ) > - goto bad_copy; > + goto out; > > if ( !is_persistent(pool) ) > { > @@ -1716,17 +1574,11 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > } > - pool->found_gets++; > - if ( is_persistent(pool) ) > - client->succ_pers_gets++; > - else > - client->succ_eph_gets++; > return 1; > > -bad_copy: > +out: > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > - failed_copies++; > return rc; > } > > @@ -1735,7 +1587,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t > struct tmem_object_root *obj; > struct tmem_page_descriptor *pgp; > > - pool->flushs++; > obj = obj_find(pool,oidp); > if ( obj == NULL ) > goto out; > @@ -1756,7 +1607,6 @@ static int do_tmem_flush_page(struct tmem_pool *pool, struct oid *oidp, uint32_t > obj->no_evict = 0; > tmem_spin_unlock(&obj->obj_spinlock); > } > - pool->flushs_found++; > > out: > if ( pool->client->frozen ) > @@ -1769,13 +1619,11 @@ static int do_tmem_flush_object(struct tmem_pool *pool, struct oid *oidp) > { > struct tmem_object_root *obj; > > - pool->flush_objs++; > obj = obj_find(pool,oidp); > if ( obj == NULL ) > goto out; > tmem_write_lock(&pool->pool_rwlock); > obj_destroy(obj,0); > - pool->flush_objs_found++; > tmem_write_unlock(&pool->pool_rwlock); > > out: > @@ -2386,8 +2234,6 @@ long do_tmem_op(tmem_cli_op_t uops) > if ( !tmem_current_permitted() ) > return -EPERM; > > - total_tmem_ops++; > - > if ( tmem_lock_all ) > { > if ( tmem_lock_all > 1 ) > @@ -2402,7 +2248,6 @@ long 
do_tmem_op(tmem_cli_op_t uops) > if ( tmem_lock_all ) > goto out; > simple_error: > - errored_tmem_ops++; > return rc; > } > > @@ -2512,8 +2357,6 @@ long do_tmem_op(tmem_cli_op_t uops) > } > > out: > - if ( rc < 0 ) > - errored_tmem_ops++; > if ( tmem_lock_all ) > { > if ( tmem_lock_all > 1 ) > @@ -2590,7 +2433,6 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > if (!tmem_enabled() || !tmem_freeable_pages()) > return NULL; > > - relinq_attempts++; > if ( order > 0 ) > { > #ifndef NDEBUG > @@ -2613,13 +2455,10 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > break; > evicts_per_relinq++; > } > - if ( evicts_per_relinq > max_evicts_per_relinq ) > - max_evicts_per_relinq = evicts_per_relinq; > if ( pfp != NULL ) > { > if ( !(memflags & MEMF_tmem) ) > scrub_one_page(pfp); > - relinq_pgs++; > } > > if ( tmem_called_from_tmem(memflags) ) > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:17 UTC
Re: [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneeded parameters
On Wed, Nov 20, 2013 at 04:46:22PM +0800, Bob Liu wrote:> The two parameters of __tmem_alloc_page() can be reduced.That is dangerous. The last one (no_heap) was added to guard against recursion. Check how tmem_alloc_page_thispool calls alloc_domheap_pages, whcih calls alloc_heap_pages which can call tmem_relinquish_pages, which can .. well, you will get the idea.> tmem_called_from_tmem() was also dropped by this patch. > > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 13 +++++-------- > xen/include/xen/tmem_xen.h | 11 +++++------ > 2 files changed, 10 insertions(+), 14 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index ee61899..bf0fa1b 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -245,7 +245,7 @@ static struct page_info *tmem_alloc_page(struct tmem_pool *pool) > if ( pool != NULL && is_persistent(pool) ) > pfp = __tmem_alloc_page_thispool(pool->client->domain); > else > - pfp = __tmem_alloc_page(pool,0); > + pfp = __tmem_alloc_page(); > return pfp; > } > > @@ -263,9 +263,8 @@ static noinline void *tmem_mempool_page_get(unsigned long size) > struct page_info *pi; > > ASSERT(size == PAGE_SIZE); > - if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) > + if ( (pi = __tmem_alloc_page()) == NULL ) > return NULL; > - ASSERT(IS_VALID_PAGE(pi)); > return page_to_virt(pi); > } > > @@ -2450,7 +2449,6 @@ void tmem_freeze_all(unsigned char key) > void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > { > struct page_info *pfp; > - unsigned long evicts_per_relinq = 0; > int max_evictions = 10; > > if (!tmem_enabled() || !tmem_freeable_pages()) > @@ -2464,14 +2462,13 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > return NULL; > } > > - if ( tmem_called_from_tmem(memflags) ) > + if ( memflags & MEMF_tmem ) > read_lock(&tmem_rwlock); > > - while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL ) > + while ( (pfp = tmem_page_list_get()) == NULL )No no. 
That could lead to bad recursion. You need to pass ''1'' somehow.> { > if ( (max_evictions-- <= 0) || !tmem_evict()) > break; > - evicts_per_relinq++;? Should this be a seperate patch?> } > if ( pfp != NULL ) > { > @@ -2479,7 +2476,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > scrub_one_page(pfp); > } > > - if ( tmem_called_from_tmem(memflags) ) > + if ( memflags & MEMF_tmem )Perhaps a comment too? /* Called recursively from within tmem. */> read_unlock(&tmem_rwlock); > > return pfp; > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > index 7468c28..c4b9f5a 100644 > --- a/xen/include/xen/tmem_xen.h > +++ b/xen/include/xen/tmem_xen.h > @@ -144,15 +144,16 @@ static inline void __tmem_free_page_thispool(struct page_info *pi) > /* > * Memory allocation for ephemeral (non-persistent) data > */ > -static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) > +static inline struct page_info *__tmem_alloc_page(void) > { > struct page_info *pi = tmem_page_list_get(); > > - if ( pi == NULL && !no_heap ) > + if ( pi == NULL) > pi = alloc_domheap_pages(0,0,MEMF_tmem); > - ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); > - if ( pi != NULL && !no_heap ) > + > + if ( pi ) > atomic_inc(&freeable_page_count); > + ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); > return pi; > } > > @@ -168,8 +169,6 @@ static inline unsigned long tmem_free_mb(void) > return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); > } > > -#define tmem_called_from_tmem(_memflags) (_memflags & MEMF_tmem) > - > /* "Client" (==domain) abstraction */ > > struct client; > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 18:22 UTC
Re: [PATCH 15/16] tmem: refactor function tmem_ensure_avail_pages()
On Wed, Nov 20, 2013 at 04:46:24PM +0800, Bob Liu wrote:> tmem_ensure_avail_pages() doesn''t return a value which is incorrect because > the caller need to confirm there is enough memory.Which it was not doing until now. Ouch.> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 43 +++++++++++++++++++------------------------ > xen/include/xen/tmem_xen.h | 6 ------ > 2 files changed, 19 insertions(+), 30 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index 2e807d4..0bc33ee 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -1296,22 +1296,28 @@ static unsigned long tmem_relinquish_npages(unsigned long n) > return avail_pages; > } > > -/* Under certain conditions (e.g. if each client is putting pages for exactly > +/* > + * Under certain conditions (e.g. if each client is putting pages for exactly > * one object), once locks are held, freeing up memory may > * result in livelocks and very long "put" times, so we try to ensure there > * is a minimum amount of memory (1MB) available BEFORE any data structure > - * locks are held */ > -static inline void tmem_ensure_avail_pages(void) > + * locks are held. 
> + */ > +static inline bool_t tmem_ensure_avail_pages(void) > { > int failed_evict = 10; > + unsigned long free_mem; > > - while ( !tmem_free_mb() ) > - { > - if ( tmem_evict() ) > - continue; > - else if ( failed_evict-- <= 0 ) > - break; > - } > + do { > + free_mem = (tmem_page_list_pages + total_free_pages()) > + >> (20 - PAGE_SHIFT); > + if ( free_mem ) > + return 1; > + if ( !tmem_evict() ) > + failed_evict--; > + } while ( failed_evict > 0 ); > + > + return 0; > } > > /************ TMEM CORE OPERATIONS ************************************/ > @@ -2282,9 +2288,6 @@ long do_tmem_op(tmem_cli_op_t uops) > struct tmem_pool *pool = NULL; > struct oid *oidp; > int rc = 0; > - bool_t succ_get = 0, succ_put = 0; > - bool_t non_succ_get = 0, non_succ_put = 0; > - bool_t flush = 0, flush_obj = 0;Um, OK, they seem to belong to a different patch?> bool_t write_lock_set = 0, read_lock_set = 0; > > if ( !tmem_initialized ) > @@ -2299,8 +2302,7 @@ long do_tmem_op(tmem_cli_op_t uops) > if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) > { > tmem_client_err("tmem: can''t get tmem struct from %s\n", tmem_client_str); > - rc = -EFAULT; > - return rc; > + return -EFAULT;This too.> } > > if ( op.cmd == TMEM_CONTROL ) > @@ -2369,28 +2371,21 @@ long do_tmem_op(tmem_cli_op_t uops) > op.u.creat.uuid[0], op.u.creat.uuid[1]); > break; > case TMEM_PUT_PAGE: > - tmem_ensure_avail_pages(); > - rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > + if ( tmem_ensure_avail_pages() ) > + rc = do_tmem_put(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > tmem_cli_buf_null); > - if (rc == 1) succ_put = 1; > - else non_succ_put = 1;which we ignore..> break; > case TMEM_GET_PAGE: > rc = do_tmem_get(pool, oidp, op.u.gen.index, op.u.gen.cmfn, > tmem_cli_buf_null); > - if (rc == 1) succ_get = 1; > - else non_succ_get = 1;ugh.> break; > case TMEM_FLUSH_PAGE: > - flush = 1; > rc = do_tmem_flush_page(pool, oidp, op.u.gen.index); > break; > case TMEM_FLUSH_OBJECT: > rc = 
do_tmem_flush_object(pool, oidp); > - flush_obj = 1; > break; > case TMEM_DESTROY_POOL: > - flush = 1;Those really belong to a seperate cleanup patch.> rc = do_tmem_destroy_pool(op.pool_id); > break; > default: > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > index bf3be62..258e8e7 100644 > --- a/xen/include/xen/tmem_xen.h > +++ b/xen/include/xen/tmem_xen.h > @@ -164,13 +164,7 @@ static inline void __tmem_free_page(struct page_info *pi) > atomic_dec(&freeable_page_count); > } > > -static inline unsigned long tmem_free_mb(void) > -{ > - return (tmem_page_list_pages + total_free_pages()) >> (20 - PAGE_SHIFT); > -} > - > /* "Client" (==domain) abstraction */ > - > struct client; > static inline struct client *tmem_client_from_cli_id(domid_t cli_id) > { > -- > 1.7.10.4 >
Konrad Rzeszutek Wilk
2013-Nov-22 21:00 UTC
Re: [PATCH 09/16] tmem: cleanup: drop tmemc_list() temporary
On Wed, Nov 20, 2013 at 04:46:18PM +0800, Bob Liu wrote:> tmemc_list() collects a lot of statistics which are not well defined, > unreadable and most of them are useless. > > This patch drop those mess code and leave TMEMC_LIST subops unimplemented > temporary.And you might want to drop tmem_copy_to_client_buf_offset as well as it is the only user of it.> > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 171 +---------------------------------------------------- > 1 file changed, 2 insertions(+), 169 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index 6afbdbc..48b67bf 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -29,12 +29,10 @@ > /* global statistics (none need to be locked) */ > static unsigned long total_tmem_ops = 0; > static unsigned long errored_tmem_ops = 0; > -static unsigned long total_flush_pool = 0; > static unsigned long alloc_failed = 0, alloc_page_failed = 0; > static unsigned long evicted_pgs = 0, evict_attempts = 0; > static unsigned long relinq_pgs = 0, relinq_attempts = 0; > static unsigned long max_evicts_per_relinq = 0; > -static unsigned long low_on_memory = 0; > static unsigned long deduped_puts = 0; > static unsigned long tot_good_eph_puts = 0; > static int global_obj_count_max = 0; > @@ -1980,176 +1978,11 @@ static int tmemc_flush_mem(domid_t cli_id, uint32_t kb) > return flushed_kb; > } > > -/* > - * These tmemc_list* routines output lots of stats in a format that is > - * intended to be program-parseable, not human-readable. Further, by > - * tying each group of stats to a line format indicator (e.g. G= for > - * global stats) and each individual stat to a two-letter specifier > - * (e.g. Ec:nnnnn in the G= line says there are nnnnn pages in the > - * global ephemeral pool), it should allow the stats reported to be > - * forward and backwards compatible as tmem evolves. 
> - */ > -#define BSIZE 1024 > - > -static int tmemc_list_client(struct client *c, tmem_cli_va_param_t buf, > - int off, uint32_t len, bool_t use_long) > -{ > - char info[BSIZE]; > - int i, n = 0, sum = 0; > - struct tmem_pool *p; > - bool_t s; > - > - n = scnprintf(info,BSIZE,"C=CI:%d,ww:%d,ca:%d,co:%d,fr:%d," > - "Tc:%"PRIu64",Ge:%ld,Pp:%ld,Gp:%ld%c", > - c->cli_id, c->weight, c->cap, c->compress, c->frozen, > - c->total_cycles, c->succ_eph_gets, c->succ_pers_puts, c->succ_pers_gets, > - use_long ? '','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Ec:%ld,Em:%ld,cp:%ld,cb:%"PRId64",cn:%ld,cm:%ld\n", > - c->eph_count, c->eph_count_max, > - c->compressed_pages, c->compressed_sum_size, > - c->compress_poor, c->compress_nomem); > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - for ( i = 0; i < MAX_POOLS_PER_DOMAIN; i++ ) > - { > - if ( (p = c->pools[i]) == NULL ) > - continue; > - s = is_shared(p); > - n = scnprintf(info,BSIZE,"P=CI:%d,PI:%d," > - "PT:%c%c,U0:%"PRIx64",U1:%"PRIx64"%c", > - c->cli_id, p->pool_id, > - is_persistent(p) ? ''P'' : ''E'', s ? ''S'' : ''P'', > - (uint64_t)(s ? p->uuid[0] : 0), > - (uint64_t)(s ? p->uuid[1] : 0LL), > - use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," > - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," > - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", > - _atomic_read(p->pgp_count), p->pgp_count_max, > - p->obj_count, p->obj_count_max, > - p->objnode_count, p->objnode_count_max, > - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, > - p->no_mem_puts, > - p->found_gets, p->gets, > - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - } > - return sum; > -} > - > -static int tmemc_list_shared(tmem_cli_va_param_t buf, int off, uint32_t len, > - bool_t use_long) > -{ > - char info[BSIZE]; > - int i, n = 0, sum = 0; > - struct tmem_pool *p; > - struct share_list *sl; > - > - for ( i = 0; i < MAX_GLOBAL_SHARED_POOLS; i++ ) > - { > - if ( (p = global_shared_pools[i]) == NULL ) > - continue; > - n = scnprintf(info+n,BSIZE-n,"S=SI:%d,PT:%c%c,U0:%"PRIx64",U1:%"PRIx64, > - i, is_persistent(p) ? ''P'' : ''E'', > - is_shared(p) ? ''S'' : ''P'', > - p->uuid[0], p->uuid[1]); > - list_for_each_entry(sl,&p->share_list, share_list) > - n += scnprintf(info+n,BSIZE-n,",SC:%d",sl->client->cli_id); > - n += scnprintf(info+n,BSIZE-n,"%c", use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Pc:%d,Pm:%d,Oc:%ld,Om:%ld,Nc:%lu,Nm:%lu," > - "ps:%lu,pt:%lu,pd:%lu,pr:%lu,px:%lu,gs:%lu,gt:%lu," > - "fs:%lu,ft:%lu,os:%lu,ot:%lu\n", > - _atomic_read(p->pgp_count), p->pgp_count_max, > - p->obj_count, p->obj_count_max, > - p->objnode_count, p->objnode_count_max, > - p->good_puts, p->puts,p->dup_puts_flushed, p->dup_puts_replaced, > - p->no_mem_puts, > - p->found_gets, p->gets, > - p->flushs_found, p->flushs, p->flush_objs_found, p->flush_objs); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - } > - return sum; > -} > - > -static int tmemc_list_global_perf(tmem_cli_va_param_t buf, int off, > - uint32_t len, bool_t use_long) > -{ > - char info[BSIZE]; > - int n = 0, sum = 0; > - > - n = scnprintf(info+n,BSIZE-n,"T="); > - n--; /* overwrite trailing comma */ > - n += scnprintf(info+n,BSIZE-n,"\n"); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - return sum; > -} > - > -static int tmemc_list_global(tmem_cli_va_param_t buf, int off, uint32_t len, > - bool_t use_long) > -{ > - char info[BSIZE]; > - int n = 0, sum = off; > - > - n += scnprintf(info,BSIZE,"G=" > - "Tt:%lu,Te:%lu,Cf:%lu,Af:%lu,Pf:%lu,Ta:%lu," > - "Lm:%lu,Et:%lu,Ea:%lu,Rt:%lu,Ra:%lu,Rx:%lu,Fp:%lu%c", > - total_tmem_ops, errored_tmem_ops, failed_copies, > - alloc_failed, alloc_page_failed, tmem_page_list_pages, > - low_on_memory, evicted_pgs, > - evict_attempts, relinq_pgs, relinq_attempts, max_evicts_per_relinq, > - total_flush_pool, use_long ? 
'','' : ''\n''); > - if (use_long) > - n += scnprintf(info+n,BSIZE-n, > - "Ec:%ld,Em:%ld,Oc:%d,Om:%d,Nc:%d,Nm:%d,Pc:%d,Pm:%d," > - "Fc:%d,Fm:%d,Sc:%d,Sm:%d,Ep:%lu,Gd:%lu,Zt:%lu,Gz:%lu\n", > - global_eph_count, global_eph_count_max, > - _atomic_read(global_obj_count), global_obj_count_max, > - _atomic_read(global_rtree_node_count), global_rtree_node_count_max, > - _atomic_read(global_pgp_count), global_pgp_count_max, > - _atomic_read(global_page_count), global_page_count_max, > - _atomic_read(global_pcd_count), global_pcd_count_max, > - tot_good_eph_puts,deduped_puts,pcd_tot_tze_size,pcd_tot_csize); > - if ( sum + n >= len ) > - return sum; > - tmem_copy_to_client_buf_offset(buf,off+sum,info,n+1); > - sum += n; > - return sum; > -} > - > static int tmemc_list(domid_t cli_id, tmem_cli_va_param_t buf, uint32_t len, > bool_t use_long) > { > - struct client *client; > - int off = 0; > - > - if ( cli_id == TMEM_CLI_ID_NULL ) { > - off = tmemc_list_global(buf,0,len,use_long); > - off += tmemc_list_shared(buf,off,len-off,use_long); > - list_for_each_entry(client,&global_client_list,client_list) > - off += tmemc_list_client(client, buf, off, len-off, use_long); > - off += tmemc_list_global_perf(buf,off,len-off,use_long); > - } > - else if ( (client = tmem_client_from_cli_id(cli_id)) == NULL) > - return -1; > - else > - off = tmemc_list_client(client, buf, 0, len, use_long); > - > - return 0; > + tmem_client_info("tmemc_list is not implemented yet!\n"); > + return -1; > } > > static int tmemc_set_var_one(struct client *client, uint32_t subop, uint32_t arg1) > -- > 1.7.10.4 >
>>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > You also need to delete them in include/public/tmem.h and rev up the > TMEM_SPEC_VERSION I think?I said this elsewhere already - you can''t simply delete things from the public interface. Jan
On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote:> >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > You also need to delete them in include/public/tmem.h and rev up the > > TMEM_SPEC_VERSION I think? > > I said this elsewhere already - you can''t simply delete things > from the public interface.Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted then, to reinstate the typedef under a suitable #ifdef __XEN_INTERFACE_VERSION__ ? Ian.
>>> On 25.11.13 at 10:52, Ian Campbell <Ian.Campbell@citrix.com> wrote: > On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: >> >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: >> > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: >> >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. >> > >> > You also need to delete them in include/public/tmem.h and rev up the >> > TMEM_SPEC_VERSION I think? >> >> I said this elsewhere already - you can''t simply delete things >> from the public interface. > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > then, to reinstate the typedef under a suitable #ifdef > __XEN_INTERFACE_VERSION__ ?Yes, that would be my recommendation. But Konrad as the tmem maintainer should voice his opinion before we do that. Jan
Konrad Rzeszutek Wilk
2013-Nov-25 16:37 UTC
Re: [PATCH 03/16] tmem: cleanup: rm unused tmem_op
On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote:> On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: > > >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > > > You also need to delete them in include/public/tmem.h and rev up the > > > TMEM_SPEC_VERSION I think? > > > > I said this elsewhere already - you can''t simply delete things > > from the public interface. > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > then, to reinstate the typedef under a suitable #ifdef > __XEN_INTERFACE_VERSION__ ?I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which was ''xen_pfn_t'' based. And the struct that got changed is now using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''.> > Ian. >
On Mon, 2013-11-25 at 11:37 -0500, Konrad Rzeszutek Wilk wrote:> On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote: > > On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: > > > >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > > > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > > > > > You also need to delete them in include/public/tmem.h and rev up the > > > > TMEM_SPEC_VERSION I think? > > > > > > I said this elsewhere already - you can''t simply delete things > > > from the public interface. > > > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > > then, to reinstate the typedef under a suitable #ifdef > > __XEN_INTERFACE_VERSION__ ? > > I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which > was ''xen_pfn_t'' based. And the struct that got changed is now > using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''.It''s not OK for any existing code using the heads and the old name though, since it will fail to build. We normally try and care about such things. As TMEM maintainer are you telling us not to bother this time? Ian.
Konrad Rzeszutek Wilk
2013-Nov-25 17:09 UTC
Re: [PATCH 03/16] tmem: cleanup: rm unused tmem_op
On Mon, Nov 25, 2013 at 04:40:18PM +0000, Ian Campbell wrote:> On Mon, 2013-11-25 at 11:37 -0500, Konrad Rzeszutek Wilk wrote: > > On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote: > > > On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: > > > > >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > > > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > > > > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > > > > > > > You also need to delete them in include/public/tmem.h and rev up the > > > > > TMEM_SPEC_VERSION I think? > > > > > > > > I said this elsewhere already - you can''t simply delete things > > > > from the public interface. > > > > > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > > > then, to reinstate the typedef under a suitable #ifdef > > > __XEN_INTERFACE_VERSION__ ? > > > > I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which > > was ''xen_pfn_t'' based. And the struct that got changed is now > > using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''. > > It''s not OK for any existing code using the heads and the old name > though, since it will fail to build.The name is still ''cmfn''. Are you saying that consumers (external to the Xen tree) of this header will fail to build b/c the typedef is gone?> > We normally try and care about such things. As TMEM maintainer are you > telling us not to bother this time?No, I am trying to understand the problem so that they don''t happen.> > Ian. >
On Mon, 2013-11-25 at 12:09 -0500, Konrad Rzeszutek Wilk wrote:> On Mon, Nov 25, 2013 at 04:40:18PM +0000, Ian Campbell wrote: > > On Mon, 2013-11-25 at 11:37 -0500, Konrad Rzeszutek Wilk wrote: > > > On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote: > > > > On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: > > > > > >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > > > > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > > > > > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > > > > > > > > > You also need to delete them in include/public/tmem.h and rev up the > > > > > > TMEM_SPEC_VERSION I think? > > > > > > > > > > I said this elsewhere already - you can''t simply delete things > > > > > from the public interface. > > > > > > > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > > > > then, to reinstate the typedef under a suitable #ifdef > > > > __XEN_INTERFACE_VERSION__ ? > > > > > > I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which > > > was ''xen_pfn_t'' based. And the struct that got changed is now > > > using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''. > > > > It''s not OK for any existing code using the heads and the old name > > though, since it will fail to build. > > The name is still ''cmfn''. Are you saying that consumers (external > to the Xen tree) of this header will fail to build b/c the typedef is > gone?They will if they are using the typedef itself for something, which isn''t restricted to the single use in the struct. They could be using it for something entirely unrelated, or as a temporary variable while calculating what they are going to put in the struct etc. Ian.
Konrad Rzeszutek Wilk
2013-Nov-25 19:56 UTC
Re: [PATCH 03/16] tmem: cleanup: rm unused tmem_op
On Mon, Nov 25, 2013 at 05:12:36PM +0000, Ian Campbell wrote:> On Mon, 2013-11-25 at 12:09 -0500, Konrad Rzeszutek Wilk wrote: > > On Mon, Nov 25, 2013 at 04:40:18PM +0000, Ian Campbell wrote: > > > On Mon, 2013-11-25 at 11:37 -0500, Konrad Rzeszutek Wilk wrote: > > > > On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote: > > > > > On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: > > > > > > >>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: > > > > > > > On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: > > > > > > >> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. > > > > > > > > > > > > > > You also need to delete them in include/public/tmem.h and rev up the > > > > > > > TMEM_SPEC_VERSION I think? > > > > > > > > > > > > I said this elsewhere already - you can''t simply delete things > > > > > > from the public interface. > > > > > > > > > > Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted > > > > > then, to reinstate the typedef under a suitable #ifdef > > > > > __XEN_INTERFACE_VERSION__ ? > > > > > > > > I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which > > > > was ''xen_pfn_t'' based. And the struct that got changed is now > > > > using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''. > > > > > > It''s not OK for any existing code using the heads and the old name > > > though, since it will fail to build. > > > > The name is still ''cmfn''. Are you saying that consumers (external > > to the Xen tree) of this header will fail to build b/c the typedef is > > gone? > > They will if they are using the typedef itself for something, which > isn''t restricted to the single use in the struct. They could be using it > for something entirely unrelated, or as a temporary variable while > calculating what they are going to put in the struct etc.Oh, and since the headers are public, and not everybody copies them (like Linux) this would blow them up. Right, this needs a fix. 
Bob, could you please reinstate a typedef back with an #idef __XEN_INTERFACE_VERSION please? Thanks.> > Ian. > > >
Bob Liu
2013-Nov-26 08:22 UTC
Re: [PATCH 04/16] tmem: cleanup: rm unneeded parameters from put path
On 11/23/2013 01:54 AM, Konrad Rzeszutek Wilk wrote:> On Wed, Nov 20, 2013 at 04:46:13PM +0800, Bob Liu wrote: >> tmem only takes a page as unit, so parameters tmem_offset, pfn_offset and len >> are meanless, this patch remove those parameters from do_tmem_put() related path > ^^^^^^^^ ^-s > > > Not exactly right. They were used by TMEM_WRITE and TMEM_READ, but your > other patch "tmem: cleanup: rm unused tmem_op" removed them. > > Looking a bit on Google I see this thread: > > http://marc.info/?l=xen-devel&m=136975816209598 > > > which implies that the Windows GPL driver is using it? Did you > check its code? (See http://xenbits.xen.org/ext/win-pvdrivers/) >Okay, I''ll check the Windows drivers.> If it is not in the code, can you also CC James Harper on it so > he knows not use those ops? >Sure. Thanks, -Bob
Bob Liu
2013-Nov-26 08:28 UTC
Re: [PATCH 09/16] tmem: cleanup: drop tmemc_list() temporary
On 11/23/2013 02:07 AM, Konrad Rzeszutek Wilk wrote:> On Wed, Nov 20, 2013 at 04:46:18PM +0800, Bob Liu wrote: >> tmemc_list() collects a lot of statistics which are not well defined, >> unreadable and most of them are useless. >> >> This patch drop those mess code and leave TMEMC_LIST subops unimplemented >> temporary. > > Could you mention the title of which patch puts it back? >My plan is put it back after the current tmem code is good enough. It''s easier to clean up the code without tmemc_list(). Thanks, -Bob
Bob Liu
2013-Nov-26 08:41 UTC
Re: [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneeded parameters
On 11/23/2013 02:17 AM, Konrad Rzeszutek Wilk wrote:> On Wed, Nov 20, 2013 at 04:46:22PM +0800, Bob Liu wrote: >> The two parameters of __tmem_alloc_page() can be reduced. > > That is dangerous. The last one (no_heap) was added to guard > against recursion. Check how tmem_alloc_page_thispool calls > alloc_domheap_pages, whcih calls alloc_heap_pages which > can call tmem_relinquish_pages, which can .. well, you will > get the idea. > >> tmem_called_from_tmem() was also dropped by this patch. >> >> Signed-off-by: Bob Liu <bob.liu@oracle.com> >> --- >> xen/common/tmem.c | 13 +++++-------- >> xen/include/xen/tmem_xen.h | 11 +++++------ >> 2 files changed, 10 insertions(+), 14 deletions(-) >> >> diff --git a/xen/common/tmem.c b/xen/common/tmem.c >> index ee61899..bf0fa1b 100644 >> --- a/xen/common/tmem.c >> +++ b/xen/common/tmem.c >> @@ -245,7 +245,7 @@ static struct page_info *tmem_alloc_page(struct tmem_pool *pool) >> if ( pool != NULL && is_persistent(pool) ) >> pfp = __tmem_alloc_page_thispool(pool->client->domain); >> else >> - pfp = __tmem_alloc_page(pool,0); >> + pfp = __tmem_alloc_page(); >> return pfp; >> } >> >> @@ -263,9 +263,8 @@ static noinline void *tmem_mempool_page_get(unsigned long size) >> struct page_info *pi; >> >> ASSERT(size == PAGE_SIZE); >> - if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) >> + if ( (pi = __tmem_alloc_page()) == NULL ) >> return NULL; >> - ASSERT(IS_VALID_PAGE(pi)); >> return page_to_virt(pi); >> } >> >> @@ -2450,7 +2449,6 @@ void tmem_freeze_all(unsigned char key) >> void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) >> { >> struct page_info *pfp; >> - unsigned long evicts_per_relinq = 0; >> int max_evictions = 10; >> >> if (!tmem_enabled() || !tmem_freeable_pages()) >> @@ -2464,14 +2462,13 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) >> return NULL; >> } >> >> - if ( tmem_called_from_tmem(memflags) ) >> + if ( memflags & MEMF_tmem ) >> read_lock(&tmem_rwlock); >> >> - while ( 
(pfp = __tmem_alloc_page(NULL,1)) == NULL ) >> + while ( (pfp = tmem_page_list_get()) == NULL ) > > No no. That could lead to bad recursion. You need to pass ''1'' somehow. >I don''t think so. If pass ''1'' to __tmem_alloc_page(), __tmem_alloc_page() only call tmem_page_list_get() and return. It''s no difference, so an extra parameter is unneeded.> >> { >> if ( (max_evictions-- <= 0) || !tmem_evict()) >> break; >> - evicts_per_relinq++; > > > ? Should this be a seperate patch? > >> } >> if ( pfp != NULL ) >> { >> @@ -2479,7 +2476,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) >> scrub_one_page(pfp); >> } >> >> - if ( tmem_called_from_tmem(memflags) ) >> + if ( memflags & MEMF_tmem ) > > Perhaps a comment too? /* Called recursively from within tmem. */ > >> read_unlock(&tmem_rwlock); >> >> return pfp; >> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h >> index 7468c28..c4b9f5a 100644 >> --- a/xen/include/xen/tmem_xen.h >> +++ b/xen/include/xen/tmem_xen.h >> @@ -144,15 +144,16 @@ static inline void __tmem_free_page_thispool(struct page_info *pi) >> /* >> * Memory allocation for ephemeral (non-persistent) data >> */ >> -static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) >> +static inline struct page_info *__tmem_alloc_page(void) >> { >> struct page_info *pi = tmem_page_list_get(); >> >> - if ( pi == NULL && !no_heap ) >> + if ( pi == NULL) >> pi = alloc_domheap_pages(0,0,MEMF_tmem); >> - ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); >> - if ( pi != NULL && !no_heap ) >> + >> + if ( pi ) >> atomic_inc(&freeable_page_count); >> + ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); >> return pi; >> } >>See __tmem_alloc_page() implementation here. -- Regards, -Bob
On 11/26/2013 03:56 AM, Konrad Rzeszutek Wilk wrote:> On Mon, Nov 25, 2013 at 05:12:36PM +0000, Ian Campbell wrote: >> On Mon, 2013-11-25 at 12:09 -0500, Konrad Rzeszutek Wilk wrote: >>> On Mon, Nov 25, 2013 at 04:40:18PM +0000, Ian Campbell wrote: >>>> On Mon, 2013-11-25 at 11:37 -0500, Konrad Rzeszutek Wilk wrote: >>>>> On Mon, Nov 25, 2013 at 09:52:16AM +0000, Ian Campbell wrote: >>>>>> On Mon, 2013-11-25 at 09:43 +0000, Jan Beulich wrote: >>>>>>>>>> On 22.11.13 at 18:38, Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> wrote: >>>>>>>> On Wed, Nov 20, 2013 at 04:46:12PM +0800, Bob Liu wrote: >>>>>>>>> TMEM_READ/TMEM_WRITE/TMEM_XCHG/TMEM_NEW_PAGE are never be used. >>>>>>>> >>>>>>>> You also need to delete them in include/public/tmem.h and rev up the >>>>>>>> TMEM_SPEC_VERSION I think? >>>>>>> >>>>>>> I said this elsewhere already - you can''t simply delete things >>>>>>> from the public interface. >>>>>> >>>>>> Should 006a687ba4de74d7933c09b43872abc19f126c63 be partially reverted >>>>>> then, to reinstate the typedef under a suitable #ifdef >>>>>> __XEN_INTERFACE_VERSION__ ? >>>>> >>>>> I think that is OK - the typedef was of ''tmem_cli_mfn_t'' which >>>>> was ''xen_pfn_t'' based. And the struct that got changed is now >>>>> using ''xen_pfn_t'' instead of ''tmem_cli_mfn_t''. >>>> >>>> It''s not OK for any existing code using the heads and the old name >>>> though, since it will fail to build. >>> >>> The name is still ''cmfn''. Are you saying that consumers (external >>> to the Xen tree) of this header will fail to build b/c the typedef is >>> gone? >> >> They will if they are using the typedef itself for something, which >> isn''t restricted to the single use in the struct. They could be using it >> for something entirely unrelated, or as a temporary variable while >> calculating what they are going to put in the struct etc. > > Oh, and since the headers are public, and not everybody copies them (like > Linux) this would blow them up. Right, this needs a fix. 
> > Bob, could you please reinstate a typedef back with an #idef __XEN_INTERFACE_VERSION > please? >Okay, I''ll have a try. -- Regards, -Bob
Konrad Rzeszutek Wilk
2013-Nov-26 17:38 UTC
Re: [PATCH 13/16] tmem: cleanup: __tmem_alloc_page: drop unneeded parameters
On Tue, Nov 26, 2013 at 04:41:42PM +0800, Bob Liu wrote:> > On 11/23/2013 02:17 AM, Konrad Rzeszutek Wilk wrote: > > On Wed, Nov 20, 2013 at 04:46:22PM +0800, Bob Liu wrote: > >> The two parameters of __tmem_alloc_page() can be reduced. > > > > That is dangerous. The last one (no_heap) was added to guard > > against recursion. Check how tmem_alloc_page_thispool calls > > alloc_domheap_pages, whcih calls alloc_heap_pages which > > can call tmem_relinquish_pages, which can .. well, you will > > get the idea.And I think that is not an issue anymore.> > > >> tmem_called_from_tmem() was also dropped by this patch. > >> > >> Signed-off-by: Bob Liu <bob.liu@oracle.com> > >> --- > >> xen/common/tmem.c | 13 +++++-------- > >> xen/include/xen/tmem_xen.h | 11 +++++------ > >> 2 files changed, 10 insertions(+), 14 deletions(-) > >> > >> diff --git a/xen/common/tmem.c b/xen/common/tmem.c > >> index ee61899..bf0fa1b 100644 > >> --- a/xen/common/tmem.c > >> +++ b/xen/common/tmem.c > >> @@ -245,7 +245,7 @@ static struct page_info *tmem_alloc_page(struct tmem_pool *pool) > >> if ( pool != NULL && is_persistent(pool) ) > >> pfp = __tmem_alloc_page_thispool(pool->client->domain); > >> else > >> - pfp = __tmem_alloc_page(pool,0); > >> + pfp = __tmem_alloc_page(); > >> return pfp; > >> } > >> > >> @@ -263,9 +263,8 @@ static noinline void *tmem_mempool_page_get(unsigned long size) > >> struct page_info *pi; > >> > >> ASSERT(size == PAGE_SIZE); > >> - if ( (pi = __tmem_alloc_page(NULL,0)) == NULL ) > >> + if ( (pi = __tmem_alloc_page()) == NULL ) > >> return NULL; > >> - ASSERT(IS_VALID_PAGE(pi)); > >> return page_to_virt(pi); > >> } > >> > >> @@ -2450,7 +2449,6 @@ void tmem_freeze_all(unsigned char key) > >> void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > >> { > >> struct page_info *pfp; > >> - unsigned long evicts_per_relinq = 0; > >> int max_evictions = 10; > >> > >> if (!tmem_enabled() || !tmem_freeable_pages()) > >> @@ -2464,14 +2462,13 @@ void 
*tmem_relinquish_pages(unsigned int order, unsigned int memflags) > >> return NULL; > >> } > >> > >> - if ( tmem_called_from_tmem(memflags) ) > >> + if ( memflags & MEMF_tmem ) > >> read_lock(&tmem_rwlock); > >> > >> - while ( (pfp = __tmem_alloc_page(NULL,1)) == NULL ) > >> + while ( (pfp = tmem_page_list_get()) == NULL ) > > > > No no. That could lead to bad recursion. You need to pass ''1'' somehow. > > > > I don''t think so. > If pass ''1'' to __tmem_alloc_page(), __tmem_alloc_page() only call > tmem_page_list_get() and return. > It''s no difference, so an extra parameter is unneeded.Right! Thanks for spotting that. I read that new change as: + while ( (pfp = __tmem_alloc_page()) == NULL ) !> > > > >> { > >> if ( (max_evictions-- <= 0) || !tmem_evict()) > >> break; > >> - evicts_per_relinq++; > > > > > > ? Should this be a seperate patch? > > > >> } > >> if ( pfp != NULL ) > >> { > >> @@ -2479,7 +2476,7 @@ void *tmem_relinquish_pages(unsigned int order, unsigned int memflags) > >> scrub_one_page(pfp); > >> } > >> > >> - if ( tmem_called_from_tmem(memflags) ) > >> + if ( memflags & MEMF_tmem ) > > > > Perhaps a comment too? /* Called recursively from within tmem. 
*/ > > > >> read_unlock(&tmem_rwlock); > >> > >> return pfp; > >> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > >> index 7468c28..c4b9f5a 100644 > >> --- a/xen/include/xen/tmem_xen.h > >> +++ b/xen/include/xen/tmem_xen.h > >> @@ -144,15 +144,16 @@ static inline void __tmem_free_page_thispool(struct page_info *pi) > >> /* > >> * Memory allocation for ephemeral (non-persistent) data > >> */ > >> -static inline struct page_info *__tmem_alloc_page(void *pool, int no_heap) > >> +static inline struct page_info *__tmem_alloc_page(void)> >> { > >> struct page_info *pi = tmem_page_list_get(); > >> > >> - if ( pi == NULL && !no_heap ) > >> + if ( pi == NULL) > >> pi = alloc_domheap_pages(0,0,MEMF_tmem); > >> - ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); > >> - if ( pi != NULL && !no_heap ) > >> + > >> + if ( pi ) > >> atomic_inc(&freeable_page_count); > >> + ASSERT((pi == NULL) || IS_VALID_PAGE(pi)); > >> return pi; > >> } > >> > > See __tmem_alloc_page() implementation here.<nods>> > -- > Regards, > -Bob
Andrew Cooper
2013-Nov-27 14:38 UTC
Re: [PATCH 14/16] tmem: cleanup: drop useless functions from header file
On 20/11/13 08:46, Bob Liu wrote:> They are several one line functions in tmem_xen.h which are useless, this patch > embeded them into tmem.c directly. > > Signed-off-by: Bob Liu <bob.liu@oracle.com> > --- > xen/common/tmem.c | 22 +++++++++++----------- > xen/include/xen/tmem_xen.h | 27 --------------------------- > 2 files changed, 11 insertions(+), 38 deletions(-) > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > index bf0fa1b..2e807d4 100644 > --- a/xen/common/tmem.c > +++ b/xen/common/tmem.c > @@ -1212,7 +1212,7 @@ obj_unlock: > > static int tmem_evict(void) > { > - struct client *client = tmem_client_from_current(); > + struct client *client = (struct client *)current->domain->tmem;domain->tmem is a void pointer. Therefore this explicit cast to (struct client *) is redundant. I would however, suggest updating struct domain to have a struct client pointer rather than a void pointer. ~Andrew> struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del; > struct tmem_object_root *obj; > struct tmem_pool *pool; > @@ -1625,7 +1625,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > list_del(&pgp->us.client_eph_pages); > list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); > spin_unlock(&eph_lists_spinlock); > - obj->last_client = tmem_get_cli_id_from_current(); > + obj->last_client = current->domain->domain_id; > } > } > if ( obj != NULL ) > @@ -1694,7 +1694,7 @@ out: > > static int do_tmem_destroy_pool(uint32_t pool_id) > { > - struct client *client = tmem_client_from_current(); > + struct client *client = (struct client *)current->domain->tmem; > struct tmem_pool *pool; > > if ( client->pools == NULL ) > @@ -1725,7 +1725,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, > int i; > > if ( this_cli_id == TMEM_CLI_ID_NULL ) > - cli_id = tmem_get_cli_id_from_current(); > + cli_id = current->domain->domain_id; > else > cli_id = this_cli_id; > tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...", 
> @@ -1766,7 +1766,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, > } > else > { > - client = tmem_client_from_current(); > + client = (struct client *)current->domain->tmem; > ASSERT(client != NULL); > for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ ) > if ( client->pools[d_poolid] == NULL ) > @@ -2206,7 +2206,7 @@ static int do_tmem_control(struct tmem_op *op) > uint32_t subop = op->u.ctrl.subop; > struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]); > > - if (!tmem_current_is_privileged()) > + if ( xsm_tmem_control(XSM_PRIV) ) > return -EPERM; > > switch(subop) > @@ -2278,7 +2278,7 @@ static int do_tmem_control(struct tmem_op *op) > long do_tmem_op(tmem_cli_op_t uops) > { > struct tmem_op op; > - struct client *client = tmem_client_from_current(); > + struct client *client = (struct client *)current->domain->tmem; > struct tmem_pool *pool = NULL; > struct oid *oidp; > int rc = 0; > @@ -2290,10 +2290,10 @@ long do_tmem_op(tmem_cli_op_t uops) > if ( !tmem_initialized ) > return -ENODEV; > > - if ( !tmem_current_permitted() ) > + if ( xsm_tmem_op(XSM_HOOK) ) > return -EPERM; > > - if ( client != NULL && tmem_client_is_dying(client) ) > + if ( client != NULL && client->domain->is_dying ) > return -ENODEV; > > if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) > @@ -2328,7 +2328,7 @@ long do_tmem_op(tmem_cli_op_t uops) > { > write_lock(&tmem_rwlock); > write_lock_set = 1; > - if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL ) > + if ( (client = client_create(current->domain->domain_id)) == NULL ) > { > tmem_client_err("tmem: can''t create tmem structure for %s\n", > tmem_client_str); > @@ -2417,7 +2417,7 @@ void tmem_destroy(void *v) > if ( client == NULL ) > return; > > - if ( !tmem_client_is_dying(client) ) > + if ( !client->domain->is_dying ) > { > printk("tmem: tmem_destroy can only destroy dying client\n"); > return; > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > index 
c4b9f5a..bf3be62 100644 > --- a/xen/include/xen/tmem_xen.h > +++ b/xen/include/xen/tmem_xen.h > @@ -183,33 +183,6 @@ static inline struct client *tmem_client_from_cli_id(domid_t cli_id) > return c; > } > > -static inline struct client *tmem_client_from_current(void) > -{ > - return (struct client *)(current->domain->tmem); > -} > - > -#define tmem_client_is_dying(_client) (!!_client->domain->is_dying) > - > -static inline domid_t tmem_get_cli_id_from_current(void) > -{ > - return current->domain->domain_id; > -} > - > -static inline struct domain *tmem_get_cli_ptr_from_current(void) > -{ > - return current->domain; > -} > - > -static inline bool_t tmem_current_permitted(void) > -{ > - return !xsm_tmem_op(XSM_HOOK); > -} > - > -static inline bool_t tmem_current_is_privileged(void) > -{ > - return !xsm_tmem_control(XSM_PRIV); > -} > - > static inline uint8_t tmem_get_first_byte(struct page_info *pfp) > { > void *p = __map_domain_page(pfp);
Konrad Rzeszutek Wilk
2013-Nov-27 14:52 UTC
Re: [PATCH 14/16] tmem: cleanup: drop useless functions from header file
On Wed, Nov 27, 2013 at 02:38:07PM +0000, Andrew Cooper wrote:> On 20/11/13 08:46, Bob Liu wrote: > > They are several one line functions in tmem_xen.h which are useless, this patch > > embeded them into tmem.c directly. > > > > Signed-off-by: Bob Liu <bob.liu@oracle.com> > > --- > > xen/common/tmem.c | 22 +++++++++++----------- > > xen/include/xen/tmem_xen.h | 27 --------------------------- > > 2 files changed, 11 insertions(+), 38 deletions(-) > > > > diff --git a/xen/common/tmem.c b/xen/common/tmem.c > > index bf0fa1b..2e807d4 100644 > > --- a/xen/common/tmem.c > > +++ b/xen/common/tmem.c > > @@ -1212,7 +1212,7 @@ obj_unlock: > > > > static int tmem_evict(void) > > { > > - struct client *client = tmem_client_from_current(); > > + struct client *client = (struct client *)current->domain->tmem; > > domain->tmem is a void pointer. Therefore this explicit cast to (struct > client *) is redundant. > > I would however, suggest updating struct domain to have a struct client > pointer rather than a void pointer.That might bring more of #include in the file. That was I believe the initial reason for not doing that. 
But perhaps one can also just do: extern struct client; in the header that has domain struct (I always forget the name of it) and that would make this neater.> > ~Andrew > > > struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del; > > struct tmem_object_root *obj; > > struct tmem_pool *pool; > > @@ -1625,7 +1625,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, > > list_del(&pgp->us.client_eph_pages); > > list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); > > spin_unlock(&eph_lists_spinlock); > > - obj->last_client = tmem_get_cli_id_from_current(); > > + obj->last_client = current->domain->domain_id; > > } > > } > > if ( obj != NULL ) > > @@ -1694,7 +1694,7 @@ out: > > > > static int do_tmem_destroy_pool(uint32_t pool_id) > > { > > - struct client *client = tmem_client_from_current(); > > + struct client *client = (struct client *)current->domain->tmem; > > struct tmem_pool *pool; > > > > if ( client->pools == NULL ) > > @@ -1725,7 +1725,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, > > int i; > > > > if ( this_cli_id == TMEM_CLI_ID_NULL ) > > - cli_id = tmem_get_cli_id_from_current(); > > + cli_id = current->domain->domain_id; > > else > > cli_id = this_cli_id; > > tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...", > > @@ -1766,7 +1766,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, > > } > > else > > { > > - client = tmem_client_from_current(); > > + client = (struct client *)current->domain->tmem; > > ASSERT(client != NULL); > > for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ ) > > if ( client->pools[d_poolid] == NULL ) > > @@ -2206,7 +2206,7 @@ static int do_tmem_control(struct tmem_op *op) > > uint32_t subop = op->u.ctrl.subop; > > struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]); > > > > - if (!tmem_current_is_privileged()) > > + if ( xsm_tmem_control(XSM_PRIV) ) > > return -EPERM; > > > > switch(subop) > > @@ -2278,7 +2278,7 @@ static int 
do_tmem_control(struct tmem_op *op) > > long do_tmem_op(tmem_cli_op_t uops) > > { > > struct tmem_op op; > > - struct client *client = tmem_client_from_current(); > > + struct client *client = (struct client *)current->domain->tmem; > > struct tmem_pool *pool = NULL; > > struct oid *oidp; > > int rc = 0; > > @@ -2290,10 +2290,10 @@ long do_tmem_op(tmem_cli_op_t uops) > > if ( !tmem_initialized ) > > return -ENODEV; > > > > - if ( !tmem_current_permitted() ) > > + if ( xsm_tmem_op(XSM_HOOK) ) > > return -EPERM; > > > > - if ( client != NULL && tmem_client_is_dying(client) ) > > + if ( client != NULL && client->domain->is_dying ) > > return -ENODEV; > > > > if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) > > @@ -2328,7 +2328,7 @@ long do_tmem_op(tmem_cli_op_t uops) > > { > > write_lock(&tmem_rwlock); > > write_lock_set = 1; > > - if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL ) > > + if ( (client = client_create(current->domain->domain_id)) == NULL ) > > { > > tmem_client_err("tmem: can''t create tmem structure for %s\n", > > tmem_client_str); > > @@ -2417,7 +2417,7 @@ void tmem_destroy(void *v) > > if ( client == NULL ) > > return; > > > > - if ( !tmem_client_is_dying(client) ) > > + if ( !client->domain->is_dying ) > > { > > printk("tmem: tmem_destroy can only destroy dying client\n"); > > return; > > diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h > > index c4b9f5a..bf3be62 100644 > > --- a/xen/include/xen/tmem_xen.h > > +++ b/xen/include/xen/tmem_xen.h > > @@ -183,33 +183,6 @@ static inline struct client *tmem_client_from_cli_id(domid_t cli_id) > > return c; > > } > > > > -static inline struct client *tmem_client_from_current(void) > > -{ > > - return (struct client *)(current->domain->tmem); > > -} > > - > > -#define tmem_client_is_dying(_client) (!!_client->domain->is_dying) > > - > > -static inline domid_t tmem_get_cli_id_from_current(void) > > -{ > > - return current->domain->domain_id; > > -} > > - > > 
-static inline struct domain *tmem_get_cli_ptr_from_current(void) > > -{ > > - return current->domain; > > -} > > - > > -static inline bool_t tmem_current_permitted(void) > > -{ > > - return !xsm_tmem_op(XSM_HOOK); > > -} > > - > > -static inline bool_t tmem_current_is_privileged(void) > > -{ > > - return !xsm_tmem_control(XSM_PRIV); > > -} > > - > > static inline uint8_t tmem_get_first_byte(struct page_info *pfp) > > { > > void *p = __map_domain_page(pfp); > > > _______________________________________________ > Xen-devel mailing list > Xen-devel@lists.xen.org > http://lists.xen.org/xen-devel
Andrew Cooper
2013-Nov-27 14:59 UTC
Re: [PATCH 14/16] tmem: cleanup: drop useless functions from header file
On 27/11/13 14:52, Konrad Rzeszutek Wilk wrote:> On Wed, Nov 27, 2013 at 02:38:07PM +0000, Andrew Cooper wrote: >> On 20/11/13 08:46, Bob Liu wrote: >>> They are several one line functions in tmem_xen.h which are useless, this patch >>> embeded them into tmem.c directly. >>> >>> Signed-off-by: Bob Liu <bob.liu@oracle.com> >>> --- >>> xen/common/tmem.c | 22 +++++++++++----------- >>> xen/include/xen/tmem_xen.h | 27 --------------------------- >>> 2 files changed, 11 insertions(+), 38 deletions(-) >>> >>> diff --git a/xen/common/tmem.c b/xen/common/tmem.c >>> index bf0fa1b..2e807d4 100644 >>> --- a/xen/common/tmem.c >>> +++ b/xen/common/tmem.c >>> @@ -1212,7 +1212,7 @@ obj_unlock: >>> >>> static int tmem_evict(void) >>> { >>> - struct client *client = tmem_client_from_current(); >>> + struct client *client = (struct client *)current->domain->tmem; >> domain->tmem is a void pointer. Therefore this explicit cast to (struct >> client *) is redundant. >> >> I would however, suggest updating struct domain to have a struct client >> pointer rather than a void pointer. > That might bring more of #include in the file. That was I believe the initial > reason for not doing that. But perhaps one can also just do: > > extern struct client; > > in the header that has domain struct (I always forget the name of it) > and that would make this neater.just "struct client;" is the correct way to "extern" a struct in C. I might also suggest a rename to tmem_client ? 
~Andrew> >> ~Andrew >> >>> struct tmem_page_descriptor *pgp = NULL, *pgp2, *pgp_del; >>> struct tmem_object_root *obj; >>> struct tmem_pool *pool; >>> @@ -1625,7 +1625,7 @@ static int do_tmem_get(struct tmem_pool *pool, struct oid *oidp, uint32_t index, >>> list_del(&pgp->us.client_eph_pages); >>> list_add_tail(&pgp->us.client_eph_pages,&client->ephemeral_page_list); >>> spin_unlock(&eph_lists_spinlock); >>> - obj->last_client = tmem_get_cli_id_from_current(); >>> + obj->last_client = current->domain->domain_id; >>> } >>> } >>> if ( obj != NULL ) >>> @@ -1694,7 +1694,7 @@ out: >>> >>> static int do_tmem_destroy_pool(uint32_t pool_id) >>> { >>> - struct client *client = tmem_client_from_current(); >>> + struct client *client = (struct client *)current->domain->tmem; >>> struct tmem_pool *pool; >>> >>> if ( client->pools == NULL ) >>> @@ -1725,7 +1725,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, >>> int i; >>> >>> if ( this_cli_id == TMEM_CLI_ID_NULL ) >>> - cli_id = tmem_get_cli_id_from_current(); >>> + cli_id = current->domain->domain_id; >>> else >>> cli_id = this_cli_id; >>> tmem_client_info("tmem: allocating %s-%s tmem pool for %s=%d...", >>> @@ -1766,7 +1766,7 @@ static int do_tmem_new_pool(domid_t this_cli_id, >>> } >>> else >>> { >>> - client = tmem_client_from_current(); >>> + client = (struct client *)current->domain->tmem; >>> ASSERT(client != NULL); >>> for ( d_poolid = 0; d_poolid < MAX_POOLS_PER_DOMAIN; d_poolid++ ) >>> if ( client->pools[d_poolid] == NULL ) >>> @@ -2206,7 +2206,7 @@ static int do_tmem_control(struct tmem_op *op) >>> uint32_t subop = op->u.ctrl.subop; >>> struct oid *oidp = (struct oid *)(&op->u.ctrl.oid[0]); >>> >>> - if (!tmem_current_is_privileged()) >>> + if ( xsm_tmem_control(XSM_PRIV) ) >>> return -EPERM; >>> >>> switch(subop) >>> @@ -2278,7 +2278,7 @@ static int do_tmem_control(struct tmem_op *op) >>> long do_tmem_op(tmem_cli_op_t uops) >>> { >>> struct tmem_op op; >>> - struct client *client = 
tmem_client_from_current(); >>> + struct client *client = (struct client *)current->domain->tmem; >>> struct tmem_pool *pool = NULL; >>> struct oid *oidp; >>> int rc = 0; >>> @@ -2290,10 +2290,10 @@ long do_tmem_op(tmem_cli_op_t uops) >>> if ( !tmem_initialized ) >>> return -ENODEV; >>> >>> - if ( !tmem_current_permitted() ) >>> + if ( xsm_tmem_op(XSM_HOOK) ) >>> return -EPERM; >>> >>> - if ( client != NULL && tmem_client_is_dying(client) ) >>> + if ( client != NULL && client->domain->is_dying ) >>> return -ENODEV; >>> >>> if ( unlikely(tmem_get_tmemop_from_client(&op, uops) != 0) ) >>> @@ -2328,7 +2328,7 @@ long do_tmem_op(tmem_cli_op_t uops) >>> { >>> write_lock(&tmem_rwlock); >>> write_lock_set = 1; >>> - if ( (client = client_create(tmem_get_cli_id_from_current())) == NULL ) >>> + if ( (client = client_create(current->domain->domain_id)) == NULL ) >>> { >>> tmem_client_err("tmem: can''t create tmem structure for %s\n", >>> tmem_client_str); >>> @@ -2417,7 +2417,7 @@ void tmem_destroy(void *v) >>> if ( client == NULL ) >>> return; >>> >>> - if ( !tmem_client_is_dying(client) ) >>> + if ( !client->domain->is_dying ) >>> { >>> printk("tmem: tmem_destroy can only destroy dying client\n"); >>> return; >>> diff --git a/xen/include/xen/tmem_xen.h b/xen/include/xen/tmem_xen.h >>> index c4b9f5a..bf3be62 100644 >>> --- a/xen/include/xen/tmem_xen.h >>> +++ b/xen/include/xen/tmem_xen.h >>> @@ -183,33 +183,6 @@ static inline struct client *tmem_client_from_cli_id(domid_t cli_id) >>> return c; >>> } >>> >>> -static inline struct client *tmem_client_from_current(void) >>> -{ >>> - return (struct client *)(current->domain->tmem); >>> -} >>> - >>> -#define tmem_client_is_dying(_client) (!!_client->domain->is_dying) >>> - >>> -static inline domid_t tmem_get_cli_id_from_current(void) >>> -{ >>> - return current->domain->domain_id; >>> -} >>> - >>> -static inline struct domain *tmem_get_cli_ptr_from_current(void) >>> -{ >>> - return current->domain; >>> -} >>> - >>> -static inline 
bool_t tmem_current_permitted(void) >>> -{ >>> - return !xsm_tmem_op(XSM_HOOK); >>> -} >>> - >>> -static inline bool_t tmem_current_is_privileged(void) >>> -{ >>> - return !xsm_tmem_control(XSM_PRIV); >>> -} >>> - >>> static inline uint8_t tmem_get_first_byte(struct page_info *pfp) >>> { >>> void *p = __map_domain_page(pfp); >> >> _______________________________________________ >> Xen-devel mailing list >> Xen-devel@lists.xen.org >> http://lists.xen.org/xen-devel
Jan Beulich
2013-Nov-27 15:55 UTC
Re: [PATCH 14/16] tmem: cleanup: drop useless functions from header file
>>> On 27.11.13 at 15:59, Andrew Cooper <andrew.cooper3@citrix.com> wrote: > On 27/11/13 14:52, Konrad Rzeszutek Wilk wrote: >> On Wed, Nov 27, 2013 at 02:38:07PM +0000, Andrew Cooper wrote: >>> On 20/11/13 08:46, Bob Liu wrote: >>>> They are several one line functions in tmem_xen.h which are useless, this > patch >>>> embeded them into tmem.c directly. >>>> >>>> Signed-off-by: Bob Liu <bob.liu@oracle.com> >>>> --- >>>> xen/common/tmem.c | 22 +++++++++++----------- >>>> xen/include/xen/tmem_xen.h | 27 --------------------------- >>>> 2 files changed, 11 insertions(+), 38 deletions(-) >>>> >>>> diff --git a/xen/common/tmem.c b/xen/common/tmem.c >>>> index bf0fa1b..2e807d4 100644 >>>> --- a/xen/common/tmem.c >>>> +++ b/xen/common/tmem.c >>>> @@ -1212,7 +1212,7 @@ obj_unlock: >>>> >>>> static int tmem_evict(void) >>>> { >>>> - struct client *client = tmem_client_from_current(); >>>> + struct client *client = (struct client *)current->domain->tmem; >>> domain->tmem is a void pointer. Therefore this explicit cast to (struct >>> client *) is redundant. >>> >>> I would however, suggest updating struct domain to have a struct client >>> pointer rather than a void pointer. >> That might bring more of #include in the file. That was I believe the initial >> reason for not doing that. But perhaps one can also just do: >> >> extern struct client; >> >> in the header that has domain struct (I always forget the name of it) >> and that would make this neater. > > just "struct client;" is the correct way to "extern" a struct in C.You don''t need anything in the case here. Such forward declarations are only needed when the type would otherwise first appear in a function parameter declaration (since then its scope becomes only that of the function).> I might also suggest a rename to tmem_client ?And it not being that way is what I think was the reason not to expose the type outside of tmem files. Jan