From: dlg
Date: Fri, 16 Jun 2017 01:55:45 +0000 (+0000)
Subject: add garbage collection of unused lists of percpu cached items.
X-Git-Url: http://artulab.com/gitweb/?a=commitdiff_plain;h=338a5c6849299c181ddfaa0bcbb0a7cf20fcfe17;p=openbsd

add garbage collection of unused lists of percpu cached items.

the cpu caches in pools amortise the cost of accessing global
structures by moving lists of items around instead of individual
items. excess lists of items are stored in the global pool struct,
but these idle lists never get returned to the system for use
elsewhere.

this adds a timestamp to the global idle list, which is updated
when the idle list stops being empty. if the idle list hasn't been
empty for a while, it means the per cpu caches aren't using the
idle entries and they can be recovered. timestamping the idle list
prevents recovery of items that may be used again shortly. eg, rx
ring processing and replenishing from rate limited interrupts tend
to allocate and free items in large chunks, which the timestamping
smooths out.

gc'ed lists are returned to the pool pages, which in turn get gc'ed
back to uvm.

ok visa@
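the mechanism in the diff below boils down to one timestamp on the
queue of idle lists: it is reset only when the queue goes from empty
to non-empty, and the gc releases at most one list per pass once the
queue has been non-empty longer than hz * pool_wait_gc. the following
is a minimal userland sketch of that technique, not code from the
tree; the names (idle_cache, idle_put, idle_gc, IDLE_WAIT) and the
stand-in ticks/hz variables are illustrative assumptions.

/*
 * sketch of idle-queue timestamping, assuming a monotonic "ticks"
 * counter incremented hz times per second, as in the kernel.
 */
#include <sys/queue.h>

int ticks;				/* stand-in for the kernel's ticks */
int hz = 100;				/* stand-in for the kernel's hz */

#define IDLE_WAIT	8		/* stand-in for pool_wait_gc, in seconds */

struct idle_list {
	TAILQ_ENTRY(idle_list) il_entry;
};
TAILQ_HEAD(idle_head, idle_list);

struct idle_cache {
	struct idle_head	ic_lists;	/* queue of idle item lists */
	int			ic_tick;	/* when the queue went non-empty */
	unsigned int		ic_nlist;	/* # of idle lists */
	unsigned long long	ic_ngc;		/* # of lists released by the gc */
};

void
idle_init(struct idle_cache *ic)
{
	TAILQ_INIT(&ic->ic_lists);
	ic->ic_tick = ticks;
	ic->ic_nlist = 0;
	ic->ic_ngc = 0;
}

/* enqueue an idle list; restart the clock if the queue was empty */
void
idle_put(struct idle_cache *ic, struct idle_list *il)
{
	if (TAILQ_EMPTY(&ic->ic_lists))
		ic->ic_tick = ticks;
	TAILQ_INSERT_TAIL(&ic->ic_lists, il, il_entry);
	ic->ic_nlist++;
}

/*
 * release at most one list per call, and only when the queue has
 * been non-empty longer than IDLE_WAIT seconds, so bursty alloc/free
 * patterns (eg, rx ring replenishment) keep their lists. locking is
 * omitted; the kernel try-enters pr_cache_mtx here.
 */
struct idle_list *
idle_gc(struct idle_cache *ic)
{
	struct idle_list *il;

	if ((ticks - ic->ic_tick) <= (hz * IDLE_WAIT))
		return (NULL);

	il = TAILQ_FIRST(&ic->ic_lists);
	if (il != NULL) {
		TAILQ_REMOVE(&ic->ic_lists, il, il_entry);
		ic->ic_tick = ticks;	/* rate limit: one list per period */
		ic->ic_nlist--;
		ic->ic_ngc++;
	}
	return (il);
}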
---

diff --git a/sys/kern/subr_pool.c b/sys/kern/subr_pool.c
index 975d0d55720..62ac8cc6564 100644
--- a/sys/kern/subr_pool.c
+++ b/sys/kern/subr_pool.c
@@ -1,4 +1,4 @@
-/*	$OpenBSD: subr_pool.c,v 1.213 2017/06/16 01:33:20 dlg Exp $	*/
+/*	$OpenBSD: subr_pool.c,v 1.214 2017/06/16 01:55:45 dlg Exp $	*/
 /*	$NetBSD: subr_pool.c,v 1.61 2001/09/26 07:14:56 chs Exp $	*/
 
 /*-
@@ -135,6 +135,7 @@ struct pool_cache {
 void	*pool_cache_get(struct pool *);
 void	 pool_cache_put(struct pool *, void *);
 void	 pool_cache_destroy(struct pool *);
+void	 pool_cache_gc(struct pool *);
 #endif
 void	 pool_cache_pool_info(struct pool *, struct kinfo_pool *);
 int	 pool_cache_info(struct pool *, void *, size_t *);
@@ -1476,6 +1477,11 @@ pool_gc_pages(void *null)
 	rw_enter_read(&pool_lock);
 	s = splvm(); /* XXX go to splvm until all pools _setipl properly */
 	SIMPLEQ_FOREACH(pp, &pool_head, pr_poollist) {
+#ifdef MULTIPROCESSOR
+		if (pp->pr_cache != NULL)
+			pool_cache_gc(pp);
+#endif
+
 		if (pp->pr_nidle <= pp->pr_minpages || /* guess */
 		    !mtx_enter_try(&pp->pr_mtx)) /* try */
 			continue;
@@ -1642,8 +1648,10 @@ pool_cache_init(struct pool *pp)
 	arc4random_buf(pp->pr_cache_magic, sizeof(pp->pr_cache_magic));
 	TAILQ_INIT(&pp->pr_cache_lists);
 	pp->pr_cache_nlist = 0;
+	pp->pr_cache_tick = ticks;
 	pp->pr_cache_items = 8;
 	pp->pr_cache_contention = 0;
+	pp->pr_cache_ngc = 0;
 
 	CPUMEM_FOREACH(pc, &i, cm) {
 		pc->pc_actv = NULL;
@@ -1659,6 +1667,8 @@ pool_cache_init(struct pool *pp)
 		pc->pc_nout = 0;
 	}
 
+	membar_producer();
+
 	pp->pr_cache = cm;
 }
 
@@ -1740,6 +1750,9 @@ pool_cache_list_free(struct pool *pp, struct pool_cache *pc,
     struct pool_cache_item *ci)
 {
 	pool_list_enter(pp);
+	if (TAILQ_EMPTY(&pp->pr_cache_lists))
+		pp->pr_cache_tick = ticks;
+
 	TAILQ_INSERT_TAIL(&pp->pr_cache_lists, ci, ci_nextl);
 	pp->pr_cache_nlist++;
 
@@ -1893,8 +1906,10 @@ pool_cache_destroy(struct pool *pp)
 	struct cpumem_iter i;
 	struct cpumem *cm;
 
+	rw_enter_write(&pool_lock); /* serialise with the gc */
 	cm = pp->pr_cache;
 	pp->pr_cache = NULL; /* make pool_put avoid the cache */
+	rw_exit_write(&pool_lock);
 
 	CPUMEM_FOREACH(pc, &i, cm) {
 		pool_cache_list_put(pp, pc->pc_actv);
@@ -1908,6 +1923,29 @@ pool_cache_destroy(struct pool *pp)
 		pl = pool_cache_list_put(pp, pl);
 }
 
+void
+pool_cache_gc(struct pool *pp)
+{
+	if ((ticks - pp->pr_cache_tick) > (hz * pool_wait_gc) &&
+	    !TAILQ_EMPTY(&pp->pr_cache_lists) &&
+	    mtx_enter_try(&pp->pr_cache_mtx)) {
+		struct pool_cache_item *pl = NULL;
+
+		pl = TAILQ_FIRST(&pp->pr_cache_lists);
+		if (pl != NULL) {
+			TAILQ_REMOVE(&pp->pr_cache_lists, pl, ci_nextl);
+			pp->pr_cache_tick = ticks;
+			pp->pr_cache_nlist--;
+
+			pp->pr_cache_ngc++;
+		}
+
+		mtx_leave(&pp->pr_cache_mtx);
+
+		pool_cache_list_put(pp, pl);
+	}
+}
+
 void
 pool_cache_pool_info(struct pool *pp, struct kinfo_pool *pi)
 {
@@ -1955,7 +1993,7 @@ pool_cache_info(struct pool *pp, void *oldp, size_t *oldlenp)
 	memset(&kpc, 0, sizeof(kpc)); /* don't leak padding */
 
 	mtx_enter(&pp->pr_cache_mtx);
-	kpc.pr_ngc = 0; /* notyet */
+	kpc.pr_ngc = pp->pr_cache_ngc;
 	kpc.pr_len = pp->pr_cache_items;
 	kpc.pr_nlist = pp->pr_cache_nlist;
 	kpc.pr_contention = pp->pr_cache_contention;
diff --git a/sys/sys/pool.h b/sys/sys/pool.h
index 1a5380ed652..82aa9ba42bc 100644
--- a/sys/sys/pool.h
+++ b/sys/sys/pool.h
@@ -1,4 +1,4 @@
-/*	$OpenBSD: pool.h,v 1.70 2017/06/15 02:52:30 dlg Exp $	*/
+/*	$OpenBSD: pool.h,v 1.71 2017/06/16 01:55:45 dlg Exp $	*/
 /*	$NetBSD: pool.h,v 1.27 2001/06/06 22:00:17 rafal Exp $	*/
 
 /*-
@@ -185,11 +185,13 @@ struct pool {
 	unsigned long	pr_cache_magic[2];
 	struct mutex	pr_cache_mtx;
 	struct pool_cache_lists
-			pr_cache_lists;
-	u_int		pr_cache_nlist;	/* # of lists */
+			pr_cache_lists;	/* list of idle item lists */
+	u_int		pr_cache_nlist;	/* # of idle lists */
 	u_int		pr_cache_items;	/* target list length */
 	u_int		pr_cache_contention;
+	int		pr_cache_tick;	/* time idle list was empty */
 	int		pr_cache_nout;
+	uint64_t	pr_cache_ngc;	/* # of times the gc released a list */
 
 	u_int		pr_align;
 	u_int		pr_maxcolors;	/* Cache coloring */
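the membar_producer() added to pool_cache_init() above is the usual
single-writer publication pattern: finish every store that initialises
the per-cpu structures, fence, then store the pointer other cpus test.
below is a hedged sketch of that shape only, with illustrative names
(cache_publish, global_cache); it is not the pool code itself.

#include <sys/atomic.h>		/* membar_producer() */
#include <stdlib.h>

struct cache {
	int c_ready;			/* stand-in for per-cpu state */
};

struct cache *global_cache;		/* NULL until published */

void
cache_publish(void)
{
	struct cache *cm = malloc(sizeof(*cm));

	if (cm == NULL)
		return;
	cm->c_ready = 1;		/* fully initialise first */

	/* order the init stores before the pointer store below */
	membar_producer();

	global_cache = cm;		/* readers may now dereference it */
}

a reader pairs this with membar_consumer() between loading the pointer
and dereferencing what it protects. teardown takes the other route: as
the diff shows, pool_cache_destroy() clears pp->pr_cache while holding
pool_lock write-locked, so the gc (which runs under the read lock)
never sees a cache that is being torn down.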