Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c5ef7240cbcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
 	delta = freeable >> priority;
 	delta *= 4;
 	do_div(delta, shrinker->seeks);
+
+	/*
+	 * Make sure we apply some minimal pressure on default priority
+	 * even on small cgroups. Stale objects are not only consuming memory
+	 * by themselves, but can also hold a reference to a dying cgroup,
+	 * preventing it from being reclaimed. A dying cgroup with all
+	 * corresponding structures like per-cpu stats and kmem caches
+	 * can be really big, so it may lead to a significant waste of memory.
+	 */
+	delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
 	total_scan += delta;
 	if (total_scan < 0) {
 		pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",
@@ -569,8 +580,8 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
 			struct mem_cgroup *memcg, int priority)
 {
 	struct memcg_shrinker_map *map;
-	unsigned long freed = 0;
-	int ret, i;
+	unsigned long ret, freed = 0;
+	int i;
 
 	if (!memcg_kmem_enabled() || !mem_cgroup_online(memcg))
 		return 0;
@@ -666,9 +677,8 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
 				 struct mem_cgroup *memcg,
 				 int priority)
 {
+	unsigned long ret, freed = 0;
 	struct shrinker *shrinker;
-	unsigned long freed = 0;
-	int ret;
 
 	if (!mem_cgroup_is_root(memcg))
 		return shrink_slab_memcg(gfp_mask, nid, memcg, priority);
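
For readers who want to see what the new clamp actually changes, below is a minimal userspace sketch of the delta arithmetic in do_shrink_slab(), before and after this patch. It assumes the kernel defaults (DEF_PRIORITY is 12, DEFAULT_SEEKS is 2, and batch_size falls back to SHRINK_BATCH, i.e. 128, when a shrinker does not set its own batch), and replaces max_t(), min() and do_div() with plain C. It illustrates the idea under those assumptions; it is not the kernel code itself.

#include <stdio.h>

#define DEF_PRIORITY	12	/* default reclaim priority */
#define DEFAULT_SEEKS	2	/* default shrinker->seeks */
#define SHRINK_BATCH	128UL	/* default batch_size */

/* delta as computed before this patch */
static unsigned long long delta_old(unsigned long freeable)
{
	unsigned long long delta = freeable >> DEF_PRIORITY;

	delta *= 4;
	delta /= DEFAULT_SEEKS;		/* plain division instead of do_div() */
	return delta;
}

/* delta with the clamp this patch adds */
static unsigned long long delta_new(unsigned long freeable)
{
	unsigned long long delta = delta_old(freeable);
	unsigned long pressure_floor =
		freeable < SHRINK_BATCH ? freeable : SHRINK_BATCH;

	return delta > pressure_floor ? delta : pressure_floor;
}

int main(void)
{
	unsigned long freeable;

	/*
	 * Before the patch, any shrinker with fewer than 4096 freeable
	 * objects (freeable >> 12 == 0) received no pressure at all at
	 * the default priority.
	 */
	for (freeable = 100; freeable <= 100000; freeable *= 10)
		printf("freeable=%6lu  old delta=%3llu  new delta=%3llu\n",
		       freeable, delta_old(freeable), delta_new(freeable));
	return 0;
}

Clamping to min(freeable, batch_size) rather than to freeable itself means a small cgroup is asked for at most one batch per do_shrink_slab() call, so its cache is drained gradually rather than wiped out in a single pass, while still guaranteeing that stale objects (and the dying cgroup they may pin) are eventually reclaimed.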
