author    David S. Miller <davem@davemloft.net>  2017-09-03 11:01:05 -0700
committer David S. Miller <davem@davemloft.net>  2017-09-03 11:01:05 -0700
commit    4113f36bc7084ea0d47c0f5f1137776b603c91a7 (patch)
tree      df2e5f6fbb0792262701818650a9d6f35f6b5151 /include
parent    64327fc811268d4a24de03dac242ea29de6be75f (diff)
parent    5a63643e583b6a9789d7a225ae076fb4e603991c (diff)
Merge branch 'net-revert-lib-percpu_counter-API-for-fragmentation-mem-accounting'
Jesper Dangaard Brouer says:

====================
net: revert lib/percpu_counter API for fragmentation mem accounting

There is a bug in the fragmentation code's use of the percpu_counter API
that can cause issues on systems with many CPUs (above 24 CPUs).

After much consideration and several attempts at fixing the API usage,
the conclusion is to revert to the simple atomic_t API instead. The ratio
between batch size and threshold size makes fragmentation mem accounting
a bad use-case for the lib/percpu_counter API: using the correct API calls
would unfortunately force systems with many CPUs to always execute an
expensive sum across all CPUs, and the added complexity is not worth it.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
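For context on the drift behind that bug: percpu_counter folds each CPU's
local delta into the shared count only once the delta reaches the batch
size, so a plain percpu_counter_read() can lag the true sum by up to
roughly nr_cpus * batch. A minimal user-space sketch of that arithmetic,
using the 130000 batch from the removed code (the program and its output
format are illustrative, not taken from this diff):

	#include <stdio.h>

	int main(void)
	{
		/* frag_percpu_counter_batch from the removed code */
		const long batch = 130000;

		/* Each CPU can hold just under `batch` of unflushed delta,
		 * so the shared count seen by percpu_counter_read() may lag
		 * the true sum by up to roughly nr_cpus * batch.
		 */
		for (int cpus = 8; cpus <= 64; cpus *= 2)
			printf("%2d CPUs: read may be off by up to ~%ld bytes\n",
			       cpus, (long)cpus * batch);

		return 0;
	}

With fragmentation thresholds in the low-megabyte range, that drift swamps
the limit once CPU counts pass a couple of dozen, so a correct limit check
would have to use percpu_counter_sum() or percpu_counter_compare() and pay
the all-CPU walk on every call.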
Diffstat (limited to 'include')
-rw-r--r--	include/net/inet_frag.h | 35 +++++++++--------------------------
1 file changed, 9 insertions(+), 26 deletions(-)
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 6fdcd2427776..fc59e0775e00 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,9 @@
 #ifndef __NET_FRAG_H__
 #define __NET_FRAG_H__
 
-#include <linux/percpu_counter.h>
-
 struct netns_frags {
-	/* The percpu_counter "mem" need to be cacheline aligned.
-	 *  mem.count must not share cacheline with other writers
-	 */
-	struct percpu_counter	mem ____cacheline_aligned_in_smp;
-
+	/* Keep atomic mem on separate cachelines in structs that include it */
+	atomic_t		mem ____cacheline_aligned_in_smp;
 	/* sysctls */
 	int			timeout;
 	int			high_thresh;
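____cacheline_aligned_in_smp makes the member start on its own cacheline
boundary, so in any struct that embeds netns_frags the heavily written
counter does not share a line with whatever fields precede it. The same
pattern in isolation, with purely illustrative names:

	#include <linux/cache.h>
	#include <linux/types.h>

	/* Illustrative embedding: the aligned member starts on a fresh
	 * cacheline, so heavy writes to the counter do not bounce the line
	 * holding the unrelated fields placed before it.
	 */
	struct example_netns {
		unsigned long	other_state[4];	/* written by other paths */
		atomic_t	hot_counter ____cacheline_aligned_in_smp;
	};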
@@ -108,15 +103,10 @@ struct inet_frags {
 int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
-static inline int inet_frags_init_net(struct netns_frags *nf)
-{
-	return percpu_counter_init(&nf->mem, 0, GFP_KERNEL);
-}
-static inline void inet_frags_uninit_net(struct netns_frags *nf)
+static inline void inet_frags_init_net(struct netns_frags *nf)
 {
-	percpu_counter_destroy(&nf->mem);
+	atomic_set(&nf->mem, 0);
 }
-
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
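Since inet_frags_init_net() now just zeroes an atomic_t, it can no longer
fail, and per-netns setup code loses an error path. A hypothetical caller,
sketched loosely after the ipv4 pernet init (function name and threshold
values are illustrative):

	#include <net/inet_frag.h>

	static int example_frags_init_net(struct net *net)
	{
		net->ipv4.frags.high_thresh = 4 * 1024 * 1024;	/* illustrative */
		net->ipv4.frags.low_thresh  = 3 * 1024 * 1024;	/* illustrative */
		net->ipv4.frags.timeout     = IP_FRAG_TIME;

		/* Now void: zeroing an atomic_t cannot fail, unlike the old
		 * percpu_counter_init(), which could return -ENOMEM.
		 */
		inet_frags_init_net(&net->ipv4.frags);

		return ip4_frags_ns_ctl_register(net); /* remaining failure point */
	}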
@@ -140,31 +130,24 @@ static inline bool inet_frag_evicting(struct inet_frag_queue *q)
 
 /* Memory Tracking Functions. */
 
-/* The default percpu_counter batch size is not big enough to scale to
- * fragmentation mem acct sizes.
- * The mem size of a 64K fragment is approx:
- *  (44 fragments * 2944 truesize) + frag_queue struct(200) = 129736 bytes
- */
-static unsigned int frag_percpu_counter_batch = 130000;
-
 static inline int frag_mem_limit(struct netns_frags *nf)
 {
-	return percpu_counter_read(&nf->mem);
+	return atomic_read(&nf->mem);
 }
 
 static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	percpu_counter_add_batch(&nf->mem, -i, frag_percpu_counter_batch);
+	atomic_sub(i, &nf->mem);
 }
 
 static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
 {
-	percpu_counter_add_batch(&nf->mem, i, frag_percpu_counter_batch);
+	atomic_add(i, &nf->mem);
 }
 
-static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
+static inline int sum_frag_mem_limit(struct netns_frags *nf)
 {
-	return percpu_counter_sum_positive(&nf->mem);
+	return atomic_read(&nf->mem);
 }
 
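After the revert the charge and uncharge are single atomic ops, and
frag_mem_limit() is an exact read rather than a batched approximation. A
minimal sketch of how these helpers are typically used around the frag
queues (the enqueue/free functions here are hypothetical, not the kernel's
actual paths):

	#include <linux/skbuff.h>
	#include <net/inet_frag.h>

	/* Hypothetical enqueue/free paths showing the charge/uncharge pattern. */
	static bool example_frag_enqueue(struct netns_frags *nf,
					 struct sk_buff *skb)
	{
		/* Refuse new fragments once accounted memory is over the limit. */
		if (frag_mem_limit(nf) > nf->high_thresh)
			return false;

		add_frag_mem_limit(nf, skb->truesize);	/* charge on enqueue */
		/* ... link skb into the frag queue ... */
		return true;
	}

	static void example_frag_free(struct netns_frags *nf,
				      struct sk_buff *skb)
	{
		sub_frag_mem_limit(nf, skb->truesize);	/* uncharge before freeing */
		kfree_skb(skb);
	}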
/* RFC 3168 support :