author    Jordan Rife <jordan@jrife.io>    2025-07-14 11:09:07 -0700
committer Martin KaFai Lau <martin.lau@kernel.org>    2025-07-14 12:09:09 -0700
commit    e25ab9b874a4bd8c6e3e5ce66cbe8a1dd4096e2e (patch)
tree      ae51b1b5ceaa558fe16e381633ae2fe39c88d69f /net/ipv4/tcp_ipv4.c
parent    cdec67a489d4fdae3e83e04fca0419136a83c4c2 (diff)
bpf: tcp: Get rid of st_bucket_done
Get rid of the st_bucket_done field to simplify TCP iterator state and
logic. Before, st_bucket_done could be false if bpf_iter_tcp_batch
returned a partial batch; however, with the last patch ("bpf: tcp: Make
sure iter->batch always contains a full bucket snapshot"),
st_bucket_done == true is equivalent to iter->cur_sk == iter->end_sk.

Signed-off-by: Jordan Rife <jordan@jrife.io>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Reviewed-by: Kuniyuki Iwashima <kuniyu@amazon.com>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
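To illustrate the invariant the commit relies on, here is a minimal sketch
(not part of the patch; the helper name is hypothetical) of how "bucket
done" can be derived from the batch cursor once every batch is guaranteed
to be a full bucket snapshot:

    /* Hypothetical helper, for illustration only: equivalent to the old
     * st_bucket_done flag once iter->batch always holds a full bucket.
     */
    static bool bpf_iter_tcp_bucket_done(const struct bpf_tcp_iter_state *iter)
    {
            /* end_sk == 0 means no bucket has been batched yet */
            return iter->end_sk && iter->cur_sk == iter->end_sk;
    }

The diff below open-codes this check in bpf_iter_tcp_batch() instead of
adding a helper.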
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r--    net/ipv4/tcp_ipv4.c    14
1 file changed, 6 insertions(+), 8 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 291b24508c2f..1c88b537109f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -3020,7 +3020,6 @@ struct bpf_tcp_iter_state {
unsigned int end_sk;
unsigned int max_sk;
struct sock **batch;
- bool st_bucket_done;
};
struct bpf_iter__tcp {
@@ -3043,8 +3042,10 @@ static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{
- while (iter->cur_sk < iter->end_sk)
- sock_gen_put(iter->batch[iter->cur_sk++]);
+ unsigned int cur_sk = iter->cur_sk;
+
+ while (cur_sk < iter->end_sk)
+ sock_gen_put(iter->batch[cur_sk++]);
}
static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
@@ -3161,7 +3162,7 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
* one by one in the current bucket and eventually find out
* it has to advance to the next bucket.
*/
- if (iter->st_bucket_done) {
+ if (iter->end_sk && iter->cur_sk == iter->end_sk) {
st->offset = 0;
st->bucket++;
if (st->state == TCP_SEQ_STATE_LISTENING &&
@@ -3173,7 +3174,6 @@ static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
iter->cur_sk = 0;
iter->end_sk = 0;
- iter->st_bucket_done = true;
sk = tcp_seek_last_pos(seq);
if (!sk)
@@ -3321,10 +3321,8 @@ static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
(void)tcp_prog_seq_show(prog, &meta, v, 0);
}
- if (iter->cur_sk < iter->end_sk) {
+ if (iter->cur_sk < iter->end_sk)
bpf_iter_tcp_put_batch(iter);
- iter->st_bucket_done = false;
- }
}
static const struct seq_operations bpf_iter_tcp_seq_ops = {