summaryrefslogtreecommitdiff
path: root/net
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2004-11-09 06:01:33 -0800
committerDavid S. Miller <davem@nuts.davemloft.net>2004-11-09 06:01:33 -0800
commitb4cd3f93e2dec830286af1795a6e73c64ffffb5d (patch)
treea030d024047eb9b28d239880b4a1728dc202d437 /net
parent3b4b3bfb42039e06aac4972eae9c3fbc46320cc4 (diff)
[PKT_SCHED]: Unlink inner qdiscs immediately in qdisc_destroy()
Before the RCU change, destruction of the qdisc and all inner qdiscs happened immediately and under the rtnl semaphore. This made sure nothing holding the rtnl semaphore could end up with invalid memory. This is not true anymore: inner qdiscs found on dev->qdisc_list can be suddenly destroyed by the RCU callback. Unlinking them from dev->qdisc_list immediately in qdisc_destroy() ensures nothing can find them until they get destroyed. This also makes semantics sane again: an inner qdisc should not be user-visible once the containing qdisc has been destroyed. The second part (locking in qdisc_lookup) is not really required, but currently the only purpose of qdisc_tree_lock seems to be to protect dev->qdisc_list, which is also protected by the rtnl. The rtnl is especially relied on for making sure nobody frees a qdisc while it is used in user-context, so qdisc_tree_lock looks unnecessary. I'm currently reviewing all qdisc locking; if this turns out to be right I will remove qdisc_tree_lock entirely in a follow-up patch, but for now I left it in for consistency. Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/sched/sch_api.c6
-rw-r--r--net/sched/sch_generic.c24
2 files changed, 28 insertions, 2 deletions
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 932bcddc55c3..fff96b79613e 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -196,10 +196,14 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
struct Qdisc *q;
+ read_lock_bh(&qdisc_tree_lock);
list_for_each_entry(q, &dev->qdisc_list, list) {
- if (q->handle == handle)
+ if (q->handle == handle) {
+ read_unlock_bh(&qdisc_tree_lock);
return q;
+ }
}
+ read_unlock_bh(&qdisc_tree_lock);
return NULL;
}
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index fda8f7429c68..1b9180944904 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -483,10 +483,32 @@ static void __qdisc_destroy(struct rcu_head *head)
void qdisc_destroy(struct Qdisc *qdisc)
{
+ struct list_head cql = LIST_HEAD_INIT(cql);
+ struct Qdisc *cq, *q, *n;
+
if (qdisc->flags & TCQ_F_BUILTIN ||
!atomic_dec_and_test(&qdisc->refcnt))
return;
- list_del(&qdisc->list);
+
+ if (!list_empty(&qdisc->list)) {
+ if (qdisc->ops->cl_ops == NULL)
+ list_del(&qdisc->list);
+ else
+ list_move(&qdisc->list, &cql);
+ }
+
+ /* unlink inner qdiscs from dev->qdisc_list immediately */
+ list_for_each_entry(cq, &cql, list)
+ list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
+ if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
+ if (q->ops->cl_ops == NULL)
+ list_del_init(&q->list);
+ else
+ list_move_tail(&q->list, &cql);
+ }
+ list_for_each_entry_safe(cq, n, &cql, list)
+ list_del_init(&cq->list);
+
call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}