Diffstat (limited to 'include')
-rw-r--r--   include/linux/ipv6.h                       |  2
-rw-r--r--   include/linux/mlx5/mlx5_ifc.h              |  5
-rw-r--r--   include/linux/skbuff.h                     |  9
-rw-r--r--   include/net/amt.h                          |  2
-rw-r--r--   include/net/ax25.h                         |  1
-rw-r--r--   include/net/bonding.h                      |  6
-rw-r--r--   include/net/netfilter/nf_conntrack_core.h  |  7
-rw-r--r--   include/net/sch_generic.h                  | 42
-rw-r--r--   include/uapi/linux/ipv6.h                  |  2
-rw-r--r--   include/uapi/linux/socket.h                |  2
10 files changed, 41 insertions, 37 deletions
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 38c8203d52cb..37dfdcfcdd54 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -61,7 +61,7 @@ struct ipv6_devconf {
 	__s32 suppress_frag_ndisc;
 	__s32 accept_ra_mtu;
 	__s32 drop_unsolicited_na;
-	__s32 accept_unsolicited_na;
+	__s32 accept_untracked_na;
 	struct ipv6_stable_secret {
 		bool initialized;
 		struct in6_addr secret;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 78b3d3465dd7..2cd7d611e7b3 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -5176,12 +5176,11 @@ struct mlx5_ifc_query_qp_out_bits {
 
 	u8 syndrome[0x20];
 
-	u8 reserved_at_40[0x20];
-	u8 ece[0x20];
+	u8 reserved_at_40[0x40];
 
 	u8 opt_param_mask[0x20];
 
-	u8 reserved_at_a0[0x20];
+	u8 ece[0x20];
 
 	struct mlx5_ifc_qpc_bits qpc;
 
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index da96f0d3e753..d3d10556f0fa 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2696,7 +2696,14 @@ void *skb_pull(struct sk_buff *skb, unsigned int len);
 static inline void *__skb_pull(struct sk_buff *skb, unsigned int len)
 {
 	skb->len -= len;
-	BUG_ON(skb->len < skb->data_len);
+	if (unlikely(skb->len < skb->data_len)) {
+#if defined(CONFIG_DEBUG_NET)
+		skb->len += len;
+		pr_err("__skb_pull(len=%u)\n", len);
+		skb_dump(KERN_ERR, skb, false);
+#endif
+		BUG();
+	}
 	return skb->data += len;
 }
 
diff --git a/include/net/amt.h b/include/net/amt.h
index 7a4db8b903ee..0e40c3d64fcf 100644
--- a/include/net/amt.h
+++ b/include/net/amt.h
@@ -15,7 +15,7 @@ enum amt_msg_type {
 	AMT_MSG_MEMBERSHIP_QUERY,
 	AMT_MSG_MEMBERSHIP_UPDATE,
 	AMT_MSG_MULTICAST_DATA,
-	AMT_MSG_TEARDOWM,
+	AMT_MSG_TEARDOWN,
 	__AMT_MSG_MAX,
 };
 
diff --git a/include/net/ax25.h b/include/net/ax25.h
index 0f9790c455bb..a427a05672e2 100644
--- a/include/net/ax25.h
+++ b/include/net/ax25.h
@@ -228,6 +228,7 @@ typedef struct ax25_dev {
 	ax25_dama_info dama;
 #endif
 	refcount_t refcount;
+	bool device_up;
 } ax25_dev;
 
 typedef struct ax25_cb {
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b14f4c0b4e9e..cb904d356e31 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -149,7 +149,9 @@ struct bond_params {
 	struct reciprocal_value reciprocal_packets_per_slave;
 	u16 ad_actor_sys_prio;
 	u16 ad_user_port_key;
+#if IS_ENABLED(CONFIG_IPV6)
 	struct in6_addr ns_targets[BOND_MAX_NS_TARGETS];
+#endif
 
 	/* 2 bytes of padding : see ether_addr_equal_64bits() */
 	u8 ad_actor_system[ETH_ALEN + 2];
@@ -503,12 +505,14 @@ static inline int bond_is_ip_target_ok(__be32 addr)
 	return !ipv4_is_lbcast(addr) && !ipv4_is_zeronet(addr);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int bond_is_ip6_target_ok(struct in6_addr *addr)
 {
 	return !ipv6_addr_any(addr) &&
 	       !ipv6_addr_loopback(addr) &&
 	       !ipv6_addr_is_multicast(addr);
 }
+#endif
 
 /* Get the oldest arp which we've received on this slave for bond's
  * arp_targets.
@@ -746,6 +750,7 @@ static inline int bond_get_targets_ip(__be32 *targets, __be32 ip)
 	return -1;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
 static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr *ip)
 {
 	int i;
@@ -758,6 +763,7 @@ static inline int bond_get_targets_ip6(struct in6_addr *targets, struct in6_addr
 
 	return -1;
 }
+#endif
 
 /* exported from bond_main.c */
 extern unsigned int bond_net_id;
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 6406cfee34c2..37866c8386e2 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -58,8 +58,13 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	int ret = NF_ACCEPT;
 
 	if (ct) {
-		if (!nf_ct_is_confirmed(ct))
+		if (!nf_ct_is_confirmed(ct)) {
 			ret = __nf_conntrack_confirm(skb);
+
+			if (ret == NF_ACCEPT)
+				ct = (struct nf_conn *)skb_nfct(skb);
+		}
+
 		if (ret == NF_ACCEPT && nf_ct_ecache_exist(ct))
 			nf_ct_deliver_cached_events(ct);
 	}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 9bab396c1f3b..d6cf5116b5f9 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -187,37 +187,17 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
 		if (spin_trylock(&qdisc->seqlock))
 			return true;
 
-		/* Paired with smp_mb__after_atomic() to make sure
-		 * STATE_MISSED checking is synchronized with clearing
-		 * in pfifo_fast_dequeue().
+		/* No need to insist if the MISSED flag was already set.
+		 * Note that test_and_set_bit() also gives us memory ordering
+		 * guarantees wrt potential earlier enqueue() and below
+		 * spin_trylock(), both of which are necessary to prevent races
 		 */
-		smp_mb__before_atomic();
-
-		/* If the MISSED flag is set, it means other thread has
-		 * set the MISSED flag before second spin_trylock(), so
-		 * we can return false here to avoid multi cpus doing
-		 * the set_bit() and second spin_trylock() concurrently.
-		 */
-		if (test_bit(__QDISC_STATE_MISSED, &qdisc->state))
+		if (test_and_set_bit(__QDISC_STATE_MISSED, &qdisc->state))
 			return false;
 
-		/* Set the MISSED flag before the second spin_trylock(),
-		 * if the second spin_trylock() return false, it means
-		 * other cpu holding the lock will do dequeuing for us
-		 * or it will see the MISSED flag set after releasing
-		 * lock and reschedule the net_tx_action() to do the
-		 * dequeuing.
-		 */
-		set_bit(__QDISC_STATE_MISSED, &qdisc->state);
-
-		/* spin_trylock() only has load-acquire semantic, so use
-		 * smp_mb__after_atomic() to ensure STATE_MISSED is set
-		 * before doing the second spin_trylock().
-		 */
-		smp_mb__after_atomic();
-
-		/* Retry again in case other CPU may not see the new flag
-		 * after it releases the lock at the end of qdisc_run_end().
+		/* Try to take the lock again to make sure that we will either
+		 * grab it or the CPU that still has it will see MISSED set
+		 * when testing it in qdisc_run_end()
 		 */
 		return spin_trylock(&qdisc->seqlock);
 	}
@@ -229,6 +209,12 @@ static inline void qdisc_run_end(struct Qdisc *qdisc)
 	if (qdisc->flags & TCQ_F_NOLOCK) {
 		spin_unlock(&qdisc->seqlock);
 
+		/* spin_unlock() only has store-release semantic. The unlock
+		 * and test_bit() ordering is a store-load ordering, so a full
+		 * memory barrier is needed here.
+		 */
+		smp_mb();
+
 		if (unlikely(test_bit(__QDISC_STATE_MISSED,
 				      &qdisc->state)))
 			__netif_schedule(qdisc);
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 549ddeaf788b..03cdbe798fe3 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -194,7 +194,7 @@ enum {
 	DEVCONF_IOAM6_ID,
 	DEVCONF_IOAM6_ID_WIDE,
 	DEVCONF_NDISC_EVICT_NOCARRIER,
-	DEVCONF_ACCEPT_UNSOLICITED_NA,
+	DEVCONF_ACCEPT_UNTRACKED_NA,
 	DEVCONF_MAX
 };
 
diff --git a/include/uapi/linux/socket.h b/include/uapi/linux/socket.h
index 51d6bb2f6765..d3fcd3b5ec53 100644
--- a/include/uapi/linux/socket.h
+++ b/include/uapi/linux/socket.h
@@ -31,7 +31,7 @@ struct __kernel_sockaddr_storage {
 
 #define SOCK_BUF_LOCK_MASK (SOCK_SNDBUF_LOCK | SOCK_RCVBUF_LOCK)
 
-#define SOCK_TXREHASH_DEFAULT	((u8)-1)
+#define SOCK_TXREHASH_DEFAULT	255
 #define SOCK_TXREHASH_DISABLED	0
 #define SOCK_TXREHASH_ENABLED	1
 
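
For readers following the sch_generic.h hunks above, here is a rough userspace sketch of the same lock/flag handoff written with C11 atomics. It is not kernel code: fake_qdisc, run_begin() and run_end() are invented names, and the atomics only approximate spin_trylock(), test_and_set_bit() and smp_mb(); treat it as an illustration of the ordering argument, not as the implementation.

/* Userspace analogue of the lockless-qdisc handoff pattern (illustrative). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_qdisc {
	atomic_flag seqlock;	/* plays the role of qdisc->seqlock */
	atomic_bool missed;	/* plays the role of __QDISC_STATE_MISSED */
};

/* Mirrors the new qdisc_run_begin(): trylock, otherwise set MISSED with a
 * full-barrier read-modify-write, then trylock once more.
 */
static bool run_begin(struct fake_qdisc *q)
{
	if (!atomic_flag_test_and_set_explicit(&q->seqlock,
					       memory_order_acquire))
		return true;

	/* If MISSED was already set, whoever holds the lock will pick the
	 * work up; no need to insist.  The seq_cst exchange stands in for
	 * test_and_set_bit()'s full ordering.
	 */
	if (atomic_exchange(&q->missed, true))
		return false;

	/* Retry: either we grab the lock, or the current owner is now
	 * guaranteed to observe MISSED when it runs run_end().
	 */
	return !atomic_flag_test_and_set_explicit(&q->seqlock,
						  memory_order_acquire);
}

/* Mirrors the new qdisc_run_end(): unlock (release only), full barrier,
 * then check MISSED.  Returns true when the caller must reschedule.
 */
static bool run_end(struct fake_qdisc *q)
{
	atomic_flag_clear_explicit(&q->seqlock, memory_order_release);

	/* The unlock -> test ordering is store-load, so a full fence is
	 * needed here, exactly as the added smp_mb() in the patch.
	 */
	atomic_thread_fence(memory_order_seq_cst);

	return atomic_load(&q->missed);
}

int main(void)
{
	struct fake_qdisc q = { ATOMIC_FLAG_INIT, false };

	if (run_begin(&q))
		printf("got the lock, reschedule needed: %d\n", run_end(&q));
	return 0;
}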
