Diffstat (limited to 'include/linux/netdevice.h')
 include/linux/netdevice.h | 30 ++++++++++++++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d1a687444b27..5870a9e514a5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -377,6 +377,8 @@ struct napi_config {
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
struct napi_struct {
+ /* This field should be first or softnet_data.backlog needs tweaks. */
+ unsigned long state;
/* The poll_list must only be managed by the entity which
* changes the state of the NAPI_STATE_SCHED bit. This means
* whoever atomically sets that bit can add this napi_struct
@@ -385,7 +387,6 @@ struct napi_struct {
*/
struct list_head poll_list;

- unsigned long state;
int weight;
u32 defer_hard_irqs_count;
int (*poll)(struct napi_struct *, int);
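The comment above pins the layout: a minimal compile-time sketch of that constraint (the assertion and its placement are illustrative, not part of this patch):

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/netdevice.h>

/* Sketch: softnet_data's cache-line packing further down relies on
 * napi_struct.state being the first member, so that backlog.state
 * lands at a predictable offset inside softnet_data.
 */
static inline void napi_state_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct napi_struct, state) != 0);
}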
@@ -422,11 +423,12 @@ enum {
NAPI_STATE_NPSVC, /* Netpoll - don't dequeue from poll_list */
NAPI_STATE_LISTED, /* NAPI added to system lists */
NAPI_STATE_NO_BUSY_POLL, /* Do not add in napi_hash, no busy polling */
- NAPI_STATE_IN_BUSY_POLL, /* sk_busy_loop() owns this NAPI */
+ NAPI_STATE_IN_BUSY_POLL, /* Do not rearm NAPI interrupt */
NAPI_STATE_PREFER_BUSY_POLL, /* prefer busy-polling over softirq processing*/
NAPI_STATE_THREADED, /* The poll is performed inside its own thread*/
NAPI_STATE_SCHED_THREADED, /* Napi is currently scheduled in threaded mode */
NAPI_STATE_HAS_NOTIFIER, /* Napi has an IRQ notifier */
+ NAPI_STATE_THREADED_BUSY_POLL, /* The threaded NAPI poller will busy poll */
};

enum {
@@ -441,6 +443,7 @@ enum {
NAPIF_STATE_THREADED = BIT(NAPI_STATE_THREADED),
NAPIF_STATE_SCHED_THREADED = BIT(NAPI_STATE_SCHED_THREADED),
NAPIF_STATE_HAS_NOTIFIER = BIT(NAPI_STATE_HAS_NOTIFIER),
+ NAPIF_STATE_THREADED_BUSY_POLL = BIT(NAPI_STATE_THREADED_BUSY_POLL),
};
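As with the existing state bits, the new flag would be read through the standard bit helpers; a sketch, where the wrapper name is hypothetical:

#include <linux/bitops.h>
#include <linux/netdevice.h>

/* Hypothetical helper: lets a threaded poller decide whether to keep
 * busy polling instead of sleeping once the ring is drained.
 */
static inline bool napi_wants_threaded_busy_poll(const struct napi_struct *napi)
{
	return test_bit(NAPI_STATE_THREADED_BUSY_POLL, &napi->state);
}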

enum gro_result {
@@ -874,6 +877,7 @@ enum net_device_path_type {
DEV_PATH_PPPOE,
DEV_PATH_DSA,
DEV_PATH_MTK_WDMA,
+ DEV_PATH_TUN,
};

struct net_device_path {
@@ -886,6 +890,18 @@ struct net_device_path {
u8 h_dest[ETH_ALEN];
} encap;
struct {
+ union {
+ struct in_addr src_v4;
+ struct in6_addr src_v6;
+ };
+ union {
+ struct in_addr dst_v4;
+ struct in6_addr dst_v6;
+ };
+
+ u8 l3_proto;
+ } tun;
+ struct {
enum {
DEV_PATH_BR_VLAN_KEEP,
DEV_PATH_BR_VLAN_TAG,
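A sketch of how a tunnel driver's .ndo_fill_forward_path might populate the new entry; the l3_proto encoding (AF_INET here) and the addresses are assumptions for illustration:

#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/socket.h>

/* Hypothetical fill step for an IPv4 tunnel: record the outer endpoints
 * so a flow-offload path can rebuild the encapsulation later.
 */
static int example_tun_fill_forward_path(struct net_device_path_ctx *ctx,
					 struct net_device_path *path)
{
	path->type = DEV_PATH_TUN;
	path->tun.l3_proto = AF_INET;			/* assumed encoding */
	path->tun.src_v4.s_addr = htonl(0xc0a80001);	/* 192.168.0.1 */
	path->tun.dst_v4.s_addr = htonl(0xc0a80002);	/* 192.168.0.2 */
	path->dev = ctx->dev;
	return 0;
}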
@@ -3401,6 +3417,7 @@ struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *netdev_get_by_index(struct net *net, int ifindex,
netdevice_tracker *tracker, gfp_t gfp);
+struct net_device *netdev_get_by_index_lock(struct net *net, int ifindex);
struct net_device *netdev_get_by_name(struct net *net, const char *name,
netdevice_tracker *tracker, gfp_t gfp);
struct net_device *netdev_get_by_flags_rcu(struct net *net, netdevice_tracker *tracker,
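A usage sketch for the locked lookup; pairing it with netdev_unlock() follows the usual instance-lock pattern, but the surrounding function is hypothetical:

#include <linux/netdevice.h>

/* Hypothetical caller: find the device with its instance lock held,
 * mutate lock-protected state, then release it.
 */
static int example_set_something(struct net *net, int ifindex)
{
	struct net_device *dev;

	dev = netdev_get_by_index_lock(net, ifindex);
	if (!dev)
		return -ENODEV;

	/* ... touch state protected by the per-device lock ... */

	netdev_unlock(dev);
	return 0;
}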
@@ -3529,9 +3546,17 @@ struct softnet_data {
call_single_data_t csd ____cacheline_aligned_in_smp;
struct softnet_data *rps_ipi_next;
unsigned int cpu;
+
+ /* We force a cacheline alignment from here, to hold together
+ * input_queue_tail, input_pkt_queue and backlog.state.
+ * We add holes so that backlog.state is the last field
+ * of this cache line.
+ */
+ long pad[3] ____cacheline_aligned_in_smp;
unsigned int input_queue_tail;
#endif
struct sk_buff_head input_pkt_queue;
+
struct napi_struct backlog;

struct numa_drop_counters drop_counters;
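Illustrative arithmetic behind the comment, assuming a 64-bit build with 64-byte cache lines and CONFIG_RPS: pad[3] (24 bytes) + input_queue_tail (4, padded to 8) + sk_buff_head (24) = 56, leaving backlog.state as the final 8 bytes of the line. A sketch of the matching assertion (not part of the patch):

#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/netdevice.h>

/* With napi_struct.state first (see above), backlog.state should end
 * within the cache line that starts at pad[]. Sketch only.
 */
static inline void softnet_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct softnet_data, backlog) +
		     sizeof(unsigned long) -
		     offsetof(struct softnet_data, pad) > SMP_CACHE_BYTES);
}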
@@ -5304,6 +5329,7 @@ static inline netdev_features_t netdev_add_tso_features(netdev_features_t featur
int __netdev_update_features(struct net_device *dev);
void netdev_update_features(struct net_device *dev);
void netdev_change_features(struct net_device *dev);
+void netdev_compute_master_upper_features(struct net_device *dev, bool update_header);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);