author     Sven Wegener <sven.wegener@stealer.net>  2008-06-29 15:08:15 +0000
committer  Sven Wegener <sven.wegener@stealer.net>  2010-05-15 12:55:37 +0200
commit     eed0ad8e9a55f7ff59d53cc6655f44864f37759d (patch)
tree       ccb0eff75dd72afc6fe70b468feac1931aadd7bc
parent     bfcf1ae2b2b1c09ee2c420313afe47ac5cc32d3f (diff)
ipvs: checkpatch cleanup  (ipvs/checkpatch-cleanup)

Fix various style issues found by checkpatch.pl.

Signed-off-by: Sven Wegener <sven.wegener@stealer.net>
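The warnings come from running scripts/checkpatch.pl --file over each file in
the diffstat that follows, and the same few patterns repeat throughout the
diff: assignments buried inside if conditions, goto labels indented by one
space, missing spaces around operators in for loops, and EXPORT_SYMBOL()
placed far from the function it exports. A minimal sketch of the before/after
shape of these fixes (demo_get, demo_put and demo_check are hypothetical
stand-ins, not functions from this patch):

#include <errno.h>
#include <stddef.h>

struct demo { int proto; };

/* Hypothetical helpers standing in for ip_vs_proto_get() and friends. */
static struct demo *demo_get(int proto)
{
	static struct demo d;

	d.proto = proto;
	return proto ? &d : NULL;
}
static void demo_put(struct demo *d) { (void)d; }
static int demo_check(struct demo *d) { return d->proto < 0 ? -EINVAL : 0; }

/* After the cleanup: the assignment is split out of the condition,
 * for-loop operators are spaced, and the label starts in column 0. */
static int demo_after(int proto)
{
	struct demo *d;
	int i, sum = 0, ret;

	d = demo_get(proto);		/* was: if (!(d = demo_get(proto))) */
	if (!d)
		return -EPROTONOSUPPORT;

	ret = demo_check(d);
	if (ret)
		goto out;		/* the label below was " out:" before */

	for (i = 0; i < 8; i++)		/* was: for (i=0; i<8; i++) */
		sum += i;
	ret = sum;
out:
	demo_put(d);
	return ret;
}

None of these transformations should change generated code; they only bring
the source in line with Documentation/CodingStyle.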
-rw-r--r--  net/netfilter/ipvs/ip_vs_app.c         34
-rw-r--r--  net/netfilter/ipvs/ip_vs_conn.c        41
-rw-r--r--  net/netfilter/ipvs/ip_vs_core.c        27
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c        116
-rw-r--r--  net/netfilter/ipvs/ip_vs_dh.c           7
-rw-r--r--  net/netfilter/ipvs/ip_vs_ftp.c          5
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblc.c        21
-rw-r--r--  net/netfilter/ipvs/ip_vs_lblcr.c       24
-rw-r--r--  net/netfilter/ipvs/ip_vs_nq.c           5
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto.c        4
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_tcp.c   20
-rw-r--r--  net/netfilter/ipvs/ip_vs_proto_udp.c    8
-rw-r--r--  net/netfilter/ipvs/ip_vs_rr.c           2
-rw-r--r--  net/netfilter/ipvs/ip_vs_sched.c        7
-rw-r--r--  net/netfilter/ipvs/ip_vs_sed.c          5
-rw-r--r--  net/netfilter/ipvs/ip_vs_sh.c           7
-rw-r--r--  net/netfilter/ipvs/ip_vs_sync.c        59
-rw-r--r--  net/netfilter/ipvs/ip_vs_wlc.c          5
-rw-r--r--  net/netfilter/ipvs/ip_vs_wrr.c          2
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c        52
20 files changed, 231 insertions(+), 220 deletions(-)
diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
index 1cb0e834f8ff..6aa0be193fd0 100644
--- a/net/netfilter/ipvs/ip_vs_app.c
+++ b/net/netfilter/ipvs/ip_vs_app.c
@@ -39,10 +39,6 @@
#include <net/ip_vs.h>
-EXPORT_SYMBOL(register_ip_vs_app);
-EXPORT_SYMBOL(unregister_ip_vs_app);
-EXPORT_SYMBOL(register_ip_vs_app_inc);
-
/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
static DEFINE_MUTEX(__ip_vs_app_mutex);
@@ -73,7 +69,8 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
struct ip_vs_app *inc;
int ret;
- if (!(pp = ip_vs_proto_get(proto)))
+ pp = ip_vs_proto_get(proto);
+ if (!pp)
return -EPROTONOSUPPORT;
if (!pp->unregister_app)
@@ -108,7 +105,7 @@ ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
return 0;
- out:
+out:
kfree(inc->timeout_table);
kfree(inc);
return ret;
@@ -123,7 +120,8 @@ ip_vs_app_inc_release(struct ip_vs_app *inc)
{
struct ip_vs_protocol *pp;
- if (!(pp = ip_vs_proto_get(inc->protocol)))
+ pp = ip_vs_proto_get(inc->protocol);
+ if (!pp)
return;
if (pp->unregister_app)
@@ -148,7 +146,8 @@ int ip_vs_app_inc_get(struct ip_vs_app *inc)
int result;
atomic_inc(&inc->usecnt);
- if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
+ result = ip_vs_app_get(inc->app);
+ if (unlikely(result != 1))
atomic_dec(&inc->usecnt);
return result;
}
@@ -180,6 +179,7 @@ register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
return result;
}
+EXPORT_SYMBOL(register_ip_vs_app_inc);
/*
@@ -198,6 +198,7 @@ int register_ip_vs_app(struct ip_vs_app *app)
return 0;
}
+EXPORT_SYMBOL(register_ip_vs_app);
/*
@@ -221,6 +222,7 @@ void unregister_ip_vs_app(struct ip_vs_app *app)
/* decrease the module use count */
ip_vs_use_count_dec();
}
+EXPORT_SYMBOL(unregister_ip_vs_app);
/*
@@ -264,7 +266,7 @@ static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
* for all packets before most recent resized pkt seq.
*/
if (vseq->delta || vseq->previous_delta) {
- if(after(seq, vseq->init_seq)) {
+ if (after(seq, vseq->init_seq)) {
th->seq = htonl(seq + vseq->delta);
IP_VS_DBG(9, "%s(): added delta (%d) to seq\n",
__func__, vseq->delta);
@@ -293,7 +295,7 @@ vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
if (vseq->delta || vseq->previous_delta) {
/* since ack_seq is the number of octet that is expected
to receive next, so compare it with init_seq+delta */
- if(after(ack_seq, vseq->init_seq+vseq->delta)) {
+ if (after(ack_seq, vseq->init_seq + vseq->delta)) {
th->ack_seq = htonl(ack_seq - vseq->delta);
IP_VS_DBG(9, "%s(): subtracted delta "
"(%d) from ack_seq\n", __func__, vseq->delta);
@@ -378,13 +380,13 @@ static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
*/
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
{
- struct ip_vs_app *app;
+ struct ip_vs_app *app = cp->app;
/*
* check if application module is bound to
* this ip_vs_conn.
*/
- if ((app = cp->app) == NULL)
+ if (app == NULL)
return 1;
/* TCP is complicated */
@@ -453,13 +455,13 @@ static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
*/
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
{
- struct ip_vs_app *app;
+ struct ip_vs_app *app = cp->app;
/*
* check if application module is bound to
* this ip_vs_conn.
*/
- if ((app = cp->app) == NULL)
+ if (app == NULL)
return 1;
/* TCP is complicated */
@@ -514,7 +516,8 @@ static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
inc = v;
app = inc->app;
- if ((e = inc->a_list.next) != &app->incs_list)
+ e = inc->a_list.next;
+ if (e != &app->incs_list)
return list_entry(e, struct ip_vs_app, a_list);
/* go on to next application */
@@ -610,6 +613,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
LeaveFunction(9);
return 0;
}
+EXPORT_SYMBOL(ip_vs_skb_replace);
int __init ip_vs_app_init(void)
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index d8f7e8ef67b4..38a5d79e5659 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -80,8 +80,7 @@ static unsigned int ip_vs_conn_rnd;
#define CT_LOCKARRAY_SIZE (1<<CT_LOCKARRAY_BITS)
#define CT_LOCKARRAY_MASK (CT_LOCKARRAY_SIZE-1)
-struct ip_vs_aligned_lock
-{
+struct ip_vs_aligned_lock {
rwlock_t l;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
@@ -263,6 +262,7 @@ struct ip_vs_conn *ip_vs_conn_in_get
return cp;
}
+EXPORT_SYMBOL(ip_vs_conn_in_get);
/* Get reference to connection template */
struct ip_vs_conn *ip_vs_ct_in_get
@@ -293,7 +293,7 @@ struct ip_vs_conn *ip_vs_ct_in_get
}
cp = NULL;
- out:
+out:
ct_read_unlock(hash);
IP_VS_DBG_BUF(9, "template lookup/in %s %s:%d->%s:%d %s\n",
@@ -316,7 +316,7 @@ struct ip_vs_conn *ip_vs_conn_out_get
const union nf_inet_addr *d_addr, __be16 d_port)
{
unsigned hash;
- struct ip_vs_conn *cp, *ret=NULL;
+ struct ip_vs_conn *cp, *ret = NULL;
/*
* Check for "full" addressed entries
@@ -348,6 +348,7 @@ struct ip_vs_conn *ip_vs_conn_out_get
return ret;
}
+EXPORT_SYMBOL(ip_vs_conn_out_get);
/*
@@ -360,6 +361,7 @@ void ip_vs_conn_put(struct ip_vs_conn *cp)
__ip_vs_conn_put(cp);
}
+EXPORT_SYMBOL(ip_vs_conn_put);
/*
@@ -549,11 +551,10 @@ static inline void ip_vs_unbind_dest(struct ip_vs_conn *cp)
if (!(cp->flags & IP_VS_CONN_F_TEMPLATE)) {
/* It is a normal connection, so decrease the inactconns
or activeconns counter */
- if (cp->flags & IP_VS_CONN_F_INACTIVE) {
+ if (cp->flags & IP_VS_CONN_F_INACTIVE)
atomic_dec(&dest->inactconns);
- } else {
+ else
atomic_dec(&dest->activeconns);
- }
} else {
/* It is a persistent connection/template, so decrease
the peristent connection counter */
@@ -678,7 +679,7 @@ static void ip_vs_conn_expire(unsigned long data)
/* hash it back to the table */
ip_vs_conn_hash(cp);
- expire_later:
+expire_later:
IP_VS_DBG(7, "delayed: conn->refcnt-1=%d conn->n_control=%d\n",
atomic_read(&cp->refcnt)-1,
atomic_read(&cp->n_control));
@@ -764,6 +765,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
return cp;
}
+EXPORT_SYMBOL(ip_vs_conn_new);
/*
@@ -793,7 +795,7 @@ static void *ip_vs_conn_array(struct seq_file *seq, loff_t pos)
static void *ip_vs_conn_seq_start(struct seq_file *seq, loff_t *pos)
{
seq->private = NULL;
- return *pos ? ip_vs_conn_array(seq, *pos - 1) :SEQ_START_TOKEN;
+ return *pos ? ip_vs_conn_array(seq, *pos - 1) : SEQ_START_TOKEN;
}
static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
@@ -807,7 +809,8 @@ static void *ip_vs_conn_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return ip_vs_conn_array(seq, 0);
/* more on same hash chain? */
- if ((e = cp->c_list.next) != l)
+ e = cp->c_list.next;
+ if (e != l)
return list_entry(e, struct ip_vs_conn, c_list);
idx = l - ip_vs_conn_tab;
@@ -974,10 +977,13 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
/* Don't drop the entry if its number of incoming packets is not
located in [0, 8] */
i = atomic_read(&cp->in_pkts);
- if (i > 8 || i < 0) return 0;
+ if (i > 8 || i < 0)
+ return 0;
- if (!todrop_rate[i]) return 0;
- if (--todrop_counter[i] > 0) return 0;
+ if (!todrop_rate[i])
+ return 0;
+ if (--todrop_counter[i] > 0)
+ return 0;
todrop_counter[i] = todrop_rate[i];
return 1;
@@ -1006,7 +1012,7 @@ void ip_vs_random_dropentry(void)
continue;
if (cp->protocol == IPPROTO_TCP) {
- switch(cp->state) {
+ switch (cp->state) {
case IP_VS_TCP_S_SYN_RECV:
case IP_VS_TCP_S_SYNACK:
break;
@@ -1106,14 +1112,13 @@ int __init ip_vs_conn_init(void)
- for (idx = 0; idx < ip_vs_conn_tab_size; idx++) {
+ for (idx = 0; idx < ip_vs_conn_tab_size; idx++)
INIT_LIST_HEAD(&ip_vs_conn_tab[idx]);
- }
- for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++) {
+ for (idx = 0; idx < CT_LOCKARRAY_SIZE; idx++)
rwlock_init(&__ip_vs_conntbl_lock_array[idx].l);
- }
proc_net_fops_create(&init_net, "ip_vs_conn", 0, &ip_vs_conn_fops);
- proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0, &ip_vs_conn_sync_fops);
+ proc_net_fops_create(&init_net, "ip_vs_conn_sync", 0,
+ &ip_vs_conn_sync_fops);
/* calculate the random value for connection hash */
get_random_bytes(&ip_vs_conn_rnd, sizeof(ip_vs_conn_rnd));
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1cd6e3fd058b..446ac09173a1 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -52,22 +52,6 @@
#include <net/ip_vs.h>
-EXPORT_SYMBOL(register_ip_vs_scheduler);
-EXPORT_SYMBOL(unregister_ip_vs_scheduler);
-EXPORT_SYMBOL(ip_vs_skb_replace);
-EXPORT_SYMBOL(ip_vs_proto_name);
-EXPORT_SYMBOL(ip_vs_conn_new);
-EXPORT_SYMBOL(ip_vs_conn_in_get);
-EXPORT_SYMBOL(ip_vs_conn_out_get);
-#ifdef CONFIG_IP_VS_PROTO_TCP
-EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
-#endif
-EXPORT_SYMBOL(ip_vs_conn_put);
-#ifdef CONFIG_IP_VS_DEBUG
-EXPORT_SYMBOL(ip_vs_get_debug_level);
-#endif
-
-
/* ID used in ICMP lookups */
#define icmp_id(icmph) (((icmph)->un).echo.id)
#define icmpv6_id(icmph) (icmph->icmp6_dataun.u_echo.identifier)
@@ -96,6 +80,7 @@ const char *ip_vs_proto_name(unsigned proto)
return buf;
}
}
+EXPORT_SYMBOL(ip_vs_proto_name);
void ip_vs_init_hash_table(struct list_head *table, int rows)
{
@@ -1169,7 +1154,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
/* do not touch skb anymore */
- out:
+out:
__ip_vs_conn_put(cp);
return verdict;
@@ -1573,14 +1558,14 @@ static int __init ip_vs_init(void)
pr_info("ipvs loaded.\n");
return ret;
- cleanup_conn:
+cleanup_conn:
ip_vs_conn_cleanup();
- cleanup_app:
+cleanup_app:
ip_vs_app_cleanup();
- cleanup_protocol:
+cleanup_protocol:
ip_vs_protocol_cleanup();
ip_vs_control_cleanup();
- cleanup_estimator:
+cleanup_estimator:
ip_vs_estimator_cleanup();
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 36dc1d88c2fa..88d161262928 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -47,7 +47,7 @@
#include <net/sock.h>
#include <net/genetlink.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include <net/ip_vs.h>
@@ -70,33 +70,34 @@ static DEFINE_SPINLOCK(__ip_vs_dropentry_lock);
static DEFINE_SPINLOCK(__ip_vs_droppacket_lock);
/* 1/rate drop and drop-entry variables */
-int ip_vs_drop_rate = 0;
-int ip_vs_drop_counter = 0;
+int ip_vs_drop_rate;
+int ip_vs_drop_counter;
static atomic_t ip_vs_dropentry = ATOMIC_INIT(0);
/* number of virtual services */
-static int ip_vs_num_services = 0;
+static int ip_vs_num_services;
/* sysctl variables */
-static int sysctl_ip_vs_drop_entry = 0;
-static int sysctl_ip_vs_drop_packet = 0;
-static int sysctl_ip_vs_secure_tcp = 0;
+static int sysctl_ip_vs_drop_entry;
+static int sysctl_ip_vs_drop_packet;
+static int sysctl_ip_vs_secure_tcp;
static int sysctl_ip_vs_amemthresh = 1024;
static int sysctl_ip_vs_am_droprate = 10;
-int sysctl_ip_vs_cache_bypass = 0;
-int sysctl_ip_vs_expire_nodest_conn = 0;
-int sysctl_ip_vs_expire_quiescent_template = 0;
+int sysctl_ip_vs_cache_bypass;
+int sysctl_ip_vs_expire_nodest_conn;
+int sysctl_ip_vs_expire_quiescent_template;
int sysctl_ip_vs_sync_threshold[2] = { 3, 50 };
-int sysctl_ip_vs_nat_icmp_send = 0;
+int sysctl_ip_vs_nat_icmp_send;
#ifdef CONFIG_IP_VS_DEBUG
-static int sysctl_ip_vs_debug_level = 0;
+static int sysctl_ip_vs_debug_level;
int ip_vs_get_debug_level(void)
{
return sysctl_ip_vs_debug_level;
}
+EXPORT_SYMBOL(ip_vs_get_debug_level);
#endif
#ifdef CONFIG_IP_VS_IPV6
@@ -126,7 +127,7 @@ static int __ip_vs_addr_is_local_v6(const struct in6_addr *addr)
static void update_defense_level(void)
{
struct sysinfo i;
- static int old_secure_tcp = 0;
+ static int old_secure_tcp;
int availmem;
int nomem;
int to_change = -1;
@@ -237,7 +238,7 @@ static void update_defense_level(void)
}
old_secure_tcp = sysctl_ip_vs_secure_tcp;
if (to_change >= 0)
- ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp>1);
+ ip_vs_protocol_timeout_change(sysctl_ip_vs_secure_tcp > 1);
write_unlock(&__ip_vs_securetcp_lock);
local_bh_enable();
@@ -247,7 +248,7 @@ static void update_defense_level(void)
/*
* Timer for checking the defense
*/
-#define DEFENSE_TIMER_PERIOD 1*HZ
+#define DEFENSE_TIMER_PERIOD HZ
static void defense_work_handler(struct work_struct *work);
static DECLARE_DELAYED_WORK(defense_work, defense_work_handler);
@@ -309,7 +310,7 @@ static atomic_t ip_vs_nullsvc_counter = ATOMIC_INIT(0);
/*
* Returns hash value for virtual service
*/
-static __inline__ unsigned
+static inline unsigned
ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
__be16 port)
{
@@ -329,7 +330,7 @@ ip_vs_svc_hashkey(int af, unsigned proto, const union nf_inet_addr *addr,
/*
* Returns hash value of fwmark for virtual service lookup
*/
-static __inline__ unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
+static inline unsigned ip_vs_svc_fwm_hashkey(__u32 fwmark)
{
return fwmark & IP_VS_SVC_TAB_MASK;
}
@@ -410,7 +411,7 @@ __ip_vs_service_get(int af, __u16 protocol, const union nf_inet_addr *vaddr,
/* Check for "full" addressed entries */
hash = ip_vs_svc_hashkey(af, protocol, vaddr, vport);
- list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list){
+ list_for_each_entry(svc, &ip_vs_svc_table[hash], s_list) {
if ((svc->af == af)
&& ip_vs_addr_equal(af, &svc->addr, vaddr)
&& (svc->port == vport)
@@ -459,8 +460,11 @@ ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
/*
* Check the table hashed by fwmark first
*/
- if (fwmark && (svc = __ip_vs_svc_fwm_get(af, fwmark)))
- goto out;
+ if (fwmark) {
+ svc = __ip_vs_svc_fwm_get(af, fwmark);
+ if (svc)
+ goto out;
+ }
/*
* Check the table hashed by <protocol,addr,port>
@@ -487,7 +491,7 @@ ip_vs_service_get(int af, __u32 fwmark, __u16 protocol,
svc = __ip_vs_service_get(af, protocol, vaddr, 0);
}
- out:
+out:
read_unlock(&__ip_vs_svc_lock);
IP_VS_DBG_BUF(9, "lookup service: fwm %u %s %s:%u %s\n",
@@ -545,9 +549,8 @@ static int ip_vs_rs_hash(struct ip_vs_dest *dest)
{
unsigned hash;
- if (!list_empty(&dest->d_list)) {
+ if (!list_empty(&dest->d_list))
return 0;
- }
/*
* Hash by proto,addr,port,
@@ -957,9 +960,8 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest)
* Allocate and initialize the dest structure
*/
ret = ip_vs_new_dest(svc, udest, &dest);
- if (ret) {
+ if (ret)
return ret;
- }
/*
* Add the dest entry into the list
@@ -1227,7 +1229,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
*svc_p = svc;
return 0;
- out_err:
+out_err:
if (svc != NULL) {
if (svc->scheduler)
ip_vs_unbind_scheduler(svc);
@@ -1240,7 +1242,7 @@ ip_vs_add_service(struct ip_vs_service_user_kern *u,
}
ip_vs_scheduler_put(sched);
- out_mod_dec:
+out_mod_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -1293,7 +1295,8 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
/*
* Unbind the old scheduler
*/
- if ((ret = ip_vs_unbind_scheduler(svc))) {
+ ret = ip_vs_unbind_scheduler(svc);
+ if (ret) {
old_sched = sched;
goto out_unlock;
}
@@ -1301,7 +1304,8 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
/*
* Bind the new scheduler
*/
- if ((ret = ip_vs_bind_scheduler(svc, sched))) {
+ ret = ip_vs_bind_scheduler(svc, sched);
+ if (ret) {
/*
* If ip_vs_bind_scheduler fails, restore the old
* scheduler.
@@ -1318,10 +1322,10 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
}
}
- out_unlock:
+out_unlock:
write_unlock_bh(&__ip_vs_svc_lock);
#ifdef CONFIG_IP_VS_IPV6
- out:
+out:
#endif
if (old_sched)
@@ -1424,8 +1428,9 @@ static int ip_vs_flush(void)
/*
* Flush the service table hashed by <protocol,addr,port>
*/
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
- list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx], s_list) {
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ list_for_each_entry_safe(svc, nxt, &ip_vs_svc_table[idx],
+ s_list) {
write_lock_bh(&__ip_vs_svc_lock);
ip_vs_svc_unhash(svc);
/*
@@ -1440,7 +1445,7 @@ static int ip_vs_flush(void)
/*
* Flush the service table hashed by fwmark
*/
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry_safe(svc, nxt,
&ip_vs_svc_fwm_table[idx], f_list) {
write_lock_bh(&__ip_vs_svc_lock);
@@ -1479,13 +1484,13 @@ static int ip_vs_zero_all(void)
int idx;
struct ip_vs_service *svc;
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
ip_vs_zero_service(svc);
}
}
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_fwm_table[idx], f_list) {
ip_vs_zero_service(svc);
}
@@ -1718,7 +1723,7 @@ const struct ctl_path net_vs_ctl_path[] = {
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
-static struct ctl_table_header * sysctl_header;
+static struct ctl_table_header *sysctl_header;
#ifdef CONFIG_PROC_FS
@@ -1756,7 +1761,7 @@ static struct ip_vs_service *ip_vs_info_array(struct seq_file *seq, loff_t pos)
/* look in hash by protocol */
for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
list_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) {
- if (pos-- == 0){
+ if (pos-- == 0) {
iter->table = ip_vs_svc_table;
iter->bucket = idx;
return svc;
@@ -1795,19 +1800,20 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
++*pos;
if (v == SEQ_START_TOKEN)
- return ip_vs_info_array(seq,0);
+ return ip_vs_info_array(seq, 0);
svc = v;
iter = seq->private;
if (iter->table == ip_vs_svc_table) {
/* next service in table hashed by protocol */
- if ((e = svc->s_list.next) != &ip_vs_svc_table[iter->bucket])
+ e = svc->s_list.next;
+ if (e != &ip_vs_svc_table[iter->bucket])
return list_entry(e, struct ip_vs_service, s_list);
while (++iter->bucket < IP_VS_SVC_TAB_SIZE) {
- list_for_each_entry(svc,&ip_vs_svc_table[iter->bucket],
+ list_for_each_entry(svc, &ip_vs_svc_table[iter->bucket],
s_list) {
return svc;
}
@@ -1819,7 +1825,8 @@ static void *ip_vs_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
}
/* next service in hashed by fwmark */
- if ((e = svc->f_list.next) != &ip_vs_svc_fwm_table[iter->bucket])
+ e = svc->f_list.next;
+ if (e != &ip_vs_svc_fwm_table[iter->bucket])
return list_entry(e, struct ip_vs_service, f_list);
scan_fwmark:
@@ -1956,7 +1963,7 @@ static int ip_vs_stats_show(struct seq_file *seq, void *v)
/* 01234567 01234567 01234567 0123456701234567 0123456701234567 */
seq_puts(seq,
" Conns/s Pkts/s Pkts/s Bytes/s Bytes/s\n");
- seq_printf(seq,"%8X %8X %8X %16X %16X\n",
+ seq_printf(seq, "%8X %8X %8X %16X %16X\n",
ip_vs_stats.ustats.cps,
ip_vs_stats.ustats.inpps,
ip_vs_stats.ustats.outpps,
@@ -2190,9 +2197,9 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
if (svc)
ip_vs_service_put(svc);
- out_unlock:
+out_unlock:
mutex_unlock(&__ip_vs_mutex);
- out_dec:
+out_dec:
/* decrease the module use count */
ip_vs_use_count_dec();
@@ -2227,7 +2234,7 @@ static inline int
__ip_vs_get_service_entries(const struct ip_vs_get_services *get,
struct ip_vs_get_services __user *uptr)
{
- int idx, count=0;
+ int idx, count = 0;
struct ip_vs_service *svc;
struct ip_vs_service_entry entry;
int ret = 0;
@@ -2269,7 +2276,7 @@ __ip_vs_get_service_entries(const struct ip_vs_get_services *get,
count++;
}
}
- out:
+out:
return ret;
}
@@ -2482,12 +2489,14 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
memset(&d, 0, sizeof(d));
if (ip_vs_sync_state & IP_VS_STATE_MASTER) {
d[0].state = IP_VS_STATE_MASTER;
- strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn, sizeof(d[0].mcast_ifn));
+ strlcpy(d[0].mcast_ifn, ip_vs_master_mcast_ifn,
+ sizeof(d[0].mcast_ifn));
d[0].syncid = ip_vs_master_syncid;
}
if (ip_vs_sync_state & IP_VS_STATE_BACKUP) {
d[1].state = IP_VS_STATE_BACKUP;
- strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn, sizeof(d[1].mcast_ifn));
+ strlcpy(d[1].mcast_ifn, ip_vs_backup_mcast_ifn,
+ sizeof(d[1].mcast_ifn));
d[1].syncid = ip_vs_backup_syncid;
}
if (copy_to_user(user, &d, sizeof(d)) != 0)
@@ -2499,7 +2508,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
ret = -EINVAL;
}
- out:
+out:
mutex_unlock(&__ip_vs_mutex);
return ret;
}
@@ -3397,18 +3406,17 @@ int __init ip_vs_control_init(void)
}
proc_net_fops_create(&init_net, "ip_vs", 0, &ip_vs_info_fops);
- proc_net_fops_create(&init_net, "ip_vs_stats",0, &ip_vs_stats_fops);
+ proc_net_fops_create(&init_net, "ip_vs_stats", 0, &ip_vs_stats_fops);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars);
/* Initialize ip_vs_svc_table, ip_vs_svc_fwm_table, ip_vs_rtable */
- for(idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
+ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) {
INIT_LIST_HEAD(&ip_vs_svc_table[idx]);
INIT_LIST_HEAD(&ip_vs_svc_fwm_table[idx]);
}
- for(idx = 0; idx < IP_VS_RTAB_SIZE; idx++) {
+ for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_LIST_HEAD(&ip_vs_rtable[idx]);
- }
ip_vs_new_estimator(&ip_vs_stats);
diff --git a/net/netfilter/ipvs/ip_vs_dh.c b/net/netfilter/ipvs/ip_vs_dh.c
index 95fd0d14200b..b6768e750689 100644
--- a/net/netfilter/ipvs/ip_vs_dh.c
+++ b/net/netfilter/ipvs/ip_vs_dh.c
@@ -105,7 +105,7 @@ ip_vs_dh_assign(struct ip_vs_dh_bucket *tbl, struct ip_vs_service *svc)
b = tbl;
p = &svc->destinations;
- for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_DH_TAB_SIZE; i++) {
if (list_empty(p)) {
b->dest = NULL;
} else {
@@ -133,7 +133,7 @@ static void ip_vs_dh_flush(struct ip_vs_dh_bucket *tbl)
struct ip_vs_dh_bucket *b;
b = tbl;
- for (i=0; i<IP_VS_DH_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_DH_TAB_SIZE; i++) {
if (b->dest) {
atomic_dec(&b->dest->refcnt);
b->dest = NULL;
@@ -241,8 +241,7 @@ ip_vs_dh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/*
* IPVS DH Scheduler structure
*/
-static struct ip_vs_scheduler ip_vs_dh_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_dh_scheduler = {
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 2c7f185dfae4..511542eec5d4 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -92,9 +92,8 @@ static int ip_vs_ftp_get_addrport(char *data, char *data_limit,
return 0;
}
- if (strnicmp(data, pattern, plen) != 0) {
+ if (strnicmp(data, pattern, plen) != 0)
return 0;
- }
*start = data + plen;
for (data = *start; *data != term; data++) {
@@ -380,7 +379,7 @@ static int __init ip_vs_ftp_init(void)
if (ret)
return ret;
- for (i=0; i<IP_VS_APP_MAX_PORTS; i++) {
+ for (i = 0; i < IP_VS_APP_MAX_PORTS; i++) {
if (!ports[i])
continue;
ret = register_ip_vs_app_inc(app, app->protocol, ports[i]);
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 94a45213faa6..325bb3c0da55 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -125,7 +125,7 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
+static struct ctl_table_header *sysctl_header;
static inline void ip_vs_lblc_free(struct ip_vs_lblc_entry *en)
{
@@ -233,7 +233,7 @@ static void ip_vs_lblc_flush(struct ip_vs_lblc_table *tbl)
struct ip_vs_lblc_entry *en, *nxt;
int i;
- for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++) {
list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
ip_vs_lblc_free(en);
atomic_dec(&tbl->entries);
@@ -249,13 +249,14 @@ static inline void ip_vs_lblc_full_check(struct ip_vs_service *svc)
unsigned long now = jiffies;
int i, j;
- for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
if (time_before(now,
- en->lastuse + sysctl_ip_vs_lblc_expiration))
+ en->lastuse +
+ sysctl_ip_vs_lblc_expiration))
continue;
ip_vs_lblc_free(en);
@@ -303,7 +304,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLC_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLC_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLC_TAB_MASK;
write_lock(&svc->sched_lock);
@@ -321,7 +322,7 @@ static void ip_vs_lblc_check_expire(unsigned long data)
}
tbl->rover = j;
- out:
+out:
mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
@@ -346,9 +347,8 @@ static int ip_vs_lblc_init_svc(struct ip_vs_service *svc)
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLC_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLC_TAB_SIZE; i++)
INIT_LIST_HEAD(&tbl->bucket[i]);
- }
tbl->max_size = IP_VS_LBLC_TAB_SIZE*16;
tbl->rover = 0;
tbl->counter = 1;
@@ -421,7 +421,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
/*
* Find the destination with the least load.
*/
- nextstage:
+nextstage:
list_for_each_entry_continue(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -532,8 +532,7 @@ out:
/*
* IPVS LBLC Scheduler structure
*/
-static struct ip_vs_scheduler ip_vs_lblc_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_lblc_scheduler = {
.name = "lblc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index 535dc2b419d8..4c0a8d130efb 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -238,7 +238,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
return NULL;
/* find the destination with the weighted most load */
- nextstage:
+nextstage:
list_for_each_entry(e, &set->list, list) {
dest = e->dest;
doh = atomic_read(&dest->activeconns) * 50
@@ -304,7 +304,7 @@ static ctl_table vs_vars_table[] = {
{ }
};
-static struct ctl_table_header * sysctl_header;
+static struct ctl_table_header *sysctl_header;
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
@@ -411,7 +411,7 @@ static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
struct ip_vs_lblcr_entry *en, *nxt;
/* No locking required, only called during cleanup. */
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
ip_vs_lblcr_free(en);
}
@@ -426,13 +426,13 @@ static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
int i, j;
struct ip_vs_lblcr_entry *en, *nxt;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
- if (time_after(en->lastuse+sysctl_ip_vs_lblcr_expiration,
- now))
+ if (time_after(en->lastuse +
+ sysctl_ip_vs_lblcr_expiration, now))
continue;
ip_vs_lblcr_free(en);
@@ -480,7 +480,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
if (goal > tbl->max_size/2)
goal = tbl->max_size/2;
- for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
j = (j + 1) & IP_VS_LBLCR_TAB_MASK;
write_lock(&svc->sched_lock);
@@ -498,7 +498,7 @@ static void ip_vs_lblcr_check_expire(unsigned long data)
}
tbl->rover = j;
- out:
+out:
mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
@@ -522,9 +522,8 @@ static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
/*
* Initialize the hash buckets
*/
- for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++)
INIT_LIST_HEAD(&tbl->bucket[i]);
- }
tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
tbl->rover = 0;
tbl->counter = 1;
@@ -598,7 +597,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
/*
* Find the destination with the least load.
*/
- nextstage:
+nextstage:
list_for_each_entry_continue(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -733,8 +732,7 @@ out:
/*
* IPVS LBLCR Scheduler structure
*/
-static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_lblcr_scheduler = {
.name = "lblcr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_nq.c b/net/netfilter/ipvs/ip_vs_nq.c
index c413e1830823..9270ee3aeab1 100644
--- a/net/netfilter/ipvs/ip_vs_nq.c
+++ b/net/netfilter/ipvs/ip_vs_nq.c
@@ -103,7 +103,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
return NULL;
}
- out:
+out:
IP_VS_DBG_BUF(6, "NQ: server %s:%u "
"activeconns %d refcnt %d weight %d overhead %d\n",
IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
@@ -115,8 +115,7 @@ ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
-static struct ip_vs_scheduler ip_vs_nq_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_nq_scheduler = {
.name = "nq",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_proto.c b/net/netfilter/ipvs/ip_vs_proto.c
index 7fc49f4cf5ad..6e40e532adeb 100644
--- a/net/netfilter/ipvs/ip_vs_proto.c
+++ b/net/netfilter/ipvs/ip_vs_proto.c
@@ -86,7 +86,7 @@ static int unregister_ip_vs_protocol(struct ip_vs_protocol *pp)
/*
* get ip_vs_protocol object by its proto.
*/
-struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto)
+struct ip_vs_protocol *ip_vs_proto_get(unsigned short proto)
{
struct ip_vs_protocol *pp;
unsigned hash = IP_VS_PROTO_HASH(proto);
@@ -146,7 +146,7 @@ ip_vs_set_state_timeout(int *table, int num, const char *const *names,
}
-const char * ip_vs_state_name(__u16 proto, int state)
+const char *ip_vs_state_name(__u16 proto, int state)
{
struct ip_vs_protocol *pp = ip_vs_proto_get(proto);
diff --git a/net/netfilter/ipvs/ip_vs_proto_tcp.c b/net/netfilter/ipvs/ip_vs_proto_tcp.c
index 91d28e073742..6f631feafe39 100644
--- a/net/netfilter/ipvs/ip_vs_proto_tcp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_tcp.c
@@ -223,7 +223,7 @@ tcp_snat_handler(struct sk_buff *skb,
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, tcph->check,
- (char*)&(tcph->check) - (char*)tcph);
+ (char *)&(tcph->check) - (char *)tcph);
}
return 1;
}
@@ -408,14 +408,14 @@ struct tcp_states_t {
int next_state[IP_VS_TCP_S_LAST];
};
-static const char * tcp_state_name(int state)
+static const char *tcp_state_name(int state)
{
if (state >= IP_VS_TCP_S_LAST)
return "ERR!";
return tcp_state_name_table[state] ? tcp_state_name_table[state] : "?";
}
-static struct tcp_states_t tcp_states [] = {
+static struct tcp_states_t tcp_states[] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSR }},
@@ -438,7 +438,7 @@ static struct tcp_states_t tcp_states [] = {
/*rst*/ {{sCL, sCL, sCL, sSR, sCL, sCL, sCL, sCL, sLA, sLI, sCL }},
};
-static struct tcp_states_t tcp_states_dos [] = {
+static struct tcp_states_t tcp_states_dos[] = {
/* INPUT */
/* sNO, sES, sSS, sSR, sFW, sTW, sCL, sCW, sLA, sLI, sSA */
/*syn*/ {{sSR, sES, sES, sSR, sSR, sSR, sSR, sSR, sSR, sSR, sSA }},
@@ -474,7 +474,7 @@ static void tcp_timeout_change(struct ip_vs_protocol *pp, int flags)
** for most if not for all of the applications. Something
** like "capabilities" (flags) for each object.
*/
- tcp_state_table = (on? tcp_states_dos : tcp_states);
+ tcp_state_table = (on ? tcp_states_dos : tcp_states);
}
static int
@@ -516,14 +516,15 @@ set_tcp_state(struct ip_vs_protocol *pp, struct ip_vs_conn *cp,
state_off = TCP_DIR_INPUT_ONLY;
}
- if ((state_idx = tcp_state_idx(th)) < 0) {
+ state_idx = tcp_state_idx(th);
+ if (state_idx < 0) {
IP_VS_DBG(8, "tcp_state_idx=%d!!!\n", state_idx);
goto tcp_state_out;
}
new_state = tcp_state_table[state_off+state_idx].next_state[cp->state];
- tcp_state_out:
+tcp_state_out:
if (new_state != cp->state) {
struct ip_vs_dest *dest = cp->dest;
@@ -627,7 +628,7 @@ static int tcp_register_app(struct ip_vs_app *inc)
list_add(&inc->p_list, &tcp_apps[hash]);
atomic_inc(&ip_vs_protocol_tcp.appcnt);
- out:
+out:
spin_unlock_bh(&tcp_app_lock);
return ret;
}
@@ -681,7 +682,7 @@ tcp_app_conn_bind(struct ip_vs_conn *cp)
}
spin_unlock(&tcp_app_lock);
- out:
+out:
return result;
}
@@ -696,6 +697,7 @@ void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp)
cp->timeout = ip_vs_protocol_tcp.timeout_table[IP_VS_TCP_S_LISTEN];
spin_unlock(&cp->lock);
}
+EXPORT_SYMBOL(ip_vs_tcp_conn_listen);
static void ip_vs_tcp_init(struct ip_vs_protocol *pp)
diff --git a/net/netfilter/ipvs/ip_vs_proto_udp.c b/net/netfilter/ipvs/ip_vs_proto_udp.c
index e7a6885e0167..658d8a6a0e73 100644
--- a/net/netfilter/ipvs/ip_vs_proto_udp.c
+++ b/net/netfilter/ipvs/ip_vs_proto_udp.c
@@ -235,7 +235,7 @@ udp_snat_handler(struct sk_buff *skb,
udph->check = CSUM_MANGLED_0;
IP_VS_DBG(11, "O-pkt: %s O-csum=%d (+%zd)\n",
pp->name, udph->check,
- (char*)&(udph->check) - (char*)udph);
+ (char *)&(udph->check) - (char *)udph);
}
return 1;
}
@@ -409,7 +409,7 @@ static int udp_register_app(struct ip_vs_app *inc)
list_add(&inc->p_list, &udp_apps[hash]);
atomic_inc(&ip_vs_protocol_udp.appcnt);
- out:
+out:
spin_unlock_bh(&udp_app_lock);
return ret;
}
@@ -462,7 +462,7 @@ static int udp_app_conn_bind(struct ip_vs_conn *cp)
}
spin_unlock(&udp_app_lock);
- out:
+out:
return result;
}
@@ -485,7 +485,7 @@ udp_set_state_timeout(struct ip_vs_protocol *pp, char *sname, int to)
udp_state_name_table, sname, to);
}
-static const char * udp_state_name(int state)
+static const char *udp_state_name(int state)
{
if (state >= IP_VS_UDP_S_LAST)
return "ERR!";
diff --git a/net/netfilter/ipvs/ip_vs_rr.c b/net/netfilter/ipvs/ip_vs_rr.c
index e210f37d8ea2..59c4ae32d927 100644
--- a/net/netfilter/ipvs/ip_vs_rr.c
+++ b/net/netfilter/ipvs/ip_vs_rr.c
@@ -75,7 +75,7 @@ ip_vs_rr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
IP_VS_ERR_RL("RR: no destination available\n");
return NULL;
- out:
+out:
svc->sched_data = q;
write_unlock(&svc->sched_lock);
IP_VS_DBG_BUF(6, "RR: server %s:%u "
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index bbc1ac795952..f3f5e170093e 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -23,7 +23,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
-#include <asm/string.h>
+#include <linux/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>
@@ -120,7 +120,7 @@ static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
*/
continue;
}
- if (strcmp(sched_name, sched->name)==0) {
+ if (strcmp(sched_name, sched->name) == 0) {
/* HIT */
read_unlock_bh(&__ip_vs_sched_lock);
return sched;
@@ -217,7 +217,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
return 0;
}
-
+EXPORT_SYMBOL(register_ip_vs_scheduler);
/*
* Unregister a scheduler from the scheduler list
@@ -250,3 +250,4 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
return 0;
}
+EXPORT_SYMBOL(unregister_ip_vs_scheduler);
diff --git a/net/netfilter/ipvs/ip_vs_sed.c b/net/netfilter/ipvs/ip_vs_sed.c
index 1ab75a9dc400..de64bf26210a 100644
--- a/net/netfilter/ipvs/ip_vs_sed.c
+++ b/net/netfilter/ipvs/ip_vs_sed.c
@@ -93,7 +93,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/*
* Find the destination with the least load.
*/
- nextstage:
+nextstage:
list_for_each_entry_continue(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -116,8 +116,7 @@ ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
-static struct ip_vs_scheduler ip_vs_sed_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_sed_scheduler = {
.name = "sed",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_sh.c b/net/netfilter/ipvs/ip_vs_sh.c
index e6cc174fbc06..4469110c7690 100644
--- a/net/netfilter/ipvs/ip_vs_sh.c
+++ b/net/netfilter/ipvs/ip_vs_sh.c
@@ -102,7 +102,7 @@ ip_vs_sh_assign(struct ip_vs_sh_bucket *tbl, struct ip_vs_service *svc)
b = tbl;
p = &svc->destinations;
- for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_SH_TAB_SIZE; i++) {
if (list_empty(p)) {
b->dest = NULL;
} else {
@@ -130,7 +130,7 @@ static void ip_vs_sh_flush(struct ip_vs_sh_bucket *tbl)
struct ip_vs_sh_bucket *b;
b = tbl;
- for (i=0; i<IP_VS_SH_TAB_SIZE; i++) {
+ for (i = 0; i < IP_VS_SH_TAB_SIZE; i++) {
if (b->dest) {
atomic_dec(&b->dest->refcnt);
b->dest = NULL;
@@ -239,8 +239,7 @@ ip_vs_sh_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/*
* IPVS SH Scheduler structure
*/
-static struct ip_vs_scheduler ip_vs_sh_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_sh_scheduler = {
.name = "sh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8fb0ae616761..75472aa66109 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -133,13 +133,13 @@ static LIST_HEAD(ip_vs_sync_queue);
static DEFINE_SPINLOCK(ip_vs_sync_lock);
/* current sync_buff for accepting new conn entries */
-static struct ip_vs_sync_buff *curr_sb = NULL;
+static struct ip_vs_sync_buff *curr_sb;
static DEFINE_SPINLOCK(curr_sb_lock);
/* ipvs sync daemon state */
volatile int ip_vs_sync_state = IP_VS_STATE_NONE;
-volatile int ip_vs_master_syncid = 0;
-volatile int ip_vs_backup_syncid = 0;
+volatile int ip_vs_master_syncid;
+volatile int ip_vs_backup_syncid;
/* multicast interface name */
char ip_vs_master_mcast_ifn[IP_VS_IFNAME_MAXLEN];
@@ -165,8 +165,7 @@ static inline struct ip_vs_sync_buff *sb_dequeue(void)
if (list_empty(&ip_vs_sync_queue)) {
sb = NULL;
} else {
- sb = list_entry(ip_vs_sync_queue.next,
- struct ip_vs_sync_buff,
+ sb = list_first_entry(&ip_vs_sync_queue, struct ip_vs_sync_buff,
list);
list_del(&sb->list);
}
@@ -175,23 +174,27 @@ static inline struct ip_vs_sync_buff *sb_dequeue(void)
return sb;
}
-static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(void)
+static inline struct ip_vs_sync_buff *ip_vs_sync_buff_create(void)
{
struct ip_vs_sync_buff *sb;
- if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC)))
+ sb = kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC);
+ if (!sb)
return NULL;
- if (!(sb->mesg=kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC))) {
+ sb->mesg = kmalloc(sync_send_mesg_maxlen, GFP_ATOMIC);
+ if (!sb->mesg) {
kfree(sb);
return NULL;
}
+
sb->mesg->nr_conns = 0;
sb->mesg->syncid = ip_vs_master_syncid;
sb->mesg->size = 4;
sb->head = (unsigned char *)sb->mesg + 4;
sb->end = (unsigned char *)sb->mesg + sync_send_mesg_maxlen;
sb->firstuse = jiffies;
+
return sb;
}
@@ -228,6 +231,7 @@ get_curr_sync_buff(unsigned long time)
} else
sb = NULL;
spin_unlock_bh(&curr_sb_lock);
+
return sb;
}
@@ -244,7 +248,8 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
spin_lock(&curr_sb_lock);
if (!curr_sb) {
- if (!(curr_sb=ip_vs_sync_buff_create())) {
+ curr_sb = ip_vs_sync_buff_create();
+ if (!curr_sb) {
spin_unlock(&curr_sb_lock);
pr_err("ip_vs_sync_buff_create failed.\n");
return;
@@ -254,7 +259,7 @@ void ip_vs_sync_conn(struct ip_vs_conn *cp)
len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? FULL_CONN_SIZE :
SIMPLE_CONN_SIZE;
m = curr_sb->mesg;
- s = (struct ip_vs_sync_conn *)curr_sb->head;
+ s = (struct ip_vs_sync_conn *) curr_sb->head;
/* copy members */
s->protocol = cp->protocol;
@@ -325,7 +330,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
}
p = (char *)buffer + sizeof(struct ip_vs_sync_mesg);
- for (i=0; i<m->nr_conns; i++) {
+ for (i = 0; i < m->nr_conns; i++) {
unsigned flags, state;
if (p + SIMPLE_CONN_SIZE > buffer+buflen) {
@@ -504,7 +509,8 @@ static int set_mcast_if(struct sock *sk, char *ifname)
struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
@@ -529,7 +535,8 @@ static int set_sync_mesg_maxlen(int sync_state)
int num;
if (sync_state == IP_VS_STATE_MASTER) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(&init_net, ip_vs_master_mcast_ifn);
+ if (!dev)
return -ENODEV;
num = (dev->mtu - sizeof(struct iphdr) -
@@ -540,7 +547,8 @@ static int set_sync_mesg_maxlen(int sync_state)
IP_VS_DBG(7, "setting the maximum length of sync sending "
"message %d.\n", sync_send_mesg_maxlen);
} else if (sync_state == IP_VS_STATE_BACKUP) {
- if ((dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn)) == NULL)
+ dev = __dev_get_by_name(&init_net, ip_vs_backup_mcast_ifn);
+ if (!dev)
return -ENODEV;
sync_recv_mesg_maxlen = dev->mtu -
@@ -568,7 +576,8 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev)
return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -589,7 +598,8 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
__be32 addr;
struct sockaddr_in sin;
- if ((dev = __dev_get_by_name(&init_net, ifname)) == NULL)
+ dev = __dev_get_by_name(&init_net, ifname);
+ if (!dev)
return -ENODEV;
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
@@ -605,13 +615,13 @@ static int bind_mcastif_addr(struct socket *sock, char *ifname)
sin.sin_addr.s_addr = addr;
sin.sin_port = 0;
- return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin));
+ return sock->ops->bind(sock, (struct sockaddr *) &sin, sizeof(sin));
}
/*
* Set up sending multicast socket over UDP
*/
-static struct socket * make_send_sock(void)
+static struct socket *make_send_sock(void)
{
struct socket *sock;
int result;
@@ -647,7 +657,7 @@ static struct socket * make_send_sock(void)
return sock;
- error:
+error:
sock_release(sock);
return ERR_PTR(result);
}
@@ -656,7 +666,7 @@ static struct socket * make_send_sock(void)
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket * make_receive_sock(void)
+static struct socket *make_receive_sock(void)
{
struct socket *sock;
int result;
@@ -689,7 +699,7 @@ static struct socket * make_receive_sock(void)
return sock;
- error:
+error:
sock_release(sock);
return ERR_PTR(result);
}
@@ -775,14 +785,13 @@ static int sync_thread_master(void *data)
}
/* clean up the sync_buff queue */
- while ((sb=sb_dequeue())) {
+ while ((sb = sb_dequeue()))
ip_vs_sync_buff_release(sb);
- }
/* clean up the current sync_buff */
- if ((sb = get_curr_sync_buff(0))) {
+ sb = get_curr_sync_buff(0);
+ if (sb)
ip_vs_sync_buff_release(sb);
- }
/* release the sending multicast socket */
sock_release(tinfo->sock);
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index bbddfdb10db2..139d9b051038 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -81,7 +81,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
/*
* Find the destination with the least load.
*/
- nextstage:
+nextstage:
list_for_each_entry_continue(dest, &svc->destinations, n_list) {
if (dest->flags & IP_VS_DEST_F_OVERLOAD)
continue;
@@ -104,8 +104,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
}
-static struct ip_vs_scheduler ip_vs_wlc_scheduler =
-{
+static struct ip_vs_scheduler ip_vs_wlc_scheduler = {
.name = "wlc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
diff --git a/net/netfilter/ipvs/ip_vs_wrr.c b/net/netfilter/ipvs/ip_vs_wrr.c
index 30db633f88f1..88bc88cb9460 100644
--- a/net/netfilter/ipvs/ip_vs_wrr.c
+++ b/net/netfilter/ipvs/ip_vs_wrr.c
@@ -198,7 +198,7 @@ ip_vs_wrr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
atomic_read(&dest->refcnt),
atomic_read(&dest->weight));
- out:
+out:
write_unlock(&svc->sched_lock);
return dest;
}
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index e450cd6f4eb5..b05a1da1a62a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -73,8 +73,8 @@ __ip_vs_get_out_rt(struct ip_vs_conn *cp, u32 rtos)
if (dest) {
spin_lock(&dest->dst_lock);
- if (!(rt = (struct rtable *)
- __ip_vs_dst_check(dest, rtos, 0))) {
+ rt = (struct rtable *) __ip_vs_dst_check(dest, rtos, 0);
+ if (!rt) {
struct flowi fl = {
.oif = 0,
.nl_u = {
@@ -248,7 +248,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
mtu = dst_mtu(&rt->u.dst);
if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
- icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
}
@@ -257,7 +257,8 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
* Call ip_send_check because we are not sure it is called
* after ip_defrag. Is copy-on-write needed?
*/
- if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
ip_rt_put(rt);
return NF_STOLEN;
}
@@ -372,14 +373,15 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
}
- if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
+ rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos));
+ if (!rt)
goto tx_error_icmp;
/* MTU checking */
mtu = dst_mtu(&rt->u.dst);
if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF))) {
ip_rt_put(rt);
- icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
IP_VS_DBG_RL_PKT(0, pp, skb, 0, "ip_vs_nat_xmit(): frag needed for");
goto tx_error;
}
@@ -415,13 +417,13 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
LeaveFunction(10);
return NF_STOLEN;
- tx_error_icmp:
+tx_error_icmp:
dst_link_failure(skb);
- tx_error:
+tx_error:
LeaveFunction(10);
kfree_skb(skb);
return NF_STOLEN;
- tx_error_put:
+tx_error_put:
ip_rt_put(rt);
goto tx_error;
}
@@ -546,7 +548,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
goto tx_error;
}
- if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(tos))))
+ rt = __ip_vs_get_out_rt(cp, RT_TOS(tos));
+ if (!rt)
goto tx_error_icmp;
tdev = rt->u.dst.dev;
@@ -564,7 +567,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
if ((old_iph->frag_off & htons(IP_DF))
&& mtu < ntohs(old_iph->tot_len)) {
- icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -626,9 +629,9 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
return NF_STOLEN;
- tx_error_icmp:
+tx_error_icmp:
dst_link_failure(skb);
- tx_error:
+tx_error:
kfree_skb(skb);
LeaveFunction(10);
return NF_STOLEN;
@@ -756,13 +759,14 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
EnterFunction(10);
- if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos))))
+ rt = __ip_vs_get_out_rt(cp, RT_TOS(iph->tos));
+ if (!rt)
goto tx_error_icmp;
/* MTU checking */
mtu = dst_mtu(&rt->u.dst);
if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu) {
- icmp_send(skb, ICMP_DEST_UNREACH,ICMP_FRAG_NEEDED, htonl(mtu));
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
ip_rt_put(rt);
IP_VS_DBG_RL("%s(): frag needed\n", __func__);
goto tx_error;
@@ -772,7 +776,8 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
* Call ip_send_check because we are not sure it is called
* after ip_defrag. Is copy-on-write needed?
*/
- if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(skb == NULL)) {
ip_rt_put(rt);
return NF_STOLEN;
}
@@ -790,9 +795,9 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
LeaveFunction(10);
return NF_STOLEN;
- tx_error_icmp:
+tx_error_icmp:
dst_link_failure(skb);
- tx_error:
+tx_error:
kfree_skb(skb);
LeaveFunction(10);
return NF_STOLEN;
@@ -884,7 +889,8 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
* mangle and send the packet here (only for VS/NAT)
*/
- if (!(rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos))))
+ rt = __ip_vs_get_out_rt(cp, RT_TOS(ip_hdr(skb)->tos));
+ if (!rt)
goto tx_error_icmp;
/* MTU checking */
@@ -917,15 +923,15 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
rc = NF_STOLEN;
goto out;
- tx_error_icmp:
+tx_error_icmp:
dst_link_failure(skb);
- tx_error:
+tx_error:
dev_kfree_skb(skb);
rc = NF_STOLEN;
- out:
+out:
LeaveFunction(10);
return rc;
- tx_error_put:
+tx_error_put:
ip_rt_put(rt);
goto tx_error;
}