From a1bcfacd0577ff477e934731d4ceb3d26eab947d Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Tue, 14 Nov 2006 10:43:58 -0800 Subject: netpoll: private skb pool (rev3) It was a dark and stormy night when Steve first saw the netpoll beast. The beast was odd, and misshapen but not extremely ugly. "Let me take off one of your warts" he said. This wart is where you tried to make an skb list yourself. If the beast had ever run out of memory, he would have stupefied himself unnecessarily. The first try was painful, so he tried again till the bleeding stopped. And again, and again... Signed-off-by: Stephen Hemminger --- net/core/netpoll.c | 53 +++++++++++++++++++++-------------------------------- 1 file changed, 21 insertions(+), 32 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6589adb14cb..4de62f1f413 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -36,9 +36,7 @@ #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) #define MAX_RETRIES 20000 -static DEFINE_SPINLOCK(skb_list_lock); -static int nr_skbs; -static struct sk_buff *skbs; +static struct sk_buff_head skb_pool; static DEFINE_SPINLOCK(queue_lock); static int queue_depth; @@ -190,17 +188,15 @@ static void refill_skbs(void) struct sk_buff *skb; unsigned long flags; - spin_lock_irqsave(&skb_list_lock, flags); - while (nr_skbs < MAX_SKBS) { + spin_lock_irqsave(&skb_pool.lock, flags); + while (skb_pool.qlen < MAX_SKBS) { skb = alloc_skb(MAX_SKB_SIZE, GFP_ATOMIC); if (!skb) break; - skb->next = skbs; - skbs = skb; - nr_skbs++; + __skb_queue_tail(&skb_pool, skb); } - spin_unlock_irqrestore(&skb_list_lock, flags); + spin_unlock_irqrestore(&skb_pool.lock, flags); } static void zap_completion_queue(void) @@ -229,38 +225,25 @@ static void zap_completion_queue(void) put_cpu_var(softnet_data); } -static struct sk_buff * find_skb(struct netpoll *np, int len, int reserve) +static struct sk_buff *find_skb(struct netpoll *np, int len, int reserve) { - int once = 1, count = 0; - unsigned long flags; - struct sk_buff *skb = NULL; + int count = 0; + struct sk_buff *skb; zap_completion_queue(); + refill_skbs(); repeat: - if (nr_skbs < MAX_SKBS) - refill_skbs(); skb = alloc_skb(len, GFP_ATOMIC); - - if (!skb) { - spin_lock_irqsave(&skb_list_lock, flags); - skb = skbs; - if (skb) { - skbs = skb->next; - skb->next = NULL; - nr_skbs--; - } - spin_unlock_irqrestore(&skb_list_lock, flags); - } + if (!skb) + skb = skb_dequeue(&skb_pool); if(!skb) { - count++; - if (once && (count == 1000000)) { - printk("out of netpoll skbs!\n"); - once = 0; + if (++count < 10) { + netpoll_poll(np); + goto repeat; } - netpoll_poll(np); - goto repeat; + return NULL; } atomic_set(&skb->users, 1); @@ -770,6 +753,12 @@ int netpoll_setup(struct netpoll *np) return -1; } +static int __init netpoll_init(void) { + skb_queue_head_init(&skb_pool); + return 0; +} +core_initcall(netpoll_init); + void netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; -- cgit v1.2.3 From 93ec2c723e3f8a216dde2899aeb85c648672bc6b Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:50 -0700 Subject: netpoll info leak After looking harder, Steve noticed that the netpoll beast leaked a little every time it shutdown for a nap. Not a big leak, but a nuisance kind of thing. He took out his refcount duct tape and patched the leak. It was overkill since there was already other locking in that area, but it looked clean and wouldn't attract fleas. 
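For readers following the history outside a kernel tree, the refcounting this patch bolts onto netpoll_info boils down to a last-put-frees pattern. Below is a minimal stand-alone C model of that idea only; shared_info, info_get and info_put are invented names for illustration, not kernel APIs, and the real diff follows after it.

	#include <stdatomic.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct shared_info {
		atomic_int refcnt;
		/* per-device state (rx hooks, queues, ...) would live here */
	};

	static struct shared_info *info_get(struct shared_info **slot)
	{
		if (!*slot) {
			*slot = calloc(1, sizeof(**slot));
			if (!*slot)
				return NULL;
			atomic_init(&(*slot)->refcnt, 1);	/* first attached client */
		} else {
			atomic_fetch_add(&(*slot)->refcnt, 1);	/* share the existing info */
		}
		return *slot;
	}

	static void info_put(struct shared_info **slot)
	{
		struct shared_info *info = *slot;

		if (!info)
			return;
		if (atomic_fetch_sub(&info->refcnt, 1) == 1) {
			free(info);		/* last user tears it down exactly once */
			*slot = NULL;
		}
	}

	int main(void)
	{
		struct shared_info *npinfo = NULL;	/* stands in for ndev->npinfo */

		info_get(&npinfo);	/* e.g. netconsole attaches */
		info_get(&npinfo);	/* a second netpoll client on the same device */
		info_put(&npinfo);	/* first cleanup: info must survive */
		info_put(&npinfo);	/* second cleanup: freed here, not leaked */
		printf("npinfo after both cleanups: %s\n",
		       npinfo ? "still allocated" : "freed");
		return 0;
	}

With two clients attached to the same device, the first cleanup only drops a reference and the second one actually frees, which is exactly the leak/double-free hazard the patch below closes.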
Signed-off-by: Stephen Hemminger --- include/linux/netpoll.h | 1 + net/core/netpoll.c | 25 +++++++++++++++++++------ 2 files changed, 20 insertions(+), 6 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 1efe60c5c00..39845fc975f 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -25,6 +25,7 @@ struct netpoll { }; struct netpoll_info { + atomic_t refcnt; spinlock_t poll_lock; int poll_owner; int tries; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 4de62f1f413..c66df2f45d2 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -658,8 +658,11 @@ int netpoll_setup(struct netpoll *np) npinfo->tries = MAX_RETRIES; spin_lock_init(&npinfo->rx_lock); skb_queue_head_init(&npinfo->arp_tx); - } else + atomic_set(&npinfo->refcnt, 1); + } else { npinfo = ndev->npinfo; + atomic_inc(&npinfo->refcnt); + } if (!ndev->poll_controller) { printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", @@ -766,12 +769,22 @@ void netpoll_cleanup(struct netpoll *np) if (np->dev) { npinfo = np->dev->npinfo; - if (npinfo && npinfo->rx_np == np) { - spin_lock_irqsave(&npinfo->rx_lock, flags); - npinfo->rx_np = NULL; - npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; - spin_unlock_irqrestore(&npinfo->rx_lock, flags); + if (npinfo) { + if (npinfo->rx_np == np) { + spin_lock_irqsave(&npinfo->rx_lock, flags); + npinfo->rx_np = NULL; + npinfo->rx_flags &= ~NETPOLL_RX_ENABLED; + spin_unlock_irqrestore(&npinfo->rx_lock, flags); + } + + np->dev->npinfo = NULL; + if (atomic_dec_and_test(&npinfo->refcnt)) { + skb_queue_purge(&npinfo->arp_tx); + + kfree(npinfo); + } } + dev_put(np->dev); } -- cgit v1.2.3 From b6cd27ed33886a5ffaf0925a6d98e13e18e8a1af Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:51 -0700 Subject: netpoll per device txq When the netpoll beast got really busy, it tended to clog things, so it stored them for later. But the beast was putting all it's skb's in one basket. This was bad because maybe some pipes were clogged and others were not. 
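The point of the per-device queue in the diff below is isolation: a stalled device can no longer hold up everyone else's deferred packets. Here is a small user-space sketch of that idea only, with invented names (fake_dev, dev_queue, dev_drain) standing in for npinfo->txq and queue_process():

	#include <stdio.h>

	#define QLEN 8

	struct fake_dev {
		const char *name;
		int stalled;			/* models a clogged transmit path */
		const char *q[QLEN];		/* stands in for npinfo->txq */
		int head, tail;
	};

	static void dev_queue(struct fake_dev *d, const char *pkt)
	{
		if (d->tail - d->head < QLEN)	/* drop when the per-device FIFO is full */
			d->q[d->tail++ % QLEN] = pkt;
	}

	static void dev_drain(struct fake_dev *d)	/* plays the role of queue_process() */
	{
		while (!d->stalled && d->head < d->tail)
			printf("%s: xmit %s\n", d->name, d->q[d->head++ % QLEN]);
	}

	int main(void)
	{
		struct fake_dev eth0 = { .name = "eth0", .stalled = 1 };
		struct fake_dev eth1 = { .name = "eth1" };

		dev_queue(&eth0, "console msg A");	/* eth0 is clogged... */
		dev_queue(&eth1, "console msg B");	/* ...but eth1 is not */
		dev_drain(&eth0);			/* nothing goes out here */
		dev_drain(&eth1);			/* ...and this still drains */
		return 0;
	}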
Signed-off-by: Stephen Hemminger --- include/linux/netpoll.h | 2 ++ net/core/netpoll.c | 50 +++++++++++++++---------------------------------- 2 files changed, 17 insertions(+), 35 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 39845fc975f..93a8b766442 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -33,6 +33,8 @@ struct netpoll_info { spinlock_t rx_lock; struct netpoll *rx_np; /* netpoll that registered an rx_hook */ struct sk_buff_head arp_tx; /* list of arp requests to reply to */ + struct sk_buff_head txq; + struct work_struct tx_work; }; void netpoll_poll(struct netpoll *np); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index c66df2f45d2..ac4e8b8f57d 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -38,10 +38,6 @@ static struct sk_buff_head skb_pool; -static DEFINE_SPINLOCK(queue_lock); -static int queue_depth; -static struct sk_buff *queue_head, *queue_tail; - static atomic_t trapped; #define NETPOLL_RX_ENABLED 1 @@ -56,46 +52,25 @@ static void arp_reply(struct sk_buff *skb); static void queue_process(void *p) { - unsigned long flags; + struct netpoll_info *npinfo = p; struct sk_buff *skb; - while (queue_head) { - spin_lock_irqsave(&queue_lock, flags); - - skb = queue_head; - queue_head = skb->next; - if (skb == queue_tail) - queue_head = NULL; - - queue_depth--; - - spin_unlock_irqrestore(&queue_lock, flags); - + while ((skb = skb_dequeue(&npinfo->txq))) dev_queue_xmit(skb); - } -} -static DECLARE_WORK(send_queue, queue_process, NULL); +} void netpoll_queue(struct sk_buff *skb) { - unsigned long flags; + struct net_device *dev = skb->dev; + struct netpoll_info *npinfo = dev->npinfo; - if (queue_depth == MAX_QUEUE_DEPTH) { - __kfree_skb(skb); - return; + if (!npinfo) + kfree_skb(skb); + else { + skb_queue_tail(&npinfo->txq, skb); + schedule_work(&npinfo->tx_work); } - - spin_lock_irqsave(&queue_lock, flags); - if (!queue_head) - queue_head = skb; - else - queue_tail->next = skb; - queue_tail = skb; - queue_depth++; - spin_unlock_irqrestore(&queue_lock, flags); - - schedule_work(&send_queue); } static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, @@ -658,6 +633,9 @@ int netpoll_setup(struct netpoll *np) npinfo->tries = MAX_RETRIES; spin_lock_init(&npinfo->rx_lock); skb_queue_head_init(&npinfo->arp_tx); + skb_queue_head_init(&npinfo->txq); + INIT_WORK(&npinfo->tx_work, queue_process, npinfo); + atomic_set(&npinfo->refcnt, 1); } else { npinfo = ndev->npinfo; @@ -780,6 +758,8 @@ void netpoll_cleanup(struct netpoll *np) np->dev->npinfo = NULL; if (atomic_dec_and_test(&npinfo->refcnt)) { skb_queue_purge(&npinfo->arp_tx); + skb_queue_purge(&npinfo->txq); + flush_scheduled_work(); kfree(npinfo); } -- cgit v1.2.3 From b41848b61bae30e3661efd4ec62ea380cedef687 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:52 -0700 Subject: netpoll setup error handling The beast was not always healthy. When it was sick, it tended to be laconic and not tell anyone the real problem. A few small changes had it telling the world about its problems, if they really wanted to hear. 
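The change below is simply to propagate real errno values instead of a bare -1, so callers such as netconsole can report what actually failed. A hedged user-space sketch of the same convention (fake_setup is hypothetical; note the kernel side uses its internal ENOTSUPP rather than POSIX ENOTSUP):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	static int fake_setup(int have_dev, int have_mem, int can_poll)
	{
		if (!have_dev)
			return -ENODEV;		/* "doesn't exist, aborting" */
		if (!have_mem)
			return -ENOMEM;		/* npinfo allocation failed */
		if (!can_poll)
			return -ENOTSUP;	/* no ->poll_controller */
		return 0;
	}

	int main(void)
	{
		int err = fake_setup(1, 1, 0);

		if (err)
			fprintf(stderr, "setup failed: %s\n", strerror(-err));
		return err ? 1 : 0;
	}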
Signed-off-by: Stephen Hemminger --- drivers/net/netconsole.c | 7 +++++-- net/core/netpoll.c | 20 +++++++++++++------- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index bf58db29e2e..e6e8a9797b6 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -102,6 +102,8 @@ __setup("netconsole=", option_setup); static int init_netconsole(void) { + int err; + if(strlen(config)) option_setup(config); @@ -110,8 +112,9 @@ static int init_netconsole(void) return 0; } - if(netpoll_setup(&np)) - return -EINVAL; + err = netpoll_setup(&np); + if (err) + return err; register_console(&netconsole); printk(KERN_INFO "netconsole: network logging started\n"); diff --git a/net/core/netpoll.c b/net/core/netpoll.c index ac4e8b8f57d..621baa5da49 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -611,20 +611,23 @@ int netpoll_setup(struct netpoll *np) struct in_device *in_dev; struct netpoll_info *npinfo; unsigned long flags; + int err; if (np->dev_name) ndev = dev_get_by_name(np->dev_name); if (!ndev) { printk(KERN_ERR "%s: %s doesn't exist, aborting.\n", np->name, np->dev_name); - return -1; + return -ENODEV; } np->dev = ndev; if (!ndev->npinfo) { npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); - if (!npinfo) + if (!npinfo) { + err = -ENOMEM; goto release; + } npinfo->rx_flags = 0; npinfo->rx_np = NULL; @@ -645,6 +648,7 @@ int netpoll_setup(struct netpoll *np) if (!ndev->poll_controller) { printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n", np->name, np->dev_name); + err = -ENOTSUPP; goto release; } @@ -655,13 +659,14 @@ int netpoll_setup(struct netpoll *np) np->name, np->dev_name); rtnl_lock(); - if (dev_change_flags(ndev, ndev->flags | IFF_UP) < 0) { + err = dev_open(ndev); + rtnl_unlock(); + + if (err) { printk(KERN_ERR "%s: failed to open %s\n", - np->name, np->dev_name); - rtnl_unlock(); + np->name, ndev->name); goto release; } - rtnl_unlock(); atleast = jiffies + HZ/10; atmost = jiffies + 4*HZ; @@ -699,6 +704,7 @@ int netpoll_setup(struct netpoll *np) rcu_read_unlock(); printk(KERN_ERR "%s: no IP address for %s, aborting\n", np->name, np->dev_name); + err = -EDESTADDRREQ; goto release; } @@ -731,7 +737,7 @@ int netpoll_setup(struct netpoll *np) kfree(npinfo); np->dev = NULL; dev_put(ndev); - return -1; + return err; } static int __init netpoll_init(void) { -- cgit v1.2.3 From 6c43ff18f91e54aa7555d8ae4f26eab7da5bce68 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:53 -0700 Subject: netpoll deferred transmit path When the netpoll beast got busy, he tended to babble. Instead of talking out of his large mouth as normal, he tended to try to snort out other orifices. This lead to words (skbs) ending up in odd places (like NIT) that he did not intend. The normal way of talking wouldn't work, but he could at least change to using the same tone all the time. 
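The deferred path in the diff below hands packets straight to the driver and, when the ring is busy, puts the packet back at the head of the queue to try again later. A rough stand-alone model of that requeue-and-retry loop follows; fake_hard_start_xmit is a stub, not the real driver hook:

	#include <stdio.h>

	#define NPKTS 3

	static int ring_full = 1;	/* pretend the first attempt hits a full TX ring */

	static int fake_hard_start_xmit(int pkt)	/* stub for dev->hard_start_xmit */
	{
		if (ring_full) {
			ring_full = 0;		/* the NIC frees space before the retry */
			return 1;		/* NETDEV_TX_BUSY */
		}
		printf("xmit pkt %d\n", pkt);
		return 0;			/* NETDEV_TX_OK */
	}

	int main(void)
	{
		int q[NPKTS] = { 1, 2, 3 };
		int head = 0;

		while (head < NPKTS) {
			if (fake_hard_start_xmit(q[head]) != 0) {
				/* requeue at the head; the kernel worker would
				 * schedule_delayed_work() and return instead of spinning */
				printf("busy, deferring pkt %d\n", q[head]);
				continue;
			}
			head++;
		}
		return 0;
	}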
Signed-off-by: Stephen Hemminger --- net/core/netpoll.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 621baa5da49..93cb828f3aa 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -55,9 +55,25 @@ static void queue_process(void *p) struct netpoll_info *npinfo = p; struct sk_buff *skb; - while ((skb = skb_dequeue(&npinfo->txq))) - dev_queue_xmit(skb); + while ((skb = skb_dequeue(&npinfo->txq))) { + struct net_device *dev = skb->dev; + if (!netif_device_present(dev) || !netif_running(dev)) { + __kfree_skb(skb); + continue; + } + + netif_tx_lock_bh(dev); + if (netif_queue_stopped(dev) || + dev->hard_start_xmit(skb, dev) != NETDEV_TX_OK) { + skb_queue_head(&npinfo->txq, skb); + netif_tx_unlock_bh(dev); + + schedule_delayed_work(&npinfo->tx_work, HZ/10); + return; + } + netif_tx_unlock_bh(dev); + } } void netpoll_queue(struct sk_buff *skb) @@ -765,6 +781,7 @@ void netpoll_cleanup(struct netpoll *np) if (atomic_dec_and_test(&npinfo->refcnt)) { skb_queue_purge(&npinfo->arp_tx); skb_queue_purge(&npinfo->txq); + cancel_rearming_delayed_work(&npinfo->tx_work); flush_scheduled_work(); kfree(npinfo); -- cgit v1.2.3 From 2bdfe0baeca0e2750037b8fba71905c00ac3c515 Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:54 -0700 Subject: netpoll retry cleanup The netpoll beast was still not happy. If the beast got clogged pipes, it tended to stare blankly off in space for a long time. The problem couldn't be completely fixed because the beast talked with irq's disabled. But it could be made less painful and shorter. Signed-off-by: Stephen Hemminger --- include/linux/netpoll.h | 1 - net/core/netpoll.c | 71 +++++++++++++++++++++++-------------------------- 2 files changed, 33 insertions(+), 39 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 93a8b766442..c65d12ec7bb 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -28,7 +28,6 @@ struct netpoll_info { atomic_t refcnt; spinlock_t poll_lock; int poll_owner; - int tries; int rx_flags; spinlock_t rx_lock; struct netpoll *rx_np; /* netpoll that registered an rx_hook */ diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 93cb828f3aa..6b34c394672 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -34,12 +34,12 @@ #define MAX_UDP_CHUNK 1460 #define MAX_SKBS 32 #define MAX_QUEUE_DEPTH (MAX_SKBS / 2) -#define MAX_RETRIES 20000 static struct sk_buff_head skb_pool; static atomic_t trapped; +#define USEC_PER_POLL 50 #define NETPOLL_RX_ENABLED 1 #define NETPOLL_RX_DROP 2 @@ -72,6 +72,7 @@ static void queue_process(void *p) schedule_delayed_work(&npinfo->tx_work, HZ/10); return; } + netif_tx_unlock_bh(dev); } } @@ -244,50 +245,44 @@ repeat: static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { - int status; - struct netpoll_info *npinfo; + int status = NETDEV_TX_BUSY; + unsigned long tries; + struct net_device *dev = np->dev; + struct netpoll_info *npinfo = np->dev->npinfo; + + if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { + __kfree_skb(skb); + return; + } + + /* don't get messages out of order, and no recursion */ + if ( !(np->drop == netpoll_queue && skb_queue_len(&npinfo->txq)) + && npinfo->poll_owner != smp_processor_id() + && netif_tx_trylock(dev)) { + + /* try until next clock tick */ + for(tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { + if 
(!netif_queue_stopped(dev)) + status = dev->hard_start_xmit(skb, dev); + + if (status == NETDEV_TX_OK) + break; - if (!np || !np->dev || !netif_running(np->dev)) { - __kfree_skb(skb); - return; - } + /* tickle device maybe there is some cleanup */ + netpoll_poll(np); - npinfo = np->dev->npinfo; + udelay(USEC_PER_POLL); + } + netif_tx_unlock(dev); + } - /* avoid recursion */ - if (npinfo->poll_owner == smp_processor_id() || - np->dev->xmit_lock_owner == smp_processor_id()) { + if (status != NETDEV_TX_OK) { + /* requeue for later */ if (np->drop) np->drop(skb); else __kfree_skb(skb); - return; } - - do { - npinfo->tries--; - netif_tx_lock(np->dev); - - /* - * network drivers do not expect to be called if the queue is - * stopped. - */ - status = NETDEV_TX_BUSY; - if (!netif_queue_stopped(np->dev)) - status = np->dev->hard_start_xmit(skb, np->dev); - - netif_tx_unlock(np->dev); - - /* success */ - if(!status) { - npinfo->tries = MAX_RETRIES; /* reset */ - return; - } - - /* transmit busy */ - netpoll_poll(np); - udelay(50); - } while (npinfo->tries > 0); } void netpoll_send_udp(struct netpoll *np, const char *msg, int len) @@ -649,7 +644,7 @@ int netpoll_setup(struct netpoll *np) npinfo->rx_np = NULL; spin_lock_init(&npinfo->poll_lock); npinfo->poll_owner = -1; - npinfo->tries = MAX_RETRIES; + spin_lock_init(&npinfo->rx_lock); skb_queue_head_init(&npinfo->arp_tx); skb_queue_head_init(&npinfo->txq); -- cgit v1.2.3 From 5de4a473bda49554e4e9bd93b78f43c49a7ea69c Mon Sep 17 00:00:00 2001 From: Stephen Hemminger Date: Thu, 26 Oct 2006 15:46:55 -0700 Subject: netpoll queue cleanup The beast had a long and not very happy history. At one point, a friend (netdump) had asked that he open up a little. Well, the friend was long gone now, and the beast had this dangling piece hanging (netpoll_queue). It wasn't hard to stitch the netpoll_queue back in where it belonged and make everything tidy. 
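Folding netpoll_queue back into netpoll_send_skb (below) leaves one ordering rule worth spelling out: once anything is parked in txq, later packets must be queued too, or they would overtake the parked ones. A tiny illustrative model of that guard only; send_skb_model is an invented name:

	#include <stdio.h>

	static int deferred;			/* packets parked in the model's txq */

	static void send_skb_model(int pkt, int nic_ready)
	{
		if (deferred == 0 && nic_ready) {
			printf("direct xmit pkt %d\n", pkt);
			return;
		}
		deferred++;			/* skb_queue_tail() + schedule_work() */
		printf("queued pkt %d (queue depth %d)\n", pkt, deferred);
	}

	int main(void)
	{
		send_skb_model(1, 1);	/* goes straight to the driver */
		send_skb_model(2, 0);	/* NIC busy: parked for the worker */
		send_skb_model(3, 1);	/* NIC is free, but must queue to keep order */
		return 0;
	}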
Signed-off-by: Stephen Hemminger --- drivers/net/netconsole.c | 1 - include/linux/netpoll.h | 4 ++-- net/core/netpoll.c | 23 +++-------------------- 3 files changed, 5 insertions(+), 23 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index e6e8a9797b6..69233f6aa05 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -60,7 +60,6 @@ static struct netpoll np = { .local_port = 6665, .remote_port = 6666, .remote_mac = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .drop = netpoll_queue, }; static int configured = 0; diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index c65d12ec7bb..b7eb008c43d 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -18,7 +18,7 @@ struct netpoll { struct net_device *dev; char dev_name[16], *name; void (*rx_hook)(struct netpoll *, int, char *, int); - void (*drop)(struct sk_buff *skb); + u32 local_ip, remote_ip; u16 local_port, remote_port; unsigned char local_mac[6], remote_mac[6]; @@ -44,7 +44,7 @@ int netpoll_trap(void); void netpoll_set_trap(int trap); void netpoll_cleanup(struct netpoll *np); int __netpoll_rx(struct sk_buff *skb); -void netpoll_queue(struct sk_buff *skb); + #ifdef CONFIG_NETPOLL static inline int netpoll_rx(struct sk_buff *skb) diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 6b34c394672..0d1de3c47a0 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -77,19 +77,6 @@ static void queue_process(void *p) } } -void netpoll_queue(struct sk_buff *skb) -{ - struct net_device *dev = skb->dev; - struct netpoll_info *npinfo = dev->npinfo; - - if (!npinfo) - kfree_skb(skb); - else { - skb_queue_tail(&npinfo->txq, skb); - schedule_work(&npinfo->tx_work); - } -} - static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, unsigned short ulen, u32 saddr, u32 daddr) { @@ -256,7 +243,7 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) } /* don't get messages out of order, and no recursion */ - if ( !(np->drop == netpoll_queue && skb_queue_len(&npinfo->txq)) + if ( skb_queue_len(&npinfo->txq) == 0 && npinfo->poll_owner != smp_processor_id() && netif_tx_trylock(dev)) { @@ -277,11 +264,8 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) } if (status != NETDEV_TX_OK) { - /* requeue for later */ - if (np->drop) - np->drop(skb); - else - __kfree_skb(skb); + skb_queue_tail(&npinfo->txq, skb); + schedule_work(&npinfo->tx_work); } } @@ -809,4 +793,3 @@ EXPORT_SYMBOL(netpoll_setup); EXPORT_SYMBOL(netpoll_cleanup); EXPORT_SYMBOL(netpoll_send_udp); EXPORT_SYMBOL(netpoll_poll); -EXPORT_SYMBOL(netpoll_queue); -- cgit v1.2.3 From c68b907028d35b0ad5a98b5e5552f0ad56a9ba1d Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Tue, 14 Nov 2006 20:40:49 -0800 Subject: [NETPOLL]: Minor coding-style cleanups. Signed-off-by: David S. 
Miller --- net/core/netpoll.c | 99 ++++++++++++++++++++++++++---------------------------- 1 file changed, 48 insertions(+), 51 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 0d1de3c47a0..0746c81c57b 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -78,7 +78,7 @@ static void queue_process(void *p) } static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, - unsigned short ulen, u32 saddr, u32 daddr) + unsigned short ulen, u32 saddr, u32 daddr) { unsigned int psum; @@ -144,12 +144,11 @@ static void service_arp_queue(struct netpoll_info *npi) arp_reply(skb); skb = skb_dequeue(&npi->arp_tx); } - return; } void netpoll_poll(struct netpoll *np) { - if(!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) + if (!np->dev || !netif_running(np->dev) || !np->dev->poll_controller) return; /* Process pending work on NIC */ @@ -194,7 +193,7 @@ static void zap_completion_queue(void) while (clist != NULL) { struct sk_buff *skb = clist; clist = clist->next; - if(skb->destructor) + if (skb->destructor) dev_kfree_skb_any(skb); /* put this one back */ else __kfree_skb(skb); @@ -217,7 +216,7 @@ repeat: if (!skb) skb = skb_dequeue(&skb_pool); - if(!skb) { + if (!skb) { if (++count < 10) { netpoll_poll(np); goto repeat; @@ -243,12 +242,11 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) } /* don't get messages out of order, and no recursion */ - if ( skb_queue_len(&npinfo->txq) == 0 - && npinfo->poll_owner != smp_processor_id() - && netif_tx_trylock(dev)) { - + if (skb_queue_len(&npinfo->txq) == 0 && + npinfo->poll_owner != smp_processor_id() && + netif_tx_trylock(dev)) { /* try until next clock tick */ - for(tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { + for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (!netif_queue_stopped(dev)) status = dev->hard_start_xmit(skb, dev); @@ -384,8 +382,8 @@ static void arp_reply(struct sk_buff *skb) if (np->dev->hard_header && np->dev->hard_header(send_skb, skb->dev, ptype, - np->remote_mac, np->local_mac, - send_skb->len) < 0) { + np->remote_mac, np->local_mac, + send_skb->len) < 0) { kfree_skb(send_skb); return; } @@ -423,7 +421,6 @@ int __netpoll_rx(struct sk_buff *skb) struct netpoll_info *npi = skb->dev->npinfo; struct netpoll *np = npi->rx_np; - if (!np) goto out; if (skb->dev->type != ARPHRD_ETHER) @@ -496,47 +493,47 @@ int netpoll_parse_options(struct netpoll *np, char *opt) { char *cur=opt, *delim; - if(*cur != '@') { + if (*cur != '@') { if ((delim = strchr(cur, '@')) == NULL) goto parse_failed; - *delim=0; - np->local_port=simple_strtol(cur, NULL, 10); - cur=delim; + *delim = 0; + np->local_port = simple_strtol(cur, NULL, 10); + cur = delim; } cur++; printk(KERN_INFO "%s: local port %d\n", np->name, np->local_port); - if(*cur != '/') { + if (*cur != '/') { if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; - *delim=0; - np->local_ip=ntohl(in_aton(cur)); - cur=delim; + *delim = 0; + np->local_ip = ntohl(in_aton(cur)); + cur = delim; printk(KERN_INFO "%s: local IP %d.%d.%d.%d\n", np->name, HIPQUAD(np->local_ip)); } cur++; - if ( *cur != ',') { + if (*cur != ',') { /* parse out dev name */ if ((delim = strchr(cur, ',')) == NULL) goto parse_failed; - *delim=0; + *delim = 0; strlcpy(np->dev_name, cur, sizeof(np->dev_name)); - cur=delim; + cur = delim; } cur++; printk(KERN_INFO "%s: interface %s\n", np->name, np->dev_name); - if ( *cur != '@' ) { + if (*cur != '@') { /* dst port */ if ((delim = strchr(cur, '@')) 
== NULL) goto parse_failed; - *delim=0; - np->remote_port=simple_strtol(cur, NULL, 10); - cur=delim; + *delim = 0; + np->remote_port = simple_strtol(cur, NULL, 10); + cur = delim; } cur++; printk(KERN_INFO "%s: remote port %d\n", np->name, np->remote_port); @@ -544,42 +541,41 @@ int netpoll_parse_options(struct netpoll *np, char *opt) /* dst ip */ if ((delim = strchr(cur, '/')) == NULL) goto parse_failed; - *delim=0; - np->remote_ip=ntohl(in_aton(cur)); - cur=delim+1; + *delim = 0; + np->remote_ip = ntohl(in_aton(cur)); + cur = delim + 1; printk(KERN_INFO "%s: remote IP %d.%d.%d.%d\n", - np->name, HIPQUAD(np->remote_ip)); + np->name, HIPQUAD(np->remote_ip)); - if( *cur != 0 ) - { + if (*cur != 0) { /* MAC address */ if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; - *delim=0; - np->remote_mac[0]=simple_strtol(cur, NULL, 16); - cur=delim+1; + *delim = 0; + np->remote_mac[0] = simple_strtol(cur, NULL, 16); + cur = delim + 1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; - *delim=0; - np->remote_mac[1]=simple_strtol(cur, NULL, 16); - cur=delim+1; + *delim = 0; + np->remote_mac[1] = simple_strtol(cur, NULL, 16); + cur = delim + 1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; - *delim=0; - np->remote_mac[2]=simple_strtol(cur, NULL, 16); - cur=delim+1; + *delim = 0; + np->remote_mac[2] = simple_strtol(cur, NULL, 16); + cur = delim + 1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; - *delim=0; - np->remote_mac[3]=simple_strtol(cur, NULL, 16); - cur=delim+1; + *delim = 0; + np->remote_mac[3] = simple_strtol(cur, NULL, 16); + cur = delim + 1; if ((delim = strchr(cur, ':')) == NULL) goto parse_failed; - *delim=0; - np->remote_mac[4]=simple_strtol(cur, NULL, 16); - cur=delim+1; - np->remote_mac[5]=simple_strtol(cur, NULL, 16); + *delim = 0; + np->remote_mac[4] = simple_strtol(cur, NULL, 16); + cur = delim + 1; + np->remote_mac[5] = simple_strtol(cur, NULL, 16); } printk(KERN_INFO "%s: remote ethernet address " @@ -735,7 +731,8 @@ int netpoll_setup(struct netpoll *np) return err; } -static int __init netpoll_init(void) { +static int __init netpoll_init(void) +{ skb_queue_head_init(&skb_pool); return 0; } -- cgit v1.2.3 From 252e33467a3b016f20dd8df12269cef3b167f21e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 20:48:11 -0800 Subject: [NET] net/core: Annotations. Signed-off-by: Al Viro Signed-off-by: David S. 
Miller --- net/core/dev.c | 4 ++-- net/core/filter.c | 6 ++--- net/core/netpoll.c | 2 +- net/core/pktgen.c | 68 +++++++++++++++++++++++++----------------------------- 4 files changed, 38 insertions(+), 42 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/dev.c b/net/core/dev.c index 5bf13b132dd..a7be106d0fd 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1215,7 +1215,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; - int type = skb->protocol; + __be16 type = skb->protocol; int err; BUG_ON(skb_shinfo(skb)->frag_list); @@ -1766,7 +1766,7 @@ int netif_receive_skb(struct sk_buff *skb) struct packet_type *ptype, *pt_prev; struct net_device *orig_dev; int ret = NET_RX_DROP; - unsigned short type; + __be16 type; /* if we've gotten here through NAPI, check netpoll */ if (skb->dev->poll && netpoll_rx(skb)) diff --git a/net/core/filter.c b/net/core/filter.c index 6732782a5a4..0df843b667f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -178,7 +178,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { - A = ntohl(get_unaligned((u32 *)ptr)); + A = ntohl(get_unaligned((__be32 *)ptr)); continue; } break; @@ -187,7 +187,7 @@ load_w: load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { - A = ntohs(get_unaligned((u16 *)ptr)); + A = ntohs(get_unaligned((__be16 *)ptr)); continue; } break; @@ -261,7 +261,7 @@ load_b: */ switch (k-SKF_AD_OFF) { case SKF_AD_PROTOCOL: - A = htons(skb->protocol); + A = ntohs(skb->protocol); continue; case SKF_AD_PKTTYPE: A = skb->pkt_type; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 0746c81c57b..523141ee920 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -330,7 +330,7 @@ static void arp_reply(struct sk_buff *skb) struct arphdr *arp; unsigned char *arp_ptr; int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; - u32 sip, tip; + __be32 sip, tip; struct sk_buff *send_skb; struct netpoll *np = NULL; diff --git a/net/core/pktgen.c b/net/core/pktgen.c index 733d86d0a4f..1897a3a385d 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -207,7 +207,7 @@ static struct proc_dir_entry *pg_proc_dir = NULL; #define SVLAN_TAG_SIZE(x) ((x)->svlan_id == 0xffff ? 0 : 4) struct flow_state { - __u32 cur_daddr; + __be32 cur_daddr; int count; }; @@ -282,10 +282,10 @@ struct pktgen_dev { /* If we're doing ranges, random or incremental, then this * defines the min/max for those ranges. 
*/ - __u32 saddr_min; /* inclusive, source IP address */ - __u32 saddr_max; /* exclusive, source IP address */ - __u32 daddr_min; /* inclusive, dest IP address */ - __u32 daddr_max; /* exclusive, dest IP address */ + __be32 saddr_min; /* inclusive, source IP address */ + __be32 saddr_max; /* exclusive, source IP address */ + __be32 daddr_min; /* inclusive, dest IP address */ + __be32 daddr_max; /* exclusive, dest IP address */ __u16 udp_src_min; /* inclusive, source UDP port */ __u16 udp_src_max; /* exclusive, source UDP port */ @@ -317,8 +317,8 @@ struct pktgen_dev { __u32 cur_dst_mac_offset; __u32 cur_src_mac_offset; - __u32 cur_saddr; - __u32 cur_daddr; + __be32 cur_saddr; + __be32 cur_daddr; __u16 cur_udp_dst; __u16 cur_udp_src; __u32 cur_pkt_size; @@ -350,10 +350,10 @@ struct pktgen_dev { }; struct pktgen_hdr { - __u32 pgh_magic; - __u32 seq_num; - __u32 tv_sec; - __u32 tv_usec; + __be32 pgh_magic; + __be32 seq_num; + __be32 tv_sec; + __be32 tv_usec; }; struct pktgen_thread { @@ -2160,7 +2160,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) for(i = 0; i < pkt_dev->nr_labels; i++) if (pkt_dev->labels[i] & MPLS_STACK_BOTTOM) pkt_dev->labels[i] = MPLS_STACK_BOTTOM | - (pktgen_random() & + ((__force __be32)pktgen_random() & htonl(0x000fffff)); } @@ -2220,29 +2220,25 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) if (pkt_dev->cflows && pkt_dev->flows[flow].count != 0) { pkt_dev->cur_daddr = pkt_dev->flows[flow].cur_daddr; } else { - - if ((imn = ntohl(pkt_dev->daddr_min)) < (imx = - ntohl(pkt_dev-> - daddr_max))) - { + imn = ntohl(pkt_dev->daddr_min); + imx = ntohl(pkt_dev->daddr_max); + if (imn < imx) { __u32 t; + __be32 s; if (pkt_dev->flags & F_IPDST_RND) { - t = ((pktgen_random() % (imx - imn)) + - imn); - t = htonl(t); + t = pktgen_random() % (imx - imn) + imn; + s = htonl(t); - while (LOOPBACK(t) || MULTICAST(t) - || BADCLASS(t) || ZERONET(t) - || LOCAL_MCAST(t)) { - t = ((pktgen_random() % - (imx - imn)) + imn); - t = htonl(t); + while (LOOPBACK(s) || MULTICAST(s) + || BADCLASS(s) || ZERONET(s) + || LOCAL_MCAST(s)) { + t = (pktgen_random() % + (imx - imn)) + imn; + s = htonl(t); } - pkt_dev->cur_daddr = t; - } - - else { + pkt_dev->cur_daddr = s; + } else { t = ntohl(pkt_dev->cur_daddr); t++; if (t > imx) { @@ -2270,7 +2266,7 @@ static void mod_cur_headers(struct pktgen_dev *pkt_dev) for (i = 0; i < 4; i++) { pkt_dev->cur_in6_daddr.s6_addr32[i] = - ((pktgen_random() | + (((__force __be32)pktgen_random() | pkt_dev->min_in6_daddr.s6_addr32[i]) & pkt_dev->max_in6_daddr.s6_addr32[i]); } @@ -2377,7 +2373,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); - *(u16 *) & eth[12] = protocol; + *(__be16 *) & eth[12] = protocol; /* Eth + IPh + UDPh + mpls */ datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - @@ -2497,7 +2493,7 @@ static unsigned int scan_ip6(const char *s, char ip[16]) char suffix[16]; unsigned int prefixlen = 0; unsigned int suffixlen = 0; - __u32 tmp; + __be32 tmp; for (i = 0; i < 16; i++) ip[i] = 0; @@ -2713,7 +2709,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, udph = (struct udphdr *)skb_put(skb, sizeof(struct udphdr)); memcpy(eth, pkt_dev->hh, 12); - *(u16 *) & eth[12] = protocol; + *(__be16 *) & eth[12] = protocol; /* Eth + IPh + UDPh + mpls */ datalen = pkt_dev->cur_pkt_size - 14 - @@ -2732,11 +2728,11 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, udph->len = htons(datalen + sizeof(struct 
udphdr)); udph->check = 0; /* No checksum */ - *(u32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ + *(__be32 *) iph = __constant_htonl(0x60000000); /* Version + flow */ if (pkt_dev->traffic_class) { /* Version + traffic class + flow (0) */ - *(u32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); + *(__be32 *)iph |= htonl(0x60000000 | (pkt_dev->traffic_class << 20)); } iph->hop_limit = 32; -- cgit v1.2.3 From d3bc23e7ee9db8023dff5a86bb3b0069ed018789 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 21:24:49 -0800 Subject: [NET]: Annotate callers of csum_fold() in net/* Signed-off-by: Al Viro Signed-off-by: David S. Miller --- net/core/datagram.c | 8 ++++---- net/core/dev.c | 4 ++-- net/core/netpoll.c | 2 +- net/core/skbuff.c | 4 ++-- net/ipv4/icmp.c | 4 ++-- net/ipv4/igmp.c | 2 +- net/ipv4/ip_gre.c | 4 ++-- net/ipv4/ip_output.c | 2 +- net/ipv4/ipmr.c | 2 +- net/ipv4/ipvs/ip_vs_core.c | 2 +- net/ipv4/netfilter.c | 2 +- net/ipv4/tcp.c | 8 +++++--- net/sunrpc/socklib.c | 2 +- 13 files changed, 24 insertions(+), 22 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/datagram.c b/net/core/datagram.c index f558c61aecc..e5a05a046fe 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -413,9 +413,9 @@ fault: unsigned int __skb_checksum_complete(struct sk_buff *skb) { - unsigned int sum; + __sum16 sum; - sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); + sum = csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)); if (likely(!sum)) { if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); @@ -441,7 +441,7 @@ EXPORT_SYMBOL(__skb_checksum_complete); int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, int hlen, struct iovec *iov) { - unsigned int csum; + __wsum csum; int chunk = skb->len - hlen; /* Skip filled elements. 
@@ -460,7 +460,7 @@ int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb, if (skb_copy_and_csum_datagram(skb, hlen, iov->iov_base, chunk, &csum)) goto fault; - if ((unsigned short)csum_fold(csum)) + if (csum_fold(csum)) goto csum_error; if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); diff --git a/net/core/dev.c b/net/core/dev.c index a7be106d0fd..1a36b17f4b5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1169,7 +1169,7 @@ EXPORT_SYMBOL(netif_device_attach); */ int skb_checksum_help(struct sk_buff *skb) { - unsigned int csum; + __wsum csum; int ret = 0, offset = skb->h.raw - skb->data; if (skb->ip_summed == CHECKSUM_COMPLETE) @@ -1193,7 +1193,7 @@ int skb_checksum_help(struct sk_buff *skb) BUG_ON(offset <= 0); BUG_ON(skb->csum + 2 > offset); - *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); + *(__sum16*)(skb->h.raw + skb->csum) = csum_fold(csum); out_set_summed: skb->ip_summed = CHECKSUM_NONE; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 523141ee920..edd3246873e 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -88,7 +88,7 @@ static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0); if (skb->ip_summed == CHECKSUM_COMPLETE && - !(u16)csum_fold(csum_add(psum, skb->csum))) + !csum_fold(csum_add(psum, skb->csum))) return 0; skb->csum = psum; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index b3dea1ef953..dfa02cc8d68 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1396,7 +1396,7 @@ unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) { - unsigned int csum; + __wsum csum; long csstart; if (skb->ip_summed == CHECKSUM_PARTIAL) @@ -1416,7 +1416,7 @@ void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to) if (skb->ip_summed == CHECKSUM_PARTIAL) { long csstuff = csstart + skb->csum; - *((unsigned short *)(to + csstuff)) = csum_fold(csum); + *((__sum16 *)(to + csstuff)) = csum_fold(csum); } } diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index b39a37a4754..cb9da0842b7 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -356,7 +356,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param, ip_flush_pending_frames(icmp_socket->sk); else if ((skb = skb_peek(&icmp_socket->sk->sk_write_queue)) != NULL) { struct icmphdr *icmph = skb->h.icmph; - unsigned int csum = 0; + __wsum csum = 0; struct sk_buff *skb1; skb_queue_walk(&icmp_socket->sk->sk_write_queue, skb1) { @@ -931,7 +931,7 @@ int icmp_rcv(struct sk_buff *skb) switch (skb->ip_summed) { case CHECKSUM_COMPLETE: - if (!(u16)csum_fold(skb->csum)) + if (!csum_fold(skb->csum)) break; /* fall through */ case CHECKSUM_NONE: diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 6eee71647b7..0017ccb01d6 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -932,7 +932,7 @@ int igmp_rcv(struct sk_buff *skb) switch (skb->ip_summed) { case CHECKSUM_COMPLETE: - if (!(u16)csum_fold(skb->csum)) + if (!csum_fold(skb->csum)) break; /* fall through */ case CHECKSUM_NONE: diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 25221146deb..2bf54adee8c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -557,7 +557,7 @@ static int ipgre_rcv(struct sk_buff *skb) struct iphdr *iph; u8 *h; __be16 flags; - u16 csum = 0; + __sum16 csum = 0; __be32 key = 0; u32 seqno = 0; struct ip_tunnel *tunnel; @@ -580,7 +580,7 @@ static int ipgre_rcv(struct sk_buff *skb) if (flags&GRE_CSUM) { switch (skb->ip_summed) { case 
CHECKSUM_COMPLETE: - csum = (u16)csum_fold(skb->csum); + csum = csum_fold(skb->csum); if (!csum) break; /* fall through */ diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 90942a384a4..5f3e35c0363 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1384,7 +1384,7 @@ void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *ar &ipc, rt, MSG_DONTWAIT); if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { if (arg->csumoffset >= 0) - *((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); + *((__sum16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum)); skb->ip_summed = CHECKSUM_NONE; ip_push_pending_frames(sk); } diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 97cfa97c8ab..efcf45ecc81 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1493,7 +1493,7 @@ static int pim_rcv(struct sk_buff * skb) if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) || (pim->flags&PIM_NULL_REGISTER) || (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 && - (u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))) + csum_fold(skb_checksum(skb, 0, skb->len, 0)))) goto drop; /* check if the inner packet is destined to mcast group */ diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 1445bb47fea..fac2dffd667 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -538,7 +538,7 @@ static unsigned int ip_vs_post_routing(unsigned int hooknum, u16 ip_vs_checksum_complete(struct sk_buff *skb, int offset) { - return (u16) csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); + return csum_fold(skb_checksum(skb, offset, skb->len - offset, 0)); } static inline struct sk_buff * diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index e49441ac357..b797a37c01c 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -172,7 +172,7 @@ unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, case CHECKSUM_COMPLETE: if (hook != NF_IP_PRE_ROUTING && hook != NF_IP_LOCAL_IN) break; - if ((protocol == 0 && !(u16)csum_fold(skb->csum)) || + if ((protocol == 0 && !csum_fold(skb->csum)) || !csum_tcpudp_magic(iph->saddr, iph->daddr, skb->len - dataoff, protocol, skb->csum)) { diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index dadef867a3b..168f9de906b 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2162,7 +2162,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) struct tcphdr *th; unsigned thlen; unsigned int seq; - unsigned int delta; + __be32 delta; unsigned int oldlen; unsigned int len; @@ -2215,7 +2215,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) do { th->fin = th->psh = 0; - th->check = ~csum_fold(th->check + delta); + th->check = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); if (skb->ip_summed != CHECKSUM_PARTIAL) th->check = csum_fold(csum_partial(skb->h.raw, thlen, skb->csum)); @@ -2229,7 +2230,8 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features) } while (skb->next); delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len); - th->check = ~csum_fold(th->check + delta); + th->check = ~csum_fold((__force __wsum)((__force u32)th->check + + (__force u32)delta)); if (skb->ip_summed != CHECKSUM_PARTIAL) th->check = csum_fold(csum_partial(skb->h.raw, thlen, skb->csum)); diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c index 6f17527b9e6..61a038fc30c 100644 --- a/net/sunrpc/socklib.c +++ b/net/sunrpc/socklib.c @@ -166,7 +166,7 @@ int 
csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb) } if (desc.count) return -1; - if ((unsigned short)csum_fold(desc.csum)) + if (csum_fold(desc.csum)) return -1; if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE)) netdev_rx_csum_fault(skb->dev); -- cgit v1.2.3 From d6f5493c1a42b1203e5b0cb0507afd00bb2927bf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 21:26:08 -0800 Subject: [NET]: Annotate callers of csum_tcpudp_nofold() in net/* Signed-off-by: Al Viro Signed-off-by: David S. Miller --- include/net/ip.h | 2 +- net/core/netpoll.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/include/net/ip.h b/include/net/ip.h index 412e8114667..eafbcaf728f 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -135,7 +135,7 @@ static inline void ip_tr_mc_map(__be32 addr, char *buf) struct ip_reply_arg { struct kvec iov[1]; - u32 csum; + __wsum csum; int csumoffset; /* u16 offset of csum in iov[0].iov_base */ /* -1 if not needed */ }; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index edd3246873e..26ee1791aa0 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -78,9 +78,9 @@ static void queue_process(void *p) } static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, - unsigned short ulen, u32 saddr, u32 daddr) + unsigned short ulen, __be32 saddr, __be32 daddr) { - unsigned int psum; + __wsum psum; if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY) return 0; -- cgit v1.2.3 From b51655b958dfb1176bfcf99466231fdbef8751ff Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 14 Nov 2006 21:40:42 -0800 Subject: [NET]: Annotate __skb_checksum_complete() and friends. Signed-off-by: Al Viro Signed-off-by: David S. Miller --- include/linux/netfilter.h | 6 +++--- include/linux/netfilter_ipv4.h | 2 +- include/linux/netfilter_ipv6.h | 2 +- include/linux/skbuff.h | 2 +- include/net/tcp.h | 2 +- include/net/udp.h | 8 ++++---- net/core/datagram.c | 2 +- net/core/netpoll.c | 4 ++-- net/ipv4/netfilter.c | 4 ++-- net/ipv4/tcp_input.c | 4 ++-- net/ipv4/tcp_ipv4.c | 2 +- net/ipv6/netfilter.c | 4 ++-- net/ipv6/tcp_ipv6.c | 2 +- 13 files changed, 22 insertions(+), 22 deletions(-) (limited to 'net/core/netpoll.c') diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b7e67d1d438..707bb2e53c4 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h @@ -290,7 +290,7 @@ extern u_int16_t nf_proto_csum_update(struct sk_buff *skb, struct nf_afinfo { unsigned short family; - unsigned int (*checksum)(struct sk_buff *skb, unsigned int hook, + __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); void (*saveroute)(const struct sk_buff *skb, struct nf_info *info); @@ -305,12 +305,12 @@ static inline struct nf_afinfo *nf_get_afinfo(unsigned short family) return rcu_dereference(nf_afinfo[family]); } -static inline unsigned int +static inline __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol, unsigned short family) { struct nf_afinfo *afinfo; - unsigned int csum = 0; + __sum16 csum = 0; rcu_read_lock(); afinfo = nf_get_afinfo(family); diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 5b63a231a76..5821eb5a0a3 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h @@ -79,7 +79,7 @@ enum nf_ip_hook_priorities { #ifdef __KERNEL__ extern int ip_route_me_harder(struct sk_buff **pskb, unsigned addr_type); extern int ip_xfrm_me_harder(struct 
sk_buff **pskb); -extern unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, +extern __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); #endif /*__KERNEL__*/ diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index d97e268cdfe..ab81a6dc94e 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h @@ -74,7 +74,7 @@ enum nf_ip6_hook_priorities { #ifdef CONFIG_NETFILTER extern int ip6_route_me_harder(struct sk_buff *skb); -extern unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, +extern __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol); extern int ipv6_netfilter_init(void); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 874ca029fbb..41753667541 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1398,7 +1398,7 @@ static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval * extern void __net_timestamp(struct sk_buff *skb); -extern unsigned int __skb_checksum_complete(struct sk_buff *skb); +extern __sum16 __skb_checksum_complete(struct sk_buff *skb); /** * skb_checksum_complete - Calculate checksum of an entire packet diff --git a/include/net/tcp.h b/include/net/tcp.h index 826aaecdb99..aa7989c5379 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -814,7 +814,7 @@ static inline __sum16 tcp_v4_check(struct tcphdr *th, int len, return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base); } -static inline int __tcp_checksum_complete(struct sk_buff *skb) +static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb) { return __skb_checksum_complete(skb); } diff --git a/include/net/udp.h b/include/net/udp.h index 39e825a6909..c5ccd9a3387 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -69,15 +69,15 @@ struct sk_buff; /* * Generic checksumming routines for UDP(-Lite) v4 and v6 */ -static inline u16 __udp_lib_checksum_complete(struct sk_buff *skb) +static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb) { if (! 
UDP_SKB_CB(skb)->partial_cov) return __skb_checksum_complete(skb); - return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov, - skb->csum)); + return csum_fold(skb_checksum(skb, 0, UDP_SKB_CB(skb)->cscov, + skb->csum)); } -static __inline__ int udp_lib_checksum_complete(struct sk_buff *skb) +static inline __sum16 udp_lib_checksum_complete(struct sk_buff *skb) { return skb->ip_summed != CHECKSUM_UNNECESSARY && __udp_lib_checksum_complete(skb); diff --git a/net/core/datagram.c b/net/core/datagram.c index 0d9c9bac400..797fdd4352c 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -411,7 +411,7 @@ fault: return -EFAULT; } -unsigned int __skb_checksum_complete(struct sk_buff *skb) +__sum16 __skb_checksum_complete(struct sk_buff *skb) { __sum16 sum; diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 26ee1791aa0..8be3681d3d8 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -77,8 +77,8 @@ static void queue_process(void *p) } } -static int checksum_udp(struct sk_buff *skb, struct udphdr *uh, - unsigned short ulen, __be32 saddr, __be32 daddr) +static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, + unsigned short ulen, __be32 saddr, __be32 daddr) { __wsum psum; diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c index b797a37c01c..a68966059b5 100644 --- a/net/ipv4/netfilter.c +++ b/net/ipv4/netfilter.c @@ -162,11 +162,11 @@ static int nf_ip_reroute(struct sk_buff **pskb, const struct nf_info *info) return 0; } -unsigned int nf_ip_checksum(struct sk_buff *skb, unsigned int hook, +__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { struct iphdr *iph = skb->nh.iph; - unsigned int csum = 0; + __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6ab3423674b..9304034c0c4 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3790,9 +3790,9 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) return err; } -static int __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) +static __sum16 __tcp_checksum_complete_user(struct sock *sk, struct sk_buff *skb) { - int result; + __sum16 result; if (sock_owned_by_user(sk)) { local_bh_enable(); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 0ca8dead03b..dadf8027241 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -1544,7 +1544,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb) return sk; } -static int tcp_v4_checksum_init(struct sk_buff *skb) +static __sum16 tcp_v4_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr, diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 8d1b542806c..f6294e5bcb3 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -80,11 +80,11 @@ static int nf_ip6_reroute(struct sk_buff **pskb, const struct nf_info *info) return 0; } -unsigned int nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, +__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, u_int8_t protocol) { struct ipv6hdr *ip6h = skb->nh.ipv6h; - unsigned int csum = 0; + __sum16 csum = 0; switch (skb->ip_summed) { case CHECKSUM_COMPLETE: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 394bc54c5c2..147ce499f50 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1527,7 +1527,7 @@ out: return NULL; } -static int tcp_v6_checksum_init(struct 
sk_buff *skb) +static __sum16 tcp_v6_checksum_init(struct sk_buff *skb) { if (skb->ip_summed == CHECKSUM_COMPLETE) { if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr, -- cgit v1.2.3 From 5e57dff2c49956e24f27f9b6d2ec0db967680c39 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Nov 2006 18:08:13 -0800 Subject: [NETPOLL]: Another udp checksum mangling. Signed-off-by: Al Viro Signed-off-by: David S. Miller --- net/core/netpoll.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'net/core/netpoll.c') diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 8be3681d3d8..3c58846fcaa 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -296,7 +296,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) udp_len, IPPROTO_UDP, csum_partial((unsigned char *)udph, udp_len, 0)); if (udph->check == 0) - udph->check = -1; + udph->check = CSUM_MANGLED_0; skb->nh.iph = iph = (struct iphdr *)skb_push(skb, sizeof(*iph)); -- cgit v1.2.3
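That final one-liner is easy to misread without the UDP rule behind it: RFC 768 reserves an on-wire checksum of zero to mean "checksum disabled", so a computed checksum that folds to zero must be transmitted as 0xFFFF, which the kernel spells CSUM_MANGLED_0. A short stand-alone illustration of the fold-and-substitute step; udp_wire_check is a made-up helper, not a kernel function:

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t udp_wire_check(uint32_t sum32)
	{
		uint16_t check;

		/* fold the 32-bit one's-complement accumulator down to 16 bits */
		while (sum32 >> 16)
			sum32 = (sum32 & 0xffff) + (sum32 >> 16);
		check = (uint16_t)~sum32;

		return check ? check : 0xffff;	/* 0 on the wire would mean "no checksum" */
	}

	int main(void)
	{
		printf("0x%04x\n", udp_wire_check(0xffff));	/* folds to 0, sent as 0xffff */
		printf("0x%04x\n", udp_wire_check(0x1234));	/* ordinary case: 0xedcb */
		return 0;
	}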