author    Stephen Hemminger <shemminger@vyatta.com>  2009-08-27 13:55:08 +0000
committer David S. Miller <davem@davemloft.net>      2009-08-28 23:31:47 -0700
commit    475ac1e4099a005e1307c416df19f2100b7a838d (patch)
tree      433b73676a18606fb871ef385d4423691f509f11 /net/core
parent    648fda7404630ba85ce462ee1279e1bc027ad915 (diff)
pktgen: change inlining
Don't force inlining where it is not needed; GCC does a better job of deciding when to inline local functions.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
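For illustration only (not part of the patch), a minimal standalone sketch of the idea the commit relies on: a small static function with no `inline` keyword, which GCC will typically fold into its caller on its own at -O2. The names `scale` and the compile command are assumptions for this example.

	/* Minimal sketch, not from pktgen.c: GCC inlines small static
	 * functions at -O2 without an explicit "inline" keyword, so
	 * forcing inlining in the source adds nothing.
	 */
	#include <stdio.h>

	/* No "inline" here; the compiler may still inline the call site. */
	static int scale(int x)
	{
		return x * 3 + 1;
	}

	int main(void)
	{
		/* Build with "gcc -O2 -S sketch.c" and inspect the assembly:
		 * the call to scale() is usually folded into main(). */
		printf("%d\n", scale(14));
		return 0;
	}

This mirrors what the diff below does inside pktgen.c: the `inline`/`__inline__` annotations are dropped from local static functions and the inlining decision is left to the compiler.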
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/pktgen.c  15
1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index bb4631329b0..f96e9f09079 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2438,7 +2438,7 @@ error:
return err;
}
-static inline void free_SAs(struct pktgen_dev *pkt_dev)
+static void free_SAs(struct pktgen_dev *pkt_dev)
{
if (pkt_dev->cflows) {
/* let go of the SAs if we have them */
@@ -2453,7 +2453,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
}
}
-static inline int process_ipsec(struct pktgen_dev *pkt_dev,
+static int process_ipsec(struct pktgen_dev *pkt_dev,
struct sk_buff *skb, __be16 protocol)
{
if (pkt_dev->flags & F_IPSEC_ON) {
@@ -3029,8 +3029,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev,
return skb;
}
-static inline struct sk_buff *fill_packet(struct net_device *odev,
- struct pktgen_dev *pkt_dev)
+static struct sk_buff *fill_packet(struct net_device *odev,
+ struct pktgen_dev *pkt_dev)
{
if (pkt_dev->flags & F_IPV6)
return fill_packet_ipv6(odev, pkt_dev);
@@ -3341,13 +3341,12 @@ static void pktgen_rem_thread(struct pktgen_thread *t)
mutex_unlock(&pktgen_thread_lock);
}
-static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
+static void pktgen_xmit(struct pktgen_dev *pkt_dev)
{
struct net_device *odev = pkt_dev->odev;
int (*xmit)(struct sk_buff *, struct net_device *)
= odev->netdev_ops->ndo_start_xmit;
struct netdev_queue *txq;
- __u64 idle_start = 0;
u16 queue_map;
int ret;
@@ -3379,7 +3378,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (netif_tx_queue_stopped(txq) ||
netif_tx_queue_frozen(txq) ||
need_resched()) {
- idle_start = getCurUs();
+ u64 idle_start = getCurUs();
if (!netif_running(odev)) {
pktgen_stop_device(pkt_dev);
@@ -3475,7 +3474,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
/* If pkt_dev->count is zero, then run forever */
if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
if (atomic_read(&(pkt_dev->skb->users)) != 1) {
- idle_start = getCurUs();
+ u64 idle_start = getCurUs();
while (atomic_read(&(pkt_dev->skb->users)) != 1) {
if (signal_pending(current)) {
break;