author    David Woodhouse <David.Woodhouse@intel.com>  2008-09-01 11:32:13 +0100
committer David Woodhouse <David.Woodhouse@intel.com>  2008-09-01 11:32:13 +0100
commit    9d7548d4ca3c52ecb58f098a32b0756cdf8f96ee (patch)
tree      651f7058bbaa2d8b2855286380d614afcf505118 /net
parent    31db6e9ea1dbdcf66b8227b4f7035dee1b1dd8c0 (diff)
parent    bef69ea0dcce574a425feb0a5aa4c63dd108b9a6 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/af_bluetooth.c | 2
-rw-r--r--  net/bluetooth/bnep/core.c | 2
-rw-r--r--  net/bluetooth/hci_sysfs.c | 376
-rw-r--r--  net/bluetooth/l2cap.c | 2
-rw-r--r--  net/bluetooth/rfcomm/core.c | 2
-rw-r--r--  net/bluetooth/sco.c | 2
-rw-r--r--  net/bridge/br_device.c | 15
-rw-r--r--  net/core/datagram.c | 87
-rw-r--r--  net/core/dev.c | 49
-rw-r--r--  net/core/pktgen.c | 29
-rw-r--r--  net/core/skbuff.c | 12
-rw-r--r--  net/dccp/input.c | 12
-rw-r--r--  net/dccp/proto.c | 5
-rw-r--r--  net/ipv4/icmp.c | 22
-rw-r--r--  net/ipv4/igmp.c | 71
-rw-r--r--  net/ipv4/ipvs/ip_vs_app.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_conn.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_ctl.c | 27
-rw-r--r--  net/ipv4/ipvs/ip_vs_dh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_est.c | 116
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lblcr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_lc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_nq.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_proto.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_rr.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sched.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_sed.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sh.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_sync.c | 4
-rw-r--r--  net/ipv4/ipvs/ip_vs_wlc.c | 2
-rw-r--r--  net/ipv4/ipvs/ip_vs_wrr.c | 2
-rw-r--r--  net/ipv4/netfilter/ipt_addrtype.c | 2
-rw-r--r--  net/ipv4/netfilter/nf_nat_proto_common.c | 8
-rw-r--r--  net/ipv4/route.c | 100
-rw-r--r--  net/ipv4/tcp_output.c | 6
-rw-r--r--  net/ipv4/udp.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 4
-rw-r--r--  net/ipv6/fib6_rules.c | 3
-rw-r--r--  net/ipv6/icmp.c | 23
-rw-r--r--  net/ipv6/ip6_fib.c | 1
-rw-r--r--  net/ipv6/ip6_output.c | 2
-rw-r--r--  net/ipv6/ipv6_sockglue.c | 4
-rw-r--r--  net/ipv6/ndisc.c | 2
-rw-r--r--  net/ipv6/route.c | 13
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c | 2
-rw-r--r--  net/ipv6/udp.c | 6
-rw-r--r--  net/ipv6/xfrm6_policy.c | 4
-rw-r--r--  net/mac80211/debugfs_netdev.c | 24
-rw-r--r--  net/mac80211/ieee80211_i.h | 6
-rw-r--r--  net/mac80211/mesh.c | 2
-rw-r--r--  net/mac80211/mlme.c | 54
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c | 36
-rw-r--r--  net/rfkill/rfkill.c | 16
-rw-r--r--  net/rxrpc/ar-accept.c | 2
-rw-r--r--  net/sched/act_api.c | 13
-rw-r--r--  net/sched/cls_api.c | 2
-rw-r--r--  net/sched/sch_api.c | 131
-rw-r--r--  net/sched/sch_cbq.c | 10
-rw-r--r--  net/sched/sch_generic.c | 76
-rw-r--r--  net/sched/sch_hfsc.c | 4
-rw-r--r--  net/sched/sch_htb.c | 11
-rw-r--r--  net/sched/sch_prio.c | 4
-rw-r--r--  net/sched/sch_tbf.c | 11
-rw-r--r--  net/sctp/auth.c | 7
-rw-r--r--  net/sctp/endpointola.c | 4
-rw-r--r--  net/sctp/ipv6.c | 3
-rw-r--r--  net/sctp/socket.c | 96
-rw-r--r--  net/tipc/subscr.c | 2
-rw-r--r--  net/wireless/wext.c | 1
-rw-r--r--  net/xfrm/xfrm_output.c | 5
71 files changed, 911 insertions, 662 deletions
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 4e59df5f8e0..1edfdf4c095 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -456,7 +456,7 @@ static void __exit bt_exit(void)
subsys_initcall(bt_init);
module_exit(bt_exit);
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth Core ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 12bba6207a8..80ba30cf4b6 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -736,7 +736,7 @@ MODULE_PARM_DESC(compress_src, "Compress sources headers");
module_param(compress_dst, bool, 0644);
MODULE_PARM_DESC(compress_dst, "Compress destination headers");
-MODULE_AUTHOR("David Libault <david.libault@inventel.fr>, Maxim Krasnyansky <maxk@qualcomm.com>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
index c85bf8f678d..f4f6615cad9 100644
--- a/net/bluetooth/hci_sysfs.c
+++ b/net/bluetooth/hci_sysfs.c
@@ -3,8 +3,6 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/platform_device.h>
-
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -12,10 +10,164 @@
#undef BT_DBG
#define BT_DBG(D...)
#endif
+
+struct class *bt_class = NULL;
+EXPORT_SYMBOL_GPL(bt_class);
+
static struct workqueue_struct *btaddconn;
static struct workqueue_struct *btdelconn;
-static inline char *typetostr(int type)
+static inline char *link_typetostr(int type)
+{
+ switch (type) {
+ case ACL_LINK:
+ return "ACL";
+ case SCO_LINK:
+ return "SCO";
+ case ESCO_LINK:
+ return "eSCO";
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hci_conn *conn = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", link_typetostr(conn->type));
+}
+
+static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hci_conn *conn = dev_get_drvdata(dev);
+ bdaddr_t bdaddr;
+ baswap(&bdaddr, &conn->dst);
+ return sprintf(buf, "%s\n", batostr(&bdaddr));
+}
+
+static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct hci_conn *conn = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+ conn->features[0], conn->features[1],
+ conn->features[2], conn->features[3],
+ conn->features[4], conn->features[5],
+ conn->features[6], conn->features[7]);
+}
+
+#define LINK_ATTR(_name,_mode,_show,_store) \
+struct device_attribute link_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+static LINK_ATTR(type, S_IRUGO, show_link_type, NULL);
+static LINK_ATTR(address, S_IRUGO, show_link_address, NULL);
+static LINK_ATTR(features, S_IRUGO, show_link_features, NULL);
+
+static struct attribute *bt_link_attrs[] = {
+ &link_attr_type.attr,
+ &link_attr_address.attr,
+ &link_attr_features.attr,
+ NULL
+};
+
+static struct attribute_group bt_link_group = {
+ .attrs = bt_link_attrs,
+};
+
+static struct attribute_group *bt_link_groups[] = {
+ &bt_link_group,
+ NULL
+};
+
+static void bt_link_release(struct device *dev)
+{
+ void *data = dev_get_drvdata(dev);
+ kfree(data);
+}
+
+static struct device_type bt_link = {
+ .name = "link",
+ .groups = bt_link_groups,
+ .release = bt_link_release,
+};
+
+static void add_conn(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
+
+ flush_workqueue(btdelconn);
+
+ if (device_add(&conn->dev) < 0) {
+ BT_ERR("Failed to register connection device");
+ return;
+ }
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+ struct hci_dev *hdev = conn->hdev;
+
+ BT_DBG("conn %p", conn);
+
+ conn->dev.type = &bt_link;
+ conn->dev.class = bt_class;
+ conn->dev.parent = &hdev->dev;
+
+ snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d",
+ hdev->name, conn->handle);
+
+ dev_set_drvdata(&conn->dev, conn);
+
+ device_initialize(&conn->dev);
+
+ INIT_WORK(&conn->work, add_conn);
+
+ queue_work(btaddconn, &conn->work);
+}
+
+/*
+ * The rfcomm tty device will possibly retain even when conn
+ * is down, and sysfs doesn't support move zombie device,
+ * so we should move the device before conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
+{
+ return !strncmp(dev->bus_id, "rfcomm", 6);
+}
+
+static void del_conn(struct work_struct *work)
+{
+ struct hci_conn *conn = container_of(work, struct hci_conn, work);
+ struct hci_dev *hdev = conn->hdev;
+
+ while (1) {
+ struct device *dev;
+
+ dev = device_find_child(&conn->dev, NULL, __match_tty);
+ if (!dev)
+ break;
+ device_move(dev, NULL);
+ put_device(dev);
+ }
+
+ device_del(&conn->dev);
+ put_device(&conn->dev);
+ hci_dev_put(hdev);
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+ BT_DBG("conn %p", conn);
+
+ if (!device_is_registered(&conn->dev))
+ return;
+
+ INIT_WORK(&conn->work, del_conn);
+
+ queue_work(btdelconn, &conn->work);
+}
+
+static inline char *host_typetostr(int type)
{
switch (type) {
case HCI_VIRTUAL:
@@ -40,7 +192,7 @@ static inline char *typetostr(int type)
static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf)
{
struct hci_dev *hdev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", typetostr(hdev->type));
+ return sprintf(buf, "%s\n", host_typetostr(hdev->type));
}
static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf)
@@ -221,183 +373,62 @@ static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR,
static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR,
show_sniff_min_interval, store_sniff_min_interval);
-static struct device_attribute *bt_attrs[] = {
- &dev_attr_type,
- &dev_attr_name,
- &dev_attr_class,
- &dev_attr_address,
- &dev_attr_features,
- &dev_attr_manufacturer,
- &dev_attr_hci_version,
- &dev_attr_hci_revision,
- &dev_attr_inquiry_cache,
- &dev_attr_idle_timeout,
- &dev_attr_sniff_max_interval,
- &dev_attr_sniff_min_interval,
+static struct attribute *bt_host_attrs[] = {
+ &dev_attr_type.attr,
+ &dev_attr_name.attr,
+ &dev_attr_class.attr,
+ &dev_attr_address.attr,
+ &dev_attr_features.attr,
+ &dev_attr_manufacturer.attr,
+ &dev_attr_hci_version.attr,
+ &dev_attr_hci_revision.attr,
+ &dev_attr_inquiry_cache.attr,
+ &dev_attr_idle_timeout.attr,
+ &dev_attr_sniff_max_interval.attr,
+ &dev_attr_sniff_min_interval.attr,
NULL
};
-static ssize_t show_conn_type(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_conn *conn = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", conn->type == ACL_LINK ? "ACL" : "SCO");
-}
-
-static ssize_t show_conn_address(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_conn *conn = dev_get_drvdata(dev);
- bdaddr_t bdaddr;
- baswap(&bdaddr, &conn->dst);
- return sprintf(buf, "%s\n", batostr(&bdaddr));
-}
-
-static ssize_t show_conn_features(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct hci_conn *conn = dev_get_drvdata(dev);
-
- return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
- conn->features[0], conn->features[1],
- conn->features[2], conn->features[3],
- conn->features[4], conn->features[5],
- conn->features[6], conn->features[7]);
-}
-
-#define CONN_ATTR(_name,_mode,_show,_store) \
-struct device_attribute conn_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-static CONN_ATTR(type, S_IRUGO, show_conn_type, NULL);
-static CONN_ATTR(address, S_IRUGO, show_conn_address, NULL);
-static CONN_ATTR(features, S_IRUGO, show_conn_features, NULL);
-
-static struct device_attribute *conn_attrs[] = {
- &conn_attr_type,
- &conn_attr_address,
- &conn_attr_features,
- NULL
+static struct attribute_group bt_host_group = {
+ .attrs = bt_host_attrs,
};
-struct class *bt_class = NULL;
-EXPORT_SYMBOL_GPL(bt_class);
-
-static struct bus_type bt_bus = {
- .name = "bluetooth",
+static struct attribute_group *bt_host_groups[] = {
+ &bt_host_group,
+ NULL
};
-static struct platform_device *bt_platform;
-
-static void bt_release(struct device *dev)
+static void bt_host_release(struct device *dev)
{
void *data = dev_get_drvdata(dev);
kfree(data);
}
-static void add_conn(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn, work);
- int i;
-
- flush_workqueue(btdelconn);
-
- if (device_add(&conn->dev) < 0) {
- BT_ERR("Failed to register connection device");
- return;
- }
-
- for (i = 0; conn_attrs[i]; i++)
- if (device_create_file(&conn->dev, conn_attrs[i]) < 0)
- BT_ERR("Failed to create connection attribute");
-}
-
-void hci_conn_add_sysfs(struct hci_conn *conn)
-{
- struct hci_dev *hdev = conn->hdev;
-
- BT_DBG("conn %p", conn);
-
- conn->dev.bus = &bt_bus;
- conn->dev.parent = &hdev->dev;
-
- conn->dev.release = bt_release;
-
- snprintf(conn->dev.bus_id, BUS_ID_SIZE, "%s:%d",
- hdev->name, conn->handle);
-
- dev_set_drvdata(&conn->dev, conn);
-
- device_initialize(&conn->dev);
-
- INIT_WORK(&conn->work, add_conn);
-
- queue_work(btaddconn, &conn->work);
-}
-
-/*
- * The rfcomm tty device will possibly retain even when conn
- * is down, and sysfs doesn't support move zombie device,
- * so we should move the device before conn device is destroyed.
- */
-static int __match_tty(struct device *dev, void *data)
-{
- return !strncmp(dev->bus_id, "rfcomm", 6);
-}
-
-static void del_conn(struct work_struct *work)
-{
- struct hci_conn *conn = container_of(work, struct hci_conn, work);
- struct hci_dev *hdev = conn->hdev;
-
- while (1) {
- struct device *dev;
-
- dev = device_find_child(&conn->dev, NULL, __match_tty);
- if (!dev)
- break;
- device_move(dev, NULL);
- put_device(dev);
- }
-
- device_del(&conn->dev);
- put_device(&conn->dev);
- hci_dev_put(hdev);
-}
-
-void hci_conn_del_sysfs(struct hci_conn *conn)
-{
- BT_DBG("conn %p", conn);
-
- if (!device_is_registered(&conn->dev))
- return;
-
- INIT_WORK(&conn->work, del_conn);
-
- queue_work(btdelconn, &conn->work);
-}
+static struct device_type bt_host = {
+ .name = "host",
+ .groups = bt_host_groups,
+ .release = bt_host_release,
+};
int hci_register_sysfs(struct hci_dev *hdev)
{
struct device *dev = &hdev->dev;
- unsigned int i;
int err;
BT_DBG("%p name %s type %d", hdev, hdev->name, hdev->type);
- dev->bus = &bt_bus;
+ dev->type = &bt_host;
+ dev->class = bt_class;
dev->parent = hdev->parent;
strlcpy(dev->bus_id, hdev->name, BUS_ID_SIZE);
- dev->release = bt_release;
-
dev_set_drvdata(dev, hdev);
err = device_register(dev);
if (err < 0)
return err;
- for (i = 0; bt_attrs[i]; i++)
- if (device_create_file(dev, bt_attrs[i]) < 0)
- BT_ERR("Failed to create device attribute");
-
return 0;
}
@@ -410,59 +441,30 @@ void hci_unregister_sysfs(struct hci_dev *hdev)
int __init bt_sysfs_init(void)
{
- int err;
-
btaddconn = create_singlethread_workqueue("btaddconn");
- if (!btaddconn) {
- err = -ENOMEM;
- goto out;
- }
+ if (!btaddconn)
+ return -ENOMEM;
btdelconn = create_singlethread_workqueue("btdelconn");
if (!btdelconn) {
- err = -ENOMEM;
- goto out_del;
- }
-
- bt_platform = platform_device_register_simple("bluetooth", -1, NULL, 0);
- if (IS_ERR(bt_platform)) {
- err = PTR_ERR(bt_platform);
- goto out_platform;
+ destroy_workqueue(btaddconn);
+ return -ENOMEM;
}
- err = bus_register(&bt_bus);
- if (err < 0)
- goto out_bus;
-
bt_class = class_create(THIS_MODULE, "bluetooth");
if (IS_ERR(bt_class)) {
- err = PTR_ERR(bt_class);
- goto out_class;
+ destroy_workqueue(btdelconn);
+ destroy_workqueue(btaddconn);
+ return PTR_ERR(bt_class);
}
return 0;
-
-out_class:
- bus_unregister(&bt_bus);
-out_bus:
- platform_device_unregister(bt_platform);
-out_platform:
- destroy_workqueue(btdelconn);
-out_del:
- destroy_workqueue(btaddconn);
-out:
- return err;
}
void bt_sysfs_cleanup(void)
{
destroy_workqueue(btaddconn);
-
destroy_workqueue(btdelconn);
class_destroy(bt_class);
-
- bus_unregister(&bt_bus);
-
- platform_device_unregister(bt_platform);
}
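
For reference, the hci_sysfs.c rework above replaces per-attribute device_create_file() loops with static attribute groups hung off a struct device_type, so the driver core creates and removes the sysfs files itself when the device is added and deleted. A minimal sketch of that pattern, assuming hypothetical foo_* names that are not part of this patch:

#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>

static ssize_t show_foo(struct device *dev, struct device_attribute *attr, char *buf)
{
        /* illustrative value; a real driver would read it from drvdata */
        return sprintf(buf, "%d\n", 42);
}

static DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL);

static struct attribute *foo_attrs[] = {
        &dev_attr_foo.attr,
        NULL
};

static struct attribute_group foo_group = {
        .attrs = foo_attrs,
};

static struct attribute_group *foo_groups[] = {
        &foo_group,
        NULL
};

static struct device_type foo_type = {
        .name   = "foo",
        .groups = foo_groups,
        /* .release would free the per-device data, as bt_link_release does */
};

/*
 * At registration time the device only points at the type:
 *      dev->type = &foo_type;
 *      device_register(dev);   -- the files in foo_group appear automatically
 */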
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index c1239852834..3396d5bdef1 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -2516,7 +2516,7 @@ EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c
index 6cfc7ba611b..ba537fae0a4 100644
--- a/net/bluetooth/rfcomm/core.c
+++ b/net/bluetooth/rfcomm/core.c
@@ -2115,7 +2115,7 @@ MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");
module_param(l2cap_mtu, uint, 0644);
MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8cda4987486..a16011fedc1 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1002,7 +1002,7 @@ module_exit(sco_exit);
module_param(disable_esco, bool, 0644);
MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation");
-MODULE_AUTHOR("Maxim Krasnyansky <maxk@qualcomm.com>, Marcel Holtmann <marcel@holtmann.org>");
+MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth SCO ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 9b58d70b0e7..4f52c3d50eb 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -148,11 +148,16 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
}
static struct ethtool_ops br_ethtool_ops = {
- .get_drvinfo = br_getinfo,
- .get_link = ethtool_op_get_link,
- .set_sg = br_set_sg,
- .set_tx_csum = br_set_tx_csum,
- .set_tso = br_set_tso,
+ .get_drvinfo = br_getinfo,
+ .get_link = ethtool_op_get_link,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = br_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = br_set_sg,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = br_set_tso,
+ .get_ufo = ethtool_op_get_ufo,
+ .get_flags = ethtool_op_get_flags,
};
void br_dev_setup(struct net_device *dev)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index dd61dcad601..52f577a0f54 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -339,6 +339,93 @@ fault:
return -EFAULT;
}
+/**
+ * skb_copy_datagram_from_iovec - Copy a datagram from an iovec.
+ * @skb: buffer to copy
+ * @offset: offset in the buffer to start copying to
+ * @from: io vector to copy from
+ * @len: amount of data to copy to buffer from iovec
+ *
+ * Returns 0 or -EFAULT.
+ * Note: the iovec is modified during the copy.
+ */
+int skb_copy_datagram_from_iovec(struct sk_buff *skb, int offset,
+ struct iovec *from, int len)
+{
+ int start = skb_headlen(skb);
+ int i, copy = start - offset;
+
+ /* Copy header. */
+ if (copy > 0) {
+ if (copy > len)
+ copy = len;
+ if (memcpy_fromiovec(skb->data + offset, from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+
+ /* Copy paged appendix. Hmm... why does this look so complicated? */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + skb_shinfo(skb)->frags[i].size;
+ if ((copy = end - offset) > 0) {
+ int err;
+ u8 *vaddr;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = frag->page;
+
+ if (copy > len)
+ copy = len;
+ vaddr = kmap(page);
+ err = memcpy_fromiovec(vaddr + frag->page_offset +
+ offset - start, from, copy);
+ kunmap(page);
+ if (err)
+ goto fault;
+
+ if (!(len -= copy))
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+
+ if (skb_shinfo(skb)->frag_list) {
+ struct sk_buff *list = skb_shinfo(skb)->frag_list;
+
+ for (; list; list = list->next) {
+ int end;
+
+ WARN_ON(start > offset + len);
+
+ end = start + list->len;
+ if ((copy = end - offset) > 0) {
+ if (copy > len)
+ copy = len;
+ if (skb_copy_datagram_from_iovec(list,
+ offset - start,
+ from, copy))
+ goto fault;
+ if ((len -= copy) == 0)
+ return 0;
+ offset += copy;
+ }
+ start = end;
+ }
+ }
+ if (!len)
+ return 0;
+
+fault:
+ return -EFAULT;
+}
+EXPORT_SYMBOL(skb_copy_datagram_from_iovec);
+
static int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset,
u8 __user *to, int len,
__wsum *csump)
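
A hedged usage sketch for the skb_copy_datagram_from_iovec() helper added above: the example_build_skb() wrapper, its allocation size and GFP flags are assumptions for illustration, not code from this patch.

#include <linux/skbuff.h>
#include <linux/uio.h>
#include <linux/slab.h>

/* Illustrative only: fill a freshly allocated skb from a user iovec. */
static struct sk_buff *example_build_skb(struct iovec *iov, int len)
{
        struct sk_buff *skb = alloc_skb(len, GFP_KERNEL);

        if (!skb)
                return NULL;

        skb_put(skb, len);      /* make room for the payload */

        /* note: the iovec is modified (consumed) during the copy */
        if (skb_copy_datagram_from_iovec(skb, 0, iov, len)) {
                kfree_skb(skb);
                return NULL;
        }
        return skb;
}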
diff --git a/net/core/dev.c b/net/core/dev.c
index 600bb23c4c2..60c51f76588 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1339,19 +1339,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
}
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
{
- if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
- struct softnet_data *sd;
- unsigned long flags;
+ struct softnet_data *sd;
+ unsigned long flags;
- local_irq_save(flags);
- sd = &__get_cpu_var(softnet_data);
- q->next_sched = sd->output_queue;
- sd->output_queue = q;
- raise_softirq_irqoff(NET_TX_SOFTIRQ);
- local_irq_restore(flags);
- }
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
}
EXPORT_SYMBOL(__netif_schedule);
@@ -1800,9 +1804,13 @@ gso:
spin_lock(root_lock);
- rc = qdisc_enqueue_root(skb, q);
- qdisc_run(q);
-
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
spin_unlock(root_lock);
goto out;
@@ -1974,15 +1982,17 @@ static void net_tx_action(struct softirq_action *h)
head = head->next_sched;
- smp_mb__before_clear_bit();
- clear_bit(__QDISC_STATE_SCHED, &q->state);
-
root_lock = qdisc_lock(q);
if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
qdisc_run(q);
spin_unlock(root_lock);
} else {
- __netif_schedule(q);
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state))
+ __netif_reschedule(q);
}
}
}
@@ -2084,7 +2094,8 @@ static int ing_filter(struct sk_buff *skb)
q = rxq->qdisc;
if (q != &noop_qdisc) {
spin_lock(qdisc_lock(q));
- result = qdisc_enqueue_root(skb, q);
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
spin_unlock(qdisc_lock(q));
}
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 52623645390..a756847e381 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1961,6 +1961,8 @@ static int pktgen_setup_dev(struct pktgen_dev *pkt_dev, const char *ifname)
*/
static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
{
+ int ntxq;
+
if (!pkt_dev->odev) {
printk(KERN_ERR "pktgen: ERROR: pkt_dev->odev == NULL in "
"setup_inject.\n");
@@ -1969,6 +1971,33 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
return;
}
+ /* make sure that we don't pick a non-existing transmit queue */
+ ntxq = pkt_dev->odev->real_num_tx_queues;
+ if (ntxq <= num_online_cpus() && (pkt_dev->flags & F_QUEUE_MAP_CPU)) {
+ printk(KERN_WARNING "pktgen: WARNING: QUEUE_MAP_CPU "
+ "disabled because CPU count (%d) exceeds number ",
+ num_online_cpus());
+ printk(KERN_WARNING "pktgen: WARNING: of tx queues "
+ "(%d) on %s \n", ntxq, pkt_dev->odev->name);
+ pkt_dev->flags &= ~F_QUEUE_MAP_CPU;
+ }
+ if (ntxq <= pkt_dev->queue_map_min) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_min (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_min);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_min = ntxq - 1;
+ }
+ if (ntxq <= pkt_dev->queue_map_max) {
+ printk(KERN_WARNING "pktgen: WARNING: Requested "
+ "queue_map_max (%d) exceeds number of tx\n",
+ pkt_dev->queue_map_max);
+ printk(KERN_WARNING "pktgen: WARNING: queues (%d) on "
+ "%s, resetting\n", ntxq, pkt_dev->odev->name);
+ pkt_dev->queue_map_max = ntxq - 1;
+ }
+
/* Default to the interface's mac if not explicitly set. */
if (is_zero_ether_addr(pkt_dev->src_mac))
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 84640172d65..ca1ccdf1ef7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2256,14 +2256,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
segs = nskb;
tail = nskb;
- nskb->dev = skb->dev;
- skb_copy_queue_mapping(nskb, skb);
- nskb->priority = skb->priority;
- nskb->protocol = skb->protocol;
- nskb->vlan_tci = skb->vlan_tci;
- nskb->dst = dst_clone(skb->dst);
- memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
- nskb->pkt_type = skb->pkt_type;
+ __copy_skb_header(nskb, skb);
nskb->mac_len = skb->mac_len;
skb_reserve(nskb, headroom);
@@ -2274,6 +2267,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
skb_copy_from_linear_data(skb, skb_put(nskb, doffset),
doffset);
if (!sg) {
+ nskb->ip_summed = CHECKSUM_NONE;
nskb->csum = skb_copy_and_csum_bits(skb, offset,
skb_put(nskb, len),
len, 0);
@@ -2283,8 +2277,6 @@ struct sk_buff *skb_segment(struct sk_buff *skb, int features)
frag = skb_shinfo(nskb)->frags;
k = 0;
- nskb->ip_summed = CHECKSUM_PARTIAL;
- nskb->csum = skb->csum;
skb_copy_from_linear_data_offset(skb, offset,
skb_put(nskb, hsize), hsize);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index df2f110df94..803933ab396 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -411,12 +411,6 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
struct dccp_sock *dp = dccp_sk(sk);
long tstamp = dccp_timestamp();
- /* Stop the REQUEST timer */
- inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
- WARN_ON(sk->sk_send_head == NULL);
- __kfree_skb(sk->sk_send_head);
- sk->sk_send_head = NULL;
-
if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
dp->dccps_awl, dp->dccps_awh)) {
dccp_pr_debug("invalid ackno: S.AWL=%llu, "
@@ -441,6 +435,12 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
DCCP_ACKVEC_STATE_RECEIVED))
goto out_invalid_packet; /* FIXME: change error code */
+ /* Stop the REQUEST timer */
+ inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
+ WARN_ON(sk->sk_send_head == NULL);
+ kfree_skb(sk->sk_send_head);
+ sk->sk_send_head = NULL;
+
dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_update_gsr(sk, dp->dccps_isr);
/*
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index b622d974485..1ca3b26eed0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -474,6 +474,11 @@ static int dccp_setsockopt_change(struct sock *sk, int type,
if (copy_from_user(&opt, optval, sizeof(opt)))
return -EFAULT;
+ /*
+ * rfc4340: 6.1. Change Options
+ */
+ if (opt.dccpsf_len < 1)
+ return -EINVAL;
val = kmalloc(opt.dccpsf_len, GFP_KERNEL);
if (!val)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 860558633b2..55c355e6323 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -204,18 +204,22 @@ static struct sock *icmp_sk(struct net *net)
return net->ipv4.icmp_sk[smp_processor_id()];
}
-static inline int icmp_xmit_lock(struct sock *sk)
+static inline struct sock *icmp_xmit_lock(struct net *net)
{
+ struct sock *sk;
+
local_bh_disable();
+ sk = icmp_sk(net);
+
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
local_bh_enable();
- return 1;
+ return NULL;
}
- return 0;
+ return sk;
}
static inline void icmp_xmit_unlock(struct sock *sk)
@@ -354,15 +358,17 @@ static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
struct ipcm_cookie ipc;
struct rtable *rt = skb->rtable;
struct net *net = dev_net(rt->u.dst.dev);
- struct sock *sk = icmp_sk(net);
- struct inet_sock *inet = inet_sk(sk);
+ struct sock *sk;
+ struct inet_sock *inet;
__be32 daddr;
if (ip_options_echo(&icmp_param->replyopts, skb))
return;
- if (icmp_xmit_lock(sk))
+ sk = icmp_xmit_lock(net);
+ if (sk == NULL)
return;
+ inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
@@ -419,7 +425,6 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
if (!rt)
goto out;
net = dev_net(rt->u.dst.dev);
- sk = icmp_sk(net);
/*
* Find the original header. It is expected to be valid, of course.
@@ -483,7 +488,8 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
}
}
- if (icmp_xmit_lock(sk))
+ sk = icmp_xmit_lock(net);
+ if (sk == NULL)
return;
/*
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6203ece5360..f70fac61259 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -289,6 +289,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct rtable *rt;
struct iphdr *pip;
struct igmpv3_report *pig;
+ struct net *net = dev_net(dev);
skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
@@ -299,7 +300,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
.nl_u = { .ip4_u = {
.daddr = IGMPV3_ALL_MCR } },
.proto = IPPROTO_IGMP };
- if (ip_route_output_key(&init_net, &rt, &fl)) {
+ if (ip_route_output_key(net, &rt, &fl)) {
kfree_skb(skb);
return NULL;
}
@@ -629,6 +630,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct igmphdr *ih;
struct rtable *rt;
struct net_device *dev = in_dev->dev;
+ struct net *net = dev_net(dev);
__be32 group = pmc ? pmc->multiaddr : 0;
__be32 dst;
@@ -643,7 +645,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
struct flowi fl = { .oif = dev->ifindex,
.nl_u = { .ip4_u = { .daddr = dst } },
.proto = IPPROTO_IGMP };
- if (ip_route_output_key(&init_net, &rt, &fl))
+ if (ip_route_output_key(net, &rt, &fl))
return -1;
}
if (rt->rt_src == 0) {
@@ -1196,9 +1198,6 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (im=in_dev->mc_list; im; im=im->next) {
if (im->multiaddr == addr) {
im->users++;
@@ -1278,9 +1277,6 @@ void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
if (i->multiaddr==addr) {
if (--i->users == 0) {
@@ -1308,9 +1304,6 @@ void ip_mc_down(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
for (i=in_dev->mc_list; i; i=i->next)
igmp_group_dropped(i);
@@ -1331,9 +1324,6 @@ void ip_mc_init_dev(struct in_device *in_dev)
{
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
in_dev->mc_tomb = NULL;
#ifdef CONFIG_IP_MULTICAST
in_dev->mr_gq_running = 0;
@@ -1357,9 +1347,6 @@ void ip_mc_up(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
for (i=in_dev->mc_list; i; i=i->next)
@@ -1376,9 +1363,6 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
ASSERT_RTNL();
- if (!net_eq(dev_net(in_dev->dev), &init_net))
- return;
-
/* Deactivate timers */
ip_mc_down(in_dev);
@@ -1395,7 +1379,7 @@ void ip_mc_destroy_dev(struct in_device *in_dev)
write_unlock_bh(&in_dev->mc_list_lock);
}
-static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
+static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
{
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = imr->imr_multiaddr.s_addr } } };
@@ -1404,19 +1388,19 @@ static struct in_device * ip_mc_find_dev(struct ip_mreqn *imr)
struct in_device *idev = NULL;
if (imr->imr_ifindex) {
- idev = inetdev_by_index(&init_net, imr->imr_ifindex);
+ idev = inetdev_by_index(net, imr->imr_ifindex);
if (idev)
__in_dev_put(idev);
return idev;
}
if (imr->imr_address.s_addr) {
- dev = ip_dev_find(&init_net, imr->imr_address.s_addr);
+ dev = ip_dev_find(net, imr->imr_address.s_addr);
if (!dev)
return NULL;
dev_put(dev);
}
- if (!dev && !ip_route_output_key(&init_net, &rt, &fl)) {
+ if (!dev && !ip_route_output_key(net, &rt, &fl)) {
dev = rt->u.dst.dev;
ip_rt_put(rt);
}
@@ -1754,18 +1738,16 @@ int ip_mc_join_group(struct sock *sk , struct ip_mreqn *imr)
struct ip_mc_socklist *iml=NULL, *i;
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
+ struct net *net = sock_net(sk);
int ifindex;
int count = 0;
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
- in_dev = ip_mc_find_dev(imr);
+ in_dev = ip_mc_find_dev(net, imr);
if (!in_dev) {
iml = NULL;
@@ -1827,15 +1809,13 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml, **imlp;
struct in_device *in_dev;
+ struct net *net = sock_net(sk);
__be32 group = imr->imr_multiaddr.s_addr;
u32 ifindex;
int ret = -EADDRNOTAVAIL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
- in_dev = ip_mc_find_dev(imr);
+ in_dev = ip_mc_find_dev(net, imr);
ifindex = imr->imr_ifindex;
for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
if (iml->multi.imr_multiaddr.s_addr != group)
@@ -1873,21 +1853,19 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
struct in_device *in_dev = NULL;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
+ struct net *net = sock_net(sk);
int leavegroup = 0;
int i, j, rv;
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
imr.imr_address.s_addr = mreqs->imr_interface;
imr.imr_ifindex = ifindex;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2007,6 +1985,7 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *newpsl, *psl;
+ struct net *net = sock_net(sk);
int leavegroup = 0;
if (!ipv4_is_multicast(addr))
@@ -2015,15 +1994,12 @@ int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
msf->imsf_fmode != MCAST_EXCLUDE)
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = ifindex;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2094,19 +2070,17 @@ int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
struct in_device *in_dev;
struct inet_sock *inet = inet_sk(sk);
struct ip_sf_socklist *psl;
+ struct net *net = sock_net(sk);
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
imr.imr_address.s_addr = msf->imsf_interface;
imr.imr_ifindex = 0;
- in_dev = ip_mc_find_dev(&imr);
+ in_dev = ip_mc_find_dev(net, &imr);
if (!in_dev) {
err = -ENODEV;
@@ -2163,9 +2137,6 @@ int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
if (!ipv4_is_multicast(addr))
return -EINVAL;
- if (!net_eq(sock_net(sk), &init_net))
- return -EPROTONOSUPPORT;
-
rtnl_lock();
err = -EADDRNOTAVAIL;
@@ -2246,19 +2217,17 @@ void ip_mc_drop_socket(struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
struct ip_mc_socklist *iml;
+ struct net *net = sock_net(sk);
if (inet->mc_list == NULL)
return;
- if (!net_eq(sock_net(sk), &init_net))
- return;
-
rtnl_lock();
while ((iml = inet->mc_list) != NULL) {
struct in_device *in_dev;
inet->mc_list = iml->next;
- in_dev = inetdev_by_index(&init_net, iml->multi.imr_ifindex);
+ in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
(void) ip_mc_leave_src(sk, iml, in_dev);
if (in_dev != NULL) {
ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c
index 1f1897a1a70..201b8ea3020 100644
--- a/net/ipv4/ipvs/ip_vs_app.c
+++ b/net/ipv4/ipvs/ip_vs_app.c
@@ -608,7 +608,7 @@ int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
}
-int ip_vs_app_init(void)
+int __init ip_vs_app_init(void)
{
/* we will replace it with proc_net_ipvs_create() soon */
proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
diff --git a/net/ipv4/ipvs/ip_vs_conn.c b/net/ipv4/ipvs/ip_vs_conn.c
index f8bdae47a77..44a6872dc24 100644
--- a/net/ipv4/ipvs/ip_vs_conn.c
+++ b/net/ipv4/ipvs/ip_vs_conn.c
@@ -965,7 +965,7 @@ static void ip_vs_conn_flush(void)
}
-int ip_vs_conn_init(void)
+int __init ip_vs_conn_init(void)
{
int idx;
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c
index 9a5ace0b4dd..6379705a8dc 100644
--- a/net/ipv4/ipvs/ip_vs_ctl.c
+++ b/net/ipv4/ipvs/ip_vs_ctl.c
@@ -683,9 +683,22 @@ static void
ip_vs_zero_stats(struct ip_vs_stats *stats)
{
spin_lock_bh(&stats->lock);
- memset(stats, 0, (char *)&stats->lock - (char *)stats);
- spin_unlock_bh(&stats->lock);
+
+ stats->conns = 0;
+ stats->inpkts = 0;
+ stats->outpkts = 0;
+ stats->inbytes = 0;
+ stats->outbytes = 0;
+
+ stats->cps = 0;
+ stats->inpps = 0;
+ stats->outpps = 0;
+ stats->inbps = 0;
+ stats->outbps = 0;
+
ip_vs_zero_estimator(stats);
+
+ spin_unlock_bh(&stats->lock);
}
/*
@@ -1589,7 +1602,7 @@ static struct ctl_table vs_vars[] = {
{ .ctl_name = 0 }
};
-struct ctl_path net_vs_ctl_path[] = {
+const struct ctl_path net_vs_ctl_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
{ .procname = "vs", },
@@ -1784,7 +1797,9 @@ static const struct file_operations ip_vs_info_fops = {
#endif
-struct ip_vs_stats ip_vs_stats;
+struct ip_vs_stats ip_vs_stats = {
+ .lock = __SPIN_LOCK_UNLOCKED(ip_vs_stats.lock),
+};
#ifdef CONFIG_PROC_FS
static int ip_vs_stats_show(struct seq_file *seq, void *v)
@@ -2306,7 +2321,7 @@ static struct nf_sockopt_ops ip_vs_sockopts = {
};
-int ip_vs_control_init(void)
+int __init ip_vs_control_init(void)
{
int ret;
int idx;
@@ -2333,8 +2348,6 @@ int ip_vs_control_init(void)
INIT_LIST_HEAD(&ip_vs_rtable[idx]);
}
- memset(&ip_vs_stats, 0, sizeof(ip_vs_stats));
- spin_lock_init(&ip_vs_stats.lock);
ip_vs_new_estimator(&ip_vs_stats);
/* Hook the defense timer */
diff --git a/net/ipv4/ipvs/ip_vs_dh.c b/net/ipv4/ipvs/ip_vs_dh.c
index 8afc1503ed2..fa66824d264 100644
--- a/net/ipv4/ipvs/ip_vs_dh.c
+++ b/net/ipv4/ipvs/ip_vs_dh.c
@@ -233,6 +233,7 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
.name = "dh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_dh_scheduler.n_list),
.init_service = ip_vs_dh_init_svc,
.done_service = ip_vs_dh_done_svc,
.update_service = ip_vs_dh_update_svc,
@@ -242,7 +243,6 @@ static struct ip_vs_scheduler ip_vs_dh_scheduler =
static int __init ip_vs_dh_init(void)
{
- INIT_LIST_HEAD(&ip_vs_dh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_dh_scheduler);
}
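
The same one-line change recurs in every IPVS scheduler that follows: the n_list member is now initialised at compile time with LIST_HEAD_INIT() in the static scheduler definition instead of at run time with INIT_LIST_HEAD() in the module init function. A minimal sketch of the two equivalent forms, using a hypothetical struct foo rather than the real scheduler type:

#include <linux/init.h>
#include <linux/list.h>

struct foo {
        const char *name;
        struct list_head n_list;
};

/* compile-time initialisation: the list node is valid before any code runs */
static struct foo foo_instance = {
        .name   = "foo",
        .n_list = LIST_HEAD_INIT(foo_instance.n_list),
};

/* run-time equivalent, which the patch removes from the *_init() functions */
static int __init foo_init(void)
{
        INIT_LIST_HEAD(&foo_instance.n_list);
        return 0;
}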
diff --git a/net/ipv4/ipvs/ip_vs_est.c b/net/ipv4/ipvs/ip_vs_est.c
index bc04eedd6db..5a20f93bd7f 100644
--- a/net/ipv4/ipvs/ip_vs_est.c
+++ b/net/ipv4/ipvs/ip_vs_est.c
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
+#include <linux/list.h>
#include <net/ip_vs.h>
@@ -44,28 +45,11 @@
*/
-struct ip_vs_estimator
-{
- struct ip_vs_estimator *next;
- struct ip_vs_stats *stats;
-
- u32 last_conns;
- u32 last_inpkts;
- u32 last_outpkts;
- u64 last_inbytes;
- u64 last_outbytes;
-
- u32 cps;
- u32 inpps;
- u32 outpps;
- u32 inbps;
- u32 outbps;
-};
-
+static void estimation_timer(unsigned long arg);
-static struct ip_vs_estimator *est_list = NULL;
-static DEFINE_RWLOCK(est_lock);
-static struct timer_list est_timer;
+static LIST_HEAD(est_list);
+static DEFINE_SPINLOCK(est_lock);
+static DEFINE_TIMER(est_timer, estimation_timer, 0, 0);
static void estimation_timer(unsigned long arg)
{
@@ -76,9 +60,9 @@ static void estimation_timer(unsigned long arg)
u64 n_inbytes, n_outbytes;
u32 rate;
- read_lock(&est_lock);
- for (e = est_list; e; e = e->next) {
- s = e->stats;
+ spin_lock(&est_lock);
+ list_for_each_entry(e, &est_list, list) {
+ s = container_of(e, struct ip_vs_stats, est);
spin_lock(&s->lock);
n_conns = s->conns;
@@ -114,19 +98,16 @@ static void estimation_timer(unsigned long arg)
s->outbps = (e->outbps+0xF)>>5;
spin_unlock(&s->lock);
}
- read_unlock(&est_lock);
+ spin_unlock(&est_lock);
mod_timer(&est_timer, jiffies + 2*HZ);
}
-int ip_vs_new_estimator(struct ip_vs_stats *stats)
+void ip_vs_new_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *est;
+ struct ip_vs_estimator *est = &stats->est;
- est = kzalloc(sizeof(*est), GFP_KERNEL);
- if (est == NULL)
- return -ENOMEM;
+ INIT_LIST_HEAD(&est->list);
- est->stats = stats;
est->last_conns = stats->conns;
est->cps = stats->cps<<10;
@@ -142,59 +123,40 @@ int ip_vs_new_estimator(struct ip_vs_stats *stats)
est->last_outbytes = stats->outbytes;
est->outbps = stats->outbps<<5;
- write_lock_bh(&est_lock);
- est->next = est_list;
- if (est->next == NULL) {
- setup_timer(&est_timer, estimation_timer, 0);
- est_timer.expires = jiffies + 2*HZ;
- add_timer(&est_timer);
- }
- est_list = est;
- write_unlock_bh(&est_lock);
- return 0;
+ spin_lock_bh(&est_lock);
+ if (list_empty(&est_list))
+ mod_timer(&est_timer, jiffies + 2 * HZ);
+ list_add(&est->list, &est_list);
+ spin_unlock_bh(&est_lock);
}
void ip_vs_kill_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *est, **pest;
- int killed = 0;
-
- write_lock_bh(&est_lock);
- pest = &est_list;
- while ((est=*pest) != NULL) {
- if (est->stats != stats) {
- pest = &est->next;
- continue;
- }
- *pest = est->next;
- kfree(est);
- killed++;
+ struct ip_vs_estimator *est = &stats->est;
+
+ spin_lock_bh(&est_lock);
+ list_del(&est->list);
+ while (list_empty(&est_list) && try_to_del_timer_sync(&est_timer) < 0) {
+ spin_unlock_bh(&est_lock);
+ cpu_relax();
+ spin_lock_bh(&est_lock);
}
- if (killed && est_list == NULL)
- del_timer_sync(&est_timer);
- write_unlock_bh(&est_lock);
+ spin_unlock_bh(&est_lock);
}
void ip_vs_zero_estimator(struct ip_vs_stats *stats)
{
- struct ip_vs_estimator *e;
-
- write_lock_bh(&est_lock);
- for (e = est_list; e; e = e->next) {
- if (e->stats != stats)
- continue;
-
- /* set counters zero */
- e->last_conns = 0;
- e->last_inpkts = 0;
- e->last_outpkts = 0;
- e->last_inbytes = 0;
- e->last_outbytes = 0;
- e->cps = 0;
- e->inpps = 0;
- e->outpps = 0;
- e->inbps = 0;
- e->outbps = 0;
- }
- write_unlock_bh(&est_lock);
+ struct ip_vs_estimator *est = &stats->est;
+
+ /* set counters zero, caller must hold the stats->lock lock */
+ est->last_inbytes = 0;
+ est->last_outbytes = 0;
+ est->last_conns = 0;
+ est->last_inpkts = 0;
+ est->last_outpkts = 0;
+ est->cps = 0;
+ est->inpps = 0;
+ est->outpps = 0;
+ est->inbps = 0;
+ est->outbps = 0;
}
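
For reference, the ip_vs_est.c rework above embeds the estimator inside its owning stats structure and walks a standard list_head list, recovering the enclosing object with container_of(). A stripped-down sketch of that pattern, with placeholder est/stats structures that are not the real IPVS types:

#include <linux/kernel.h>       /* container_of() */
#include <linux/list.h>
#include <linux/types.h>

struct est {
        struct list_head list;
        u32 cps;
};

struct stats {
        u32 conns;
        struct est est;         /* embedded: no separate kmalloc()/kfree() */
};

static LIST_HEAD(est_list);

static void walk_estimators(void)
{
        struct est *e;

        list_for_each_entry(e, &est_list, list) {
                /* get back to the enclosing stats object from the member */
                struct stats *s = container_of(e, struct stats, est);

                e->cps = s->conns;      /* illustrative update */
        }
}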
diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c
index 0efa3db4b18..7a6a319f544 100644
--- a/net/ipv4/ipvs/ip_vs_lblc.c
+++ b/net/ipv4/ipvs/ip_vs_lblc.c
@@ -539,6 +539,7 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler =
.name = "lblc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lblc_scheduler.n_list),
.init_service = ip_vs_lblc_init_svc,
.done_service = ip_vs_lblc_done_svc,
.update_service = ip_vs_lblc_update_svc,
@@ -550,7 +551,6 @@ static int __init ip_vs_lblc_init(void)
{
int ret;
- INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler);
if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c
index 8e3bbeb4513..c234e73968a 100644
--- a/net/ipv4/ipvs/ip_vs_lblcr.c
+++ b/net/ipv4/ipvs/ip_vs_lblcr.c
@@ -728,6 +728,7 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
.name = "lblcr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
.init_service = ip_vs_lblcr_init_svc,
.done_service = ip_vs_lblcr_done_svc,
.update_service = ip_vs_lblcr_update_svc,
@@ -739,7 +740,6 @@ static int __init ip_vs_lblcr_init(void)
{
int ret;
- INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
if (ret)
diff --git a/net/ipv4/ipvs/ip_vs_lc.c b/net/ipv4/ipvs/ip_vs_lc.c
index ac9f08e065d..ebcdbf75ac6 100644
--- a/net/ipv4/ipvs/ip_vs_lc.c
+++ b/net/ipv4/ipvs/ip_vs_lc.c
@@ -98,6 +98,7 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
.name = "lc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_lc_scheduler.n_list),
.init_service = ip_vs_lc_init_svc,
.done_service = ip_vs_lc_done_svc,
.update_service = ip_vs_lc_update_svc,
@@ -107,7 +108,6 @@ static struct ip_vs_scheduler ip_vs_lc_scheduler = {
static int __init ip_vs_lc_init(void)
{
- INIT_LIST_HEAD(&ip_vs_lc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_lc_scheduler) ;
}
diff --git a/net/ipv4/ipvs/ip_vs_nq.c b/net/ipv4/ipvs/ip_vs_nq.c
index a46bf258d42..92f3a677003 100644
--- a/net/ipv4/ipvs/ip_vs_nq.c
+++ b/net/ipv4/ipvs/ip_vs_nq.c
@@ -136,6 +136,7 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
.name = "nq",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_nq_scheduler.n_list),
.init_service = ip_vs_nq_init_svc,
.done_service = ip_vs_nq_done_svc,
.update_service = ip_vs_nq_update_svc,
@@ -145,7 +146,6 @@ static struct ip_vs_scheduler ip_vs_nq_scheduler =
static int __init ip_vs_nq_init(void)
{
- INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c
index 876714f23d6..6099a88fc20 100644
--- a/net/ipv4/ipvs/ip_vs_proto.c
+++ b/net/ipv4/ipvs/ip_vs_proto.c
@@ -43,7 +43,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE];
/*
* register an ipvs protocol
*/
-static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp)
+static int __used __init register_ip_vs_protocol(struct ip_vs_protocol *pp)
{
unsigned hash = IP_VS_PROTO_HASH(pp->protocol);
@@ -190,7 +190,7 @@ ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp,
}
-int ip_vs_protocol_init(void)
+int __init ip_vs_protocol_init(void)
{
char protocols[64];
#define REGISTER_PROTOCOL(p) \
diff --git a/net/ipv4/ipvs/ip_vs_rr.c b/net/ipv4/ipvs/ip_vs_rr.c
index c8db12d39e6..358110d17e5 100644
--- a/net/ipv4/ipvs/ip_vs_rr.c
+++ b/net/ipv4/ipvs/ip_vs_rr.c
@@ -94,6 +94,7 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
.name = "rr", /* name */
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_rr_scheduler.n_list),
.init_service = ip_vs_rr_init_svc,
.done_service = ip_vs_rr_done_svc,
.update_service = ip_vs_rr_update_svc,
@@ -102,7 +103,6 @@ static struct ip_vs_scheduler ip_vs_rr_scheduler = {
static int __init ip_vs_rr_init(void)
{
- INIT_LIST_HEAD(&ip_vs_rr_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_rr_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c
index b6476730985..a46ad9e3501 100644
--- a/net/ipv4/ipvs/ip_vs_sched.c
+++ b/net/ipv4/ipvs/ip_vs_sched.c
@@ -184,7 +184,7 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
write_lock_bh(&__ip_vs_sched_lock);
- if (scheduler->n_list.next != &scheduler->n_list) {
+ if (!list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
ip_vs_use_count_dec();
IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler "
@@ -229,7 +229,7 @@ int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
}
write_lock_bh(&__ip_vs_sched_lock);
- if (scheduler->n_list.next == &scheduler->n_list) {
+ if (list_empty(&scheduler->n_list)) {
write_unlock_bh(&__ip_vs_sched_lock);
IP_VS_ERR("unregister_ip_vs_scheduler(): [%s] scheduler "
"is not in the list. failed\n", scheduler->name);
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
index 2a7d3135818..77663d84cbd 100644
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ b/net/ipv4/ipvs/ip_vs_sed.c
@@ -138,6 +138,7 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
.name = "sed",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list),
.init_service = ip_vs_sed_init_svc,
.done_service = ip_vs_sed_done_svc,
.update_service = ip_vs_sed_update_svc,
@@ -147,7 +148,6 @@ static struct ip_vs_scheduler ip_vs_sed_scheduler =
static int __init ip_vs_sed_init(void)
{
- INIT_LIST_HEAD(&ip_vs_sed_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sed_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sh.c b/net/ipv4/ipvs/ip_vs_sh.c
index b8fdfac6500..7b979e22805 100644
--- a/net/ipv4/ipvs/ip_vs_sh.c
+++ b/net/ipv4/ipvs/ip_vs_sh.c
@@ -230,6 +230,7 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
.name = "sh",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_sh_scheduler.n_list),
.init_service = ip_vs_sh_init_svc,
.done_service = ip_vs_sh_done_svc,
.update_service = ip_vs_sh_update_svc,
@@ -239,7 +240,6 @@ static struct ip_vs_scheduler ip_vs_sh_scheduler =
static int __init ip_vs_sh_init(void)
{
- INIT_LIST_HEAD(&ip_vs_sh_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_sh_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_sync.c b/net/ipv4/ipvs/ip_vs_sync.c
index 45e9bd96c28..a652da2c320 100644
--- a/net/ipv4/ipvs/ip_vs_sync.c
+++ b/net/ipv4/ipvs/ip_vs_sync.c
@@ -904,9 +904,9 @@ int stop_sync_thread(int state)
* progress of stopping the master sync daemon.
*/
- spin_lock(&ip_vs_sync_lock);
+ spin_lock_bh(&ip_vs_sync_lock);
ip_vs_sync_state &= ~IP_VS_STATE_MASTER;
- spin_unlock(&ip_vs_sync_lock);
+ spin_unlock_bh(&ip_vs_sync_lock);
kthread_stop(sync_master_thread);
sync_master_thread = NULL;
} else if (state == IP_VS_STATE_BACKUP) {
diff --git a/net/ipv4/ipvs/ip_vs_wlc.c b/net/ipv4/ipvs/ip_vs_wlc.c
index 772c3cb4eca..9b0ef86bb1f 100644
--- a/net/ipv4/ipvs/ip_vs_wlc.c
+++ b/net/ipv4/ipvs/ip_vs_wlc.c
@@ -126,6 +126,7 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
.name = "wlc",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_wlc_scheduler.n_list),
.init_service = ip_vs_wlc_init_svc,
.done_service = ip_vs_wlc_done_svc,
.update_service = ip_vs_wlc_update_svc,
@@ -135,7 +136,6 @@ static struct ip_vs_scheduler ip_vs_wlc_scheduler =
static int __init ip_vs_wlc_init(void)
{
- INIT_LIST_HEAD(&ip_vs_wlc_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_wlc_scheduler);
}
diff --git a/net/ipv4/ipvs/ip_vs_wrr.c b/net/ipv4/ipvs/ip_vs_wrr.c
index 1d6932d7dc9..0d86a79b87b 100644
--- a/net/ipv4/ipvs/ip_vs_wrr.c
+++ b/net/ipv4/ipvs/ip_vs_wrr.c
@@ -212,6 +212,7 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
.name = "wrr",
.refcnt = ATOMIC_INIT(0),
.module = THIS_MODULE,
+ .n_list = LIST_HEAD_INIT(ip_vs_wrr_scheduler.n_list),
.init_service = ip_vs_wrr_init_svc,
.done_service = ip_vs_wrr_done_svc,
.update_service = ip_vs_wrr_update_svc,
@@ -220,7 +221,6 @@ static struct ip_vs_scheduler ip_vs_wrr_scheduler = {
static int __init ip_vs_wrr_init(void)
{
- INIT_LIST_HEAD(&ip_vs_wrr_scheduler.n_list);
return register_ip_vs_scheduler(&ip_vs_wrr_scheduler) ;
}
diff --git a/net/ipv4/netfilter/ipt_addrtype.c b/net/ipv4/netfilter/ipt_addrtype.c
index 49587a49722..462a22c9787 100644
--- a/net/ipv4/netfilter/ipt_addrtype.c
+++ b/net/ipv4/netfilter/ipt_addrtype.c
@@ -70,7 +70,7 @@ addrtype_mt_v1(const struct sk_buff *skb, const struct net_device *in,
(info->flags & IPT_ADDRTYPE_INVERT_SOURCE);
if (ret && info->dest)
ret &= match_type(dev, iph->daddr, info->dest) ^
- (info->flags & IPT_ADDRTYPE_INVERT_DEST);
+ !!(info->flags & IPT_ADDRTYPE_INVERT_DEST);
return ret;
}
diff --git a/net/ipv4/netfilter/nf_nat_proto_common.c b/net/ipv4/netfilter/nf_nat_proto_common.c
index 91537f11273..6c4f11f5144 100644
--- a/net/ipv4/netfilter/nf_nat_proto_common.c
+++ b/net/ipv4/netfilter/nf_nat_proto_common.c
@@ -73,9 +73,13 @@ bool nf_nat_proto_unique_tuple(struct nf_conntrack_tuple *tuple,
range_size = ntohs(range->max.all) - min + 1;
}
- off = *rover;
if (range->flags & IP_NAT_RANGE_PROTO_RANDOM)
- off = net_random();
+ off = secure_ipv4_port_ephemeral(tuple->src.u3.ip, tuple->dst.u3.ip,
+ maniptype == IP_NAT_MANIP_SRC
+ ? tuple->dst.u.all
+ : tuple->src.u.all);
+ else
+ off = *rover;
for (i = 0; i < range_size; i++, off++) {
*portptr = htons(min + off % range_size);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 16fc6f454a3..6ee5354c9aa 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2914,6 +2914,68 @@ static int ipv4_sysctl_rtcache_flush_strategy(ctl_table *table,
return 0;
}
+static void rt_secret_reschedule(int old)
+{
+ struct net *net;
+ int new = ip_rt_secret_interval;
+ int diff = new - old;
+
+ if (!diff)
+ return;
+
+ rtnl_lock();
+ for_each_net(net) {
+ int deleted = del_timer_sync(&net->ipv4.rt_secret_timer);
+
+ if (!new)
+ continue;
+
+ if (deleted) {
+ long time = net->ipv4.rt_secret_timer.expires - jiffies;
+
+ if (time <= 0 || (time += diff) <= 0)
+ time = 0;
+
+ net->ipv4.rt_secret_timer.expires = time;
+ } else
+ net->ipv4.rt_secret_timer.expires = new;
+
+ net->ipv4.rt_secret_timer.expires += jiffies;
+ add_timer(&net->ipv4.rt_secret_timer);
+ }
+ rtnl_unlock();
+}
+
+static int ipv4_sysctl_rt_secret_interval(ctl_table *ctl, int write,
+ struct file *filp,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int old = ip_rt_secret_interval;
+ int ret = proc_dointvec_jiffies(ctl, write, filp, buffer, lenp, ppos);
+
+ rt_secret_reschedule(old);
+
+ return ret;
+}
+
+static int ipv4_sysctl_rt_secret_interval_strategy(ctl_table *table,
+ int __user *name,
+ int nlen,
+ void __user *oldval,
+ size_t __user *oldlenp,
+ void __user *newval,
+ size_t newlen)
+{
+ int old = ip_rt_secret_interval;
+ int ret = sysctl_jiffies(table, name, nlen, oldval, oldlenp, newval,
+ newlen);
+
+ rt_secret_reschedule(old);
+
+ return ret;
+}
+
static ctl_table ipv4_route_table[] = {
{
.ctl_name = NET_IPV4_ROUTE_GC_THRESH,
@@ -3048,20 +3110,29 @@ static ctl_table ipv4_route_table[] = {
.data = &ip_rt_secret_interval,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec_jiffies,
- .strategy = &sysctl_jiffies,
+ .proc_handler = &ipv4_sysctl_rt_secret_interval,
+ .strategy = &ipv4_sysctl_rt_secret_interval_strategy,
},
{ .ctl_name = 0 }
};
-static __net_initdata struct ctl_path ipv4_route_path[] = {
+static struct ctl_table empty[1];
+
+static struct ctl_table ipv4_skeleton[] =
+{
+ { .procname = "route", .ctl_name = NET_IPV4_ROUTE,
+ .mode = 0555, .child = ipv4_route_table},
+ { .procname = "neigh", .ctl_name = NET_IPV4_NEIGH,
+ .mode = 0555, .child = empty},
+ { }
+};
+
+static __net_initdata struct ctl_path ipv4_path[] = {
{ .procname = "net", .ctl_name = CTL_NET, },
{ .procname = "ipv4", .ctl_name = NET_IPV4, },
- { .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
{ },
};
-
static struct ctl_table ipv4_route_flush_table[] = {
{
.ctl_name = NET_IPV4_ROUTE_FLUSH,
@@ -3074,6 +3145,13 @@ static struct ctl_table ipv4_route_flush_table[] = {
{ .ctl_name = 0 },
};
+static __net_initdata struct ctl_path ipv4_route_path[] = {
+ { .procname = "net", .ctl_name = CTL_NET, },
+ { .procname = "ipv4", .ctl_name = NET_IPV4, },
+ { .procname = "route", .ctl_name = NET_IPV4_ROUTE, },
+ { },
+};
+
static __net_init int sysctl_route_net_init(struct net *net)
{
struct ctl_table *tbl;
@@ -3126,10 +3204,12 @@ static __net_init int rt_secret_timer_init(struct net *net)
net->ipv4.rt_secret_timer.data = (unsigned long)net;
init_timer_deferrable(&net->ipv4.rt_secret_timer);
- net->ipv4.rt_secret_timer.expires =
- jiffies + net_random() % ip_rt_secret_interval +
- ip_rt_secret_interval;
- add_timer(&net->ipv4.rt_secret_timer);
+ if (ip_rt_secret_interval) {
+ net->ipv4.rt_secret_timer.expires =
+ jiffies + net_random() % ip_rt_secret_interval +
+ ip_rt_secret_interval;
+ add_timer(&net->ipv4.rt_secret_timer);
+ }
return 0;
}
@@ -3223,7 +3303,7 @@ int __init ip_rt_init(void)
*/
void __init ip_static_sysctl_init(void)
{
- register_sysctl_paths(ipv4_route_path, ipv4_route_table);
+ register_sysctl_paths(ipv4_path, ipv4_skeleton);
}
#endif
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a00532de2a8..8165f5aa8c7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -468,7 +468,8 @@ static unsigned tcp_syn_options(struct sock *sk, struct sk_buff *skb,
}
if (likely(sysctl_tcp_window_scaling)) {
opts->ws = tp->rx_opt.rcv_wscale;
- size += TCPOLEN_WSCALE_ALIGNED;
+ if(likely(opts->ws))
+ size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(sysctl_tcp_sack)) {
opts->options |= OPTION_SACK_ADVERTISE;
@@ -509,7 +510,8 @@ static unsigned tcp_synack_options(struct sock *sk,
if (likely(ireq->wscale_ok)) {
opts->ws = ireq->rcv_wscale;
- size += TCPOLEN_WSCALE_ALIGNED;
+ if(likely(opts->ws))
+ size += TCPOLEN_WSCALE_ALIGNED;
}
if (likely(doing_ts)) {
opts->options |= OPTION_TS;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d0..8e42fbbd576 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -989,7 +989,9 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
up->encap_rcv != NULL) {
int ret;
+ bh_unlock_sock(sk);
ret = (*up->encap_rcv)(sk, skb);
+ bh_lock_sock(sk);
if (ret <= 0) {
UDP_INC_STATS_BH(sock_net(sk),
UDP_MIB_INDATAGRAMS,
@@ -1092,7 +1094,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
if (skb1) {
int ret = 0;
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
ret = udp_queue_rcv_skb(sk, skb1);
else
@@ -1194,7 +1196,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
if (sk != NULL) {
int ret = 0;
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
ret = udp_queue_rcv_skb(sk, skb);
else
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a7842c54f58..7b6a584b62d 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1106,13 +1106,12 @@ out:
return ret;
}
-int ipv6_dev_get_saddr(struct net_device *dst_dev,
+int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
const struct in6_addr *daddr, unsigned int prefs,
struct in6_addr *saddr)
{
struct ipv6_saddr_score scores[2],
*score = &scores[0], *hiscore = &scores[1];
- struct net *net = dev_net(dst_dev);
struct ipv6_saddr_dst dst;
struct net_device *dev;
int dst_type;
@@ -1689,6 +1688,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
.fc_dst_len = plen,
.fc_flags = RTF_UP | flags,
.fc_nlinfo.nl_net = dev_net(dev),
+ .fc_protocol = RTPROT_KERNEL,
};
ipv6_addr_copy(&cfg.fc_dst, pfx);
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 8d05527524e..f5de3f9dc69 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -93,7 +93,8 @@ static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
if (flags & RT6_LOOKUP_F_SRCPREF_COA)
srcprefs |= IPV6_PREFER_SRC_COA;
- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+ if (ipv6_dev_get_saddr(net,
+ ip6_dst_idev(&rt->u.dst)->dev,
&flp->fl6_dst, srcprefs,
&saddr))
goto again;
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index abedf95fdf2..b3157a0cc15 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -91,19 +91,22 @@ static struct inet6_protocol icmpv6_protocol = {
.flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
-static __inline__ int icmpv6_xmit_lock(struct sock *sk)
+static __inline__ struct sock *icmpv6_xmit_lock(struct net *net)
{
+ struct sock *sk;
+
local_bh_disable();
+ sk = icmpv6_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path (f.e. SIT or
* ip6ip6 tunnel) signals dst_link_failure() for an
* outgoing ICMP6 packet.
*/
local_bh_enable();
- return 1;
+ return NULL;
}
- return 0;
+ return sk;
}
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
@@ -392,11 +395,10 @@ void icmpv6_send(struct sk_buff *skb, int type, int code, __u32 info,
fl.fl_icmp_code = code;
security_skb_classify_flow(skb, &fl);
- sk = icmpv6_sk(net);
- np = inet6_sk(sk);
-
- if (icmpv6_xmit_lock(sk))
+ sk = icmpv6_xmit_lock(net);
+ if (sk == NULL)
return;
+ np = inet6_sk(sk);
if (!icmpv6_xrlim_allow(sk, type, &fl))
goto out;
@@ -539,11 +541,10 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
fl.fl_icmp_type = ICMPV6_ECHO_REPLY;
security_skb_classify_flow(skb, &fl);
- sk = icmpv6_sk(net);
- np = inet6_sk(sk);
-
- if (icmpv6_xmit_lock(sk))
+ sk = icmpv6_xmit_lock(net);
+ if (sk == NULL)
return;
+ np = inet6_sk(sk);
if (!fl.oif && ipv6_addr_is_multicast(&fl.fl6_dst))
fl.oif = np->mcast_oif;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 52dddc25d3e..29c7c99e69f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -378,6 +378,7 @@ static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
arg.skb = skb;
arg.cb = cb;
+ arg.net = net;
w->args = &arg;
for (h = s_h; h < FIB_TABLE_HASHSZ; h++, s_e = 0) {
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index a4402de425d..0e844c2736a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -934,7 +934,7 @@ static int ip6_dst_lookup_tail(struct sock *sk,
goto out_err_release;
if (ipv6_addr_any(&fl->fl6_src)) {
- err = ipv6_dev_get_saddr(ip6_dst_idev(*dst)->dev,
+ err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
&fl->fl6_dst,
sk ? inet6_sk(sk)->srcprefs : 0,
&fl->fl6_src);
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 741cfcd96f8..4e5eac301f9 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -911,7 +911,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
} else {
if (np->rxopt.bits.rxinfo) {
struct in6_pktinfo src_info;
- src_info.ipi6_ifindex = np->mcast_oif;
+ src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
put_cmsg(&msg, SOL_IPV6, IPV6_PKTINFO, sizeof(src_info), &src_info);
}
@@ -921,7 +921,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
}
if (np->rxopt.bits.rxoinfo) {
struct in6_pktinfo src_info;
- src_info.ipi6_ifindex = np->mcast_oif;
+ src_info.ipi6_ifindex = np->mcast_oif ? np->mcast_oif : sk->sk_bound_dev_if;
ipv6_addr_copy(&src_info.ipi6_addr, &np->daddr);
put_cmsg(&msg, SOL_IPV6, IPV6_2292PKTINFO, sizeof(src_info), &src_info);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index beb48e3f038..f1c62ba0f56 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -549,7 +549,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
override = 0;
in6_ifa_put(ifp);
} else {
- if (ipv6_dev_get_saddr(dev, daddr,
+ if (ipv6_dev_get_saddr(dev_net(dev), dev, daddr,
inet6_sk(dev_net(dev)->ipv6.ndisc_sk)->srcprefs,
&tmpaddr))
return;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 5a3e87e4b18..9af6115f0f5 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2106,7 +2106,8 @@ static inline size_t rt6_nlmsg_size(void)
+ nla_total_size(sizeof(struct rta_cacheinfo));
}
-static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
+static int rt6_fill_node(struct net *net,
+ struct sk_buff *skb, struct rt6_info *rt,
struct in6_addr *dst, struct in6_addr *src,
int iif, int type, u32 pid, u32 seq,
int prefix, int nowait, unsigned int flags)
@@ -2187,8 +2188,9 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
#endif
NLA_PUT_U32(skb, RTA_IIF, iif);
} else if (dst) {
+ struct inet6_dev *idev = ip6_dst_idev(&rt->u.dst);
struct in6_addr saddr_buf;
- if (ipv6_dev_get_saddr(ip6_dst_idev(&rt->u.dst)->dev,
+ if (ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
dst, 0, &saddr_buf) == 0)
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
@@ -2233,7 +2235,8 @@ int rt6_dump_route(struct rt6_info *rt, void *p_arg)
} else
prefix = 0;
- return rt6_fill_node(arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
+ return rt6_fill_node(arg->net,
+ arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).pid, arg->cb->nlh->nlmsg_seq,
prefix, 0, NLM_F_MULTI);
}
@@ -2299,7 +2302,7 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void
rt = (struct rt6_info*) ip6_route_output(net, NULL, &fl);
skb->dst = &rt->u.dst;
- err = rt6_fill_node(skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
+ err = rt6_fill_node(net, skb, rt, &fl.fl6_dst, &fl.fl6_src, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).pid,
nlh->nlmsg_seq, 0, 0, 0);
if (err < 0) {
@@ -2326,7 +2329,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
if (skb == NULL)
goto errout;
- err = rt6_fill_node(skb, rt, NULL, NULL, 0,
+ err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
event, info->pid, seq, 0, 0, 0);
if (err < 0) {
/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index e6dfaeac6be..587f8f60c48 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -156,7 +156,7 @@ static struct ctl_table_header *ip6_base;
int ipv6_static_sysctl_register(void)
{
static struct ctl_table empty[1];
- ip6_base = register_net_sysctl_rotable(net_ipv6_ctl_path, empty);
+ ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
if (ip6_base == NULL)
return -ENOMEM;
return 0;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d1477b350f7..a6aecf76a71 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -379,7 +379,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
uh->source, saddr, dif))) {
struct sk_buff *buff = skb_clone(skb, GFP_ATOMIC);
if (buff) {
- bh_lock_sock_nested(sk2);
+ bh_lock_sock(sk2);
if (!sock_owned_by_user(sk2))
udpv6_queue_rcv_skb(sk2, buff);
else
@@ -387,7 +387,7 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
bh_unlock_sock(sk2);
}
}
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else
@@ -508,7 +508,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
/* deliver */
- bh_lock_sock_nested(sk);
+ bh_lock_sock(sk);
if (!sock_owned_by_user(sk))
udpv6_queue_rcv_skb(sk, skb);
else
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 8f1e0543b3c..08e4cbbe3f0 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -52,12 +52,14 @@ static struct dst_entry *xfrm6_dst_lookup(int tos, xfrm_address_t *saddr,
static int xfrm6_get_saddr(xfrm_address_t *saddr, xfrm_address_t *daddr)
{
struct dst_entry *dst;
+ struct net_device *dev;
dst = xfrm6_dst_lookup(0, NULL, daddr);
if (IS_ERR(dst))
return -EHOSTUNREACH;
- ipv6_dev_get_saddr(ip6_dst_idev(dst)->dev,
+ dev = ip6_dst_idev(dst)->dev;
+ ipv6_dev_get_saddr(dev_net(dev), dev,
(struct in6_addr *)&daddr->a6, 0,
(struct in6_addr *)&saddr->a6);
dst_release(dst);
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 475f89a8aee..8165df578c9 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -248,8 +248,8 @@ IEEE80211_IF_WFILE(min_discovery_timeout,
static void add_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted, sta);
- DEBUGFS_ADD(force_unicast_rateidx, ap);
- DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+ DEBUGFS_ADD(force_unicast_rateidx, sta);
+ DEBUGFS_ADD(max_ratectrl_rateidx, sta);
DEBUGFS_ADD(state, sta);
DEBUGFS_ADD(bssid, sta);
@@ -283,8 +283,8 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
static void add_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted, wds);
- DEBUGFS_ADD(force_unicast_rateidx, ap);
- DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+ DEBUGFS_ADD(force_unicast_rateidx, wds);
+ DEBUGFS_ADD(max_ratectrl_rateidx, wds);
DEBUGFS_ADD(peer, wds);
}
@@ -292,8 +292,8 @@ static void add_wds_files(struct ieee80211_sub_if_data *sdata)
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_ADD(drop_unencrypted, vlan);
- DEBUGFS_ADD(force_unicast_rateidx, ap);
- DEBUGFS_ADD(max_ratectrl_rateidx, ap);
+ DEBUGFS_ADD(force_unicast_rateidx, vlan);
+ DEBUGFS_ADD(max_ratectrl_rateidx, vlan);
}
static void add_monitor_files(struct ieee80211_sub_if_data *sdata)
@@ -381,8 +381,8 @@ static void add_files(struct ieee80211_sub_if_data *sdata)
static void del_sta_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_DEL(drop_unencrypted, sta);
- DEBUGFS_DEL(force_unicast_rateidx, ap);
- DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+ DEBUGFS_DEL(force_unicast_rateidx, sta);
+ DEBUGFS_DEL(max_ratectrl_rateidx, sta);
DEBUGFS_DEL(state, sta);
DEBUGFS_DEL(bssid, sta);
@@ -416,8 +416,8 @@ static void del_ap_files(struct ieee80211_sub_if_data *sdata)
static void del_wds_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_DEL(drop_unencrypted, wds);
- DEBUGFS_DEL(force_unicast_rateidx, ap);
- DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+ DEBUGFS_DEL(force_unicast_rateidx, wds);
+ DEBUGFS_DEL(max_ratectrl_rateidx, wds);
DEBUGFS_DEL(peer, wds);
}
@@ -425,8 +425,8 @@ static void del_wds_files(struct ieee80211_sub_if_data *sdata)
static void del_vlan_files(struct ieee80211_sub_if_data *sdata)
{
DEBUGFS_DEL(drop_unencrypted, vlan);
- DEBUGFS_DEL(force_unicast_rateidx, ap);
- DEBUGFS_DEL(max_ratectrl_rateidx, ap);
+ DEBUGFS_DEL(force_unicast_rateidx, vlan);
+ DEBUGFS_DEL(max_ratectrl_rateidx, vlan);
}
static void del_monitor_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index ec59345af65..586a9b49b0f 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -470,6 +470,8 @@ struct ieee80211_sub_if_data {
struct dentry *auth_transaction;
struct dentry *flags;
struct dentry *num_beacons_sta;
+ struct dentry *force_unicast_rateidx;
+ struct dentry *max_ratectrl_rateidx;
} sta;
struct {
struct dentry *drop_unencrypted;
@@ -483,9 +485,13 @@ struct ieee80211_sub_if_data {
struct {
struct dentry *drop_unencrypted;
struct dentry *peer;
+ struct dentry *force_unicast_rateidx;
+ struct dentry *max_ratectrl_rateidx;
} wds;
struct {
struct dentry *drop_unencrypted;
+ struct dentry *force_unicast_rateidx;
+ struct dentry *max_ratectrl_rateidx;
} vlan;
struct {
struct dentry *mode;
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index b5933b27149..35f2f95f2fa 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -383,7 +383,7 @@ errcopy:
hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
tbl->free_node(p, 0);
}
- __mesh_table_free(tbl);
+ __mesh_table_free(newtbl);
endgrow:
return NULL;
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index e1d11c9b672..9bb68c6a8f4 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -478,51 +478,21 @@ int ieee80211_ht_addt_info_ie_to_ht_bss_info(
static void ieee80211_sta_send_associnfo(struct net_device *dev,
struct ieee80211_if_sta *ifsta)
{
- char *buf;
- size_t len;
- int i;
union iwreq_data wrqu;
- if (!ifsta->assocreq_ies && !ifsta->assocresp_ies)
- return;
-
- buf = kmalloc(50 + 2 * (ifsta->assocreq_ies_len +
- ifsta->assocresp_ies_len), GFP_KERNEL);
- if (!buf)
- return;
-
- len = sprintf(buf, "ASSOCINFO(");
if (ifsta->assocreq_ies) {
- len += sprintf(buf + len, "ReqIEs=");
- for (i = 0; i < ifsta->assocreq_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifsta->assocreq_ies[i]);
- }
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = ifsta->assocreq_ies_len;
+ wireless_send_event(dev, IWEVASSOCREQIE, &wrqu,
+ ifsta->assocreq_ies);
}
- if (ifsta->assocresp_ies) {
- if (ifsta->assocreq_ies)
- len += sprintf(buf + len, " ");
- len += sprintf(buf + len, "RespIEs=");
- for (i = 0; i < ifsta->assocresp_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifsta->assocresp_ies[i]);
- }
- }
- len += sprintf(buf + len, ")");
- if (len > IW_CUSTOM_MAX) {
- len = sprintf(buf, "ASSOCRESPIE=");
- for (i = 0; i < ifsta->assocresp_ies_len; i++) {
- len += sprintf(buf + len, "%02x",
- ifsta->assocresp_ies[i]);
- }
+ if (ifsta->assocresp_ies) {
+ memset(&wrqu, 0, sizeof(wrqu));
+ wrqu.data.length = ifsta->assocresp_ies_len;
+ wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu,
+ ifsta->assocresp_ies);
}
-
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = len;
- wireless_send_event(dev, IWEVCUSTOM, &wrqu, buf);
-
- kfree(buf);
}
@@ -813,7 +783,7 @@ static void ieee80211_send_assoc(struct net_device *dev,
}
}
- if (count == 8) {
+ if (rates_len > count) {
pos = skb_put(skb, rates_len - count + 2);
*pos++ = WLAN_EID_EXT_SUPP_RATES;
*pos++ = rates_len - count;
@@ -2103,6 +2073,8 @@ static void ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
return;
}
+ /* update new sta with its last rx activity */
+ sta->last_rx = jiffies;
}
/*
@@ -2866,7 +2838,7 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
jiffies);
#endif /* CONFIG_MAC80211_IBSS_DEBUG */
if (beacon_timestamp > rx_timestamp) {
-#ifndef CONFIG_MAC80211_IBSS_DEBUG
+#ifdef CONFIG_MAC80211_IBSS_DEBUG
printk(KERN_DEBUG "%s: beacon TSF higher than "
"local TSF - IBSS merge with BSSID %s\n",
dev->name, print_mac(mac, mgmt->bssid));
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 105a616c5c7..a8752031adc 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -968,7 +968,7 @@ ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
/* need to zero data of old helper */
memset(&help->help, 0, sizeof(help->help));
} else {
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
if (help == NULL)
return -ENOMEM;
}
@@ -1136,16 +1136,33 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
ct->status |= IPS_CONFIRMED;
+ rcu_read_lock();
+ helper = __nf_ct_helper_find(rtuple);
+ if (helper) {
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ if (help == NULL) {
+ rcu_read_unlock();
+ err = -ENOMEM;
+ goto err;
+ }
+ /* not in hash table yet so not strictly necessary */
+ rcu_assign_pointer(help->helper, helper);
+ }
+
if (cda[CTA_STATUS]) {
err = ctnetlink_change_status(ct, cda);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto err;
+ }
}
if (cda[CTA_PROTOINFO]) {
err = ctnetlink_change_protoinfo(ct, cda);
- if (err < 0)
+ if (err < 0) {
+ rcu_read_unlock();
goto err;
+ }
}
nf_ct_acct_ext_add(ct, GFP_KERNEL);
@@ -1155,19 +1172,6 @@ ctnetlink_create_conntrack(struct nlattr *cda[],
ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif
- rcu_read_lock();
- helper = __nf_ct_helper_find(rtuple);
- if (helper) {
- help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
- if (help == NULL) {
- rcu_read_unlock();
- err = -ENOMEM;
- goto err;
- }
- /* not in hash table yet so not strictly necessary */
- rcu_assign_pointer(help->helper, helper);
- }
-
/* setup master conntrack: this is a confirmed expectation */
if (master_ct) {
__set_bit(IPS_EXPECTED_BIT, &ct->status);
diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c
index d2d45655cd1..74aecc098ba 100644
--- a/net/rfkill/rfkill.c
+++ b/net/rfkill/rfkill.c
@@ -150,6 +150,8 @@ static void update_rfkill_state(struct rfkill *rfkill)
* calls and handling all the red tape such as issuing notifications
* if the call is successful.
*
+ * Suspended devices are not touched at all, and -EAGAIN is returned.
+ *
* Note that the @force parameter cannot override a (possibly cached)
* state of RFKILL_STATE_HARD_BLOCKED. Any device making use of
* RFKILL_STATE_HARD_BLOCKED implements either get_state() or
@@ -168,6 +170,9 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
int retval = 0;
enum rfkill_state oldstate, newstate;
+ if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP))
+ return -EBUSY;
+
oldstate = rfkill->state;
if (rfkill->get_state && !force &&
@@ -214,7 +219,7 @@ static int rfkill_toggle_radio(struct rfkill *rfkill,
*
* This function toggles the state of all switches of given type,
* unless a specific switch is claimed by userspace (in which case,
- * that switch is left alone).
+ * that switch is left alone) or suspended.
*/
void rfkill_switch_all(enum rfkill_type type, enum rfkill_state state)
{
@@ -239,8 +244,8 @@ EXPORT_SYMBOL(rfkill_switch_all);
/**
* rfkill_epo - emergency power off all transmitters
*
- * This kicks all rfkill devices to RFKILL_STATE_SOFT_BLOCKED, ignoring
- * everything in its path but rfkill_mutex and rfkill->mutex.
+ * This kicks all non-suspended rfkill devices to RFKILL_STATE_SOFT_BLOCKED,
+ * ignoring everything in its path but rfkill_mutex and rfkill->mutex.
*/
void rfkill_epo(void)
{
@@ -372,7 +377,7 @@ static ssize_t rfkill_claim_show(struct device *dev,
{
struct rfkill *rfkill = to_rfkill(dev);
- return sprintf(buf, "%d", rfkill->user_claim);
+ return sprintf(buf, "%d\n", rfkill->user_claim);
}
static ssize_t rfkill_claim_store(struct device *dev,
@@ -458,13 +463,14 @@ static int rfkill_resume(struct device *dev)
if (dev->power.power_state.event != PM_EVENT_ON) {
mutex_lock(&rfkill->mutex);
+ dev->power.power_state.event = PM_EVENT_ON;
+
/* restore radio state AND notify everybody */
rfkill_toggle_radio(rfkill, rfkill->state, 1);
mutex_unlock(&rfkill->mutex);
}
- dev->power.power_state = PMSG_ON;
return 0;
}
#else
diff --git a/net/rxrpc/ar-accept.c b/net/rxrpc/ar-accept.c
index bdfb7741779..77228f28fa3 100644
--- a/net/rxrpc/ar-accept.c
+++ b/net/rxrpc/ar-accept.c
@@ -100,7 +100,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
trans = rxrpc_get_transport(local, peer, GFP_NOIO);
rxrpc_put_peer(peer);
- if (!trans) {
+ if (IS_ERR(trans)) {
_debug("no trans");
ret = -EBUSY;
goto error;
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 26c7e1f9a35..9974b3f04f0 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -751,7 +751,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
struct nlattr *tb[TCA_ACT_MAX+1];
struct nlattr *kind;
struct tc_action *a = create_a(0);
- int err = -EINVAL;
+ int err = -ENOMEM;
if (a == NULL) {
printk("tca_action_flush: couldnt create tc_action\n");
@@ -762,7 +762,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
if (!skb) {
printk("tca_action_flush: failed skb alloc\n");
kfree(a);
- return -ENOBUFS;
+ return err;
}
b = skb_tail_pointer(skb);
@@ -790,6 +790,8 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
if (err < 0)
goto nla_put_failure;
+ if (err == 0)
+ goto noflush_out;
nla_nest_end(skb, nest);
@@ -807,6 +809,7 @@ nla_put_failure:
nlmsg_failure:
module_put(a->ops->owner);
err_out:
+noflush_out:
kfree_skb(skb);
kfree(a);
return err;
@@ -824,8 +827,10 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
return ret;
if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
- if (tb[0] != NULL && tb[1] == NULL)
- return tca_action_flush(tb[0], n, pid);
+ if (tb[1] != NULL)
+ return tca_action_flush(tb[1], n, pid);
+ else
+ return -EINVAL;
}
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index d2b6f54a626..5cafdd4c801 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -280,7 +280,7 @@ replay:
if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
spin_lock_bh(root_lock);
*back = tp->next;
- spin_lock_bh(root_lock);
+ spin_unlock_bh(root_lock);
tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER);
tcf_destroy(tp);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index ba1d121f312..506b709510b 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -27,6 +27,7 @@
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/hrtimer.h>
+#include <linux/lockdep.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -183,24 +184,68 @@ EXPORT_SYMBOL(unregister_qdisc);
(root qdisc, all its children, children of children etc.)
*/
+struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
+{
+ struct Qdisc *q;
+
+ if (!(root->flags & TCQ_F_BUILTIN) &&
+ root->handle == handle)
+ return root;
+
+ list_for_each_entry(q, &root->list, list) {
+ if (q->handle == handle)
+ return q;
+ }
+ return NULL;
+}
+
+/*
+ * This lock is needed until some qdiscs stop calling qdisc_tree_decrease_qlen()
+ * without rtnl_lock(); currently hfsc_dequeue(), netem_dequeue(), tbf_dequeue()
+ */
+static DEFINE_SPINLOCK(qdisc_list_lock);
+
+static void qdisc_list_add(struct Qdisc *q)
+{
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ spin_lock_bh(&qdisc_list_lock);
+ list_add_tail(&q->list, &qdisc_root_sleeping(q)->list);
+ spin_unlock_bh(&qdisc_list_lock);
+ }
+}
+
+void qdisc_list_del(struct Qdisc *q)
+{
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ spin_lock_bh(&qdisc_list_lock);
+ list_del(&q->list);
+ spin_unlock_bh(&qdisc_list_lock);
+ }
+}
+EXPORT_SYMBOL(qdisc_list_del);
+
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle)
{
unsigned int i;
+ struct Qdisc *q;
+
+ spin_lock_bh(&qdisc_list_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
- struct Qdisc *q, *txq_root = txq->qdisc_sleeping;
-
- if (!(txq_root->flags & TCQ_F_BUILTIN) &&
- txq_root->handle == handle)
- return txq_root;
+ struct Qdisc *txq_root = txq->qdisc_sleeping;
- list_for_each_entry(q, &txq_root->list, list) {
- if (q->handle == handle)
- return q;
- }
+ q = qdisc_match_from_root(txq_root, handle);
+ if (q)
+ goto unlock;
}
- return NULL;
+
+ q = qdisc_match_from_root(dev->rx_queue.qdisc_sleeping, handle);
+
+unlock:
+ spin_unlock_bh(&qdisc_list_lock);
+
+ return q;
}
static struct Qdisc *qdisc_leaf(struct Qdisc *p, u32 classid)
@@ -416,7 +461,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
wd->qdisc->flags &= ~TCQ_F_THROTTLED;
smp_wmb();
- __netif_schedule(wd->qdisc);
+ __netif_schedule(qdisc_root(wd->qdisc));
return HRTIMER_NORESTART;
}
@@ -433,6 +478,10 @@ void qdisc_watchdog_schedule(struct qdisc_watchdog *wd, psched_time_t expires)
{
ktime_t time;
+ if (test_bit(__QDISC_STATE_DEACTIVATED,
+ &qdisc_root_sleeping(wd->qdisc)->state))
+ return;
+
wd->qdisc->flags |= TCQ_F_THROTTLED;
time = ktime_set(0, 0);
time = ktime_add_ns(time, PSCHED_US2NS(expires));
@@ -575,7 +624,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
struct Qdisc *oqdisc = dev_queue->qdisc_sleeping;
spinlock_t *root_lock;
- root_lock = qdisc_root_lock(oqdisc);
+ root_lock = qdisc_lock(oqdisc);
spin_lock_bh(root_lock);
/* Prune old scheduler */
@@ -586,7 +635,7 @@ static struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
if (qdisc == NULL)
qdisc = &noop_qdisc;
dev_queue->qdisc_sleeping = qdisc;
- dev_queue->qdisc = &noop_qdisc;
+ rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
spin_unlock_bh(root_lock);
@@ -627,11 +676,8 @@ static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid
if (new || old)
qdisc_notify(skb, n, clid, old, new);
- if (old) {
- spin_lock_bh(&old->q.lock);
+ if (old)
qdisc_destroy(old);
- spin_unlock_bh(&old->q.lock);
- }
}
/* Graft qdisc "new" to class "classid" of qdisc "parent" or
@@ -697,6 +743,10 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
return err;
}
+/* lockdep annotation is needed for ingress; egress gets it only for name */
+static struct lock_class_key qdisc_tx_lock;
+static struct lock_class_key qdisc_rx_lock;
+
/*
Allocate and initialize new qdisc.
@@ -757,6 +807,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
if (handle == TC_H_INGRESS) {
sch->flags |= TCQ_F_INGRESS;
handle = TC_H_MAKE(TC_H_INGRESS, 0);
+ lockdep_set_class(qdisc_lock(sch), &qdisc_rx_lock);
} else {
if (handle == 0) {
handle = qdisc_alloc_handle(dev);
@@ -764,6 +815,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
if (handle == 0)
goto err_out3;
}
+ lockdep_set_class(qdisc_lock(sch), &qdisc_tx_lock);
}
sch->handle = handle;
@@ -778,9 +830,16 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
sch->stab = stab;
}
if (tca[TCA_RATE]) {
+ spinlock_t *root_lock;
+
+ if ((sch->parent != TC_H_ROOT) &&
+ !(sch->flags & TCQ_F_INGRESS))
+ root_lock = qdisc_root_sleeping_lock(sch);
+ else
+ root_lock = qdisc_lock(sch);
+
err = gen_new_estimator(&sch->bstats, &sch->rate_est,
- qdisc_root_lock(sch),
- tca[TCA_RATE]);
+ root_lock, tca[TCA_RATE]);
if (err) {
/*
* Any broken qdiscs that would require
@@ -792,8 +851,8 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
goto err_out3;
}
}
- if ((parent != TC_H_ROOT) && !(sch->flags & TCQ_F_INGRESS))
- list_add_tail(&sch->list, &dev_queue->qdisc_sleeping->list);
+
+ qdisc_list_add(sch);
return sch;
}
@@ -832,7 +891,8 @@ static int qdisc_change(struct Qdisc *sch, struct nlattr **tca)
if (tca[TCA_RATE])
gen_replace_estimator(&sch->bstats, &sch->rate_est,
- qdisc_root_lock(sch), tca[TCA_RATE]);
+ qdisc_root_sleeping_lock(sch),
+ tca[TCA_RATE]);
return 0;
}
@@ -908,7 +968,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /* ingress */
- q = dev->rx_queue.qdisc;
+ q = dev->rx_queue.qdisc_sleeping;
}
} else {
struct netdev_queue *dev_queue;
@@ -978,7 +1038,7 @@ replay:
return -ENOENT;
q = qdisc_leaf(p, clid);
} else { /*ingress */
- q = dev->rx_queue.qdisc;
+ q = dev->rx_queue.qdisc_sleeping;
}
} else {
struct netdev_queue *dev_queue;
@@ -1074,20 +1134,13 @@ create_n_graft:
}
graft:
- if (1) {
- spinlock_t *root_lock;
-
- err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
- if (err) {
- if (q) {
- root_lock = qdisc_root_lock(q);
- spin_lock_bh(root_lock);
- qdisc_destroy(q);
- spin_unlock_bh(root_lock);
- }
- return err;
- }
+ err = qdisc_graft(dev, p, skb, n, clid, q, NULL);
+ if (err) {
+ if (q)
+ qdisc_destroy(q);
+ return err;
}
+
return 0;
}
@@ -1529,11 +1582,11 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
t = 0;
dev_queue = netdev_get_tx_queue(dev, 0);
- if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
dev_queue = &dev->rx_queue;
- if (tc_dump_tclass_root(dev_queue->qdisc, skb, tcm, cb, &t, s_t) < 0)
+ if (tc_dump_tclass_root(dev_queue->qdisc_sleeping, skb, tcm, cb, &t, s_t) < 0)
goto done;
done:
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index 4e261ce62f4..9b720adedea 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -521,6 +521,10 @@ static void cbq_ovl_delay(struct cbq_class *cl)
struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
psched_tdiff_t delay = cl->undertime - q->now;
+ if (test_bit(__QDISC_STATE_DEACTIVATED,
+ &qdisc_root_sleeping(cl->qdisc)->state))
+ return;
+
if (!cl->delayed) {
psched_time_t sched = q->now;
ktime_t expires;
@@ -654,7 +658,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
}
sch->flags &= ~TCQ_F_THROTTLED;
- __netif_schedule(sch);
+ __netif_schedule(qdisc_root(sch));
return HRTIMER_NORESTART;
}
@@ -1835,7 +1839,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch),
+ qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
return 0;
}
@@ -1926,7 +1930,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **t
if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch), tca[TCA_RATE]);
+ qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 7cf83b37459..9634091ee2f 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -518,15 +518,17 @@ void qdisc_reset(struct Qdisc *qdisc)
}
EXPORT_SYMBOL(qdisc_reset);
-/* this is the rcu callback function to clean up a qdisc when there
- * are no further references to it */
-
-static void __qdisc_destroy(struct rcu_head *head)
+void qdisc_destroy(struct Qdisc *qdisc)
{
- struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
const struct Qdisc_ops *ops = qdisc->ops;
+ if (qdisc->flags & TCQ_F_BUILTIN ||
+ !atomic_dec_and_test(&qdisc->refcnt))
+ return;
+
#ifdef CONFIG_NET_SCHED
+ qdisc_list_del(qdisc);
+
qdisc_put_stab(qdisc->stab);
#endif
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
@@ -542,20 +544,6 @@ static void __qdisc_destroy(struct rcu_head *head)
kfree((char *) qdisc - qdisc->padded);
}
-
-/* Under qdisc_lock(qdisc) and BH! */
-
-void qdisc_destroy(struct Qdisc *qdisc)
-{
- if (qdisc->flags & TCQ_F_BUILTIN ||
- !atomic_dec_and_test(&qdisc->refcnt))
- return;
-
- if (qdisc->parent)
- list_del(&qdisc->list);
-
- call_rcu(&qdisc->q_rcu, __qdisc_destroy);
-}
EXPORT_SYMBOL(qdisc_destroy);
static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
@@ -597,6 +585,9 @@ static void transition_one_qdisc(struct net_device *dev,
struct Qdisc *new_qdisc = dev_queue->qdisc_sleeping;
int *need_watchdog_p = _need_watchdog;
+ if (!(new_qdisc->flags & TCQ_F_BUILTIN))
+ clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
+
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
*need_watchdog_p = 1;
@@ -640,14 +631,17 @@ static void dev_deactivate_queue(struct net_device *dev,
if (qdisc) {
spin_lock_bh(qdisc_lock(qdisc));
- dev_queue->qdisc = qdisc_default;
+ if (!(qdisc->flags & TCQ_F_BUILTIN))
+ set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state);
+
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
qdisc_reset(qdisc);
spin_unlock_bh(qdisc_lock(qdisc));
}
}
-static bool some_qdisc_is_running(struct net_device *dev, int lock)
+static bool some_qdisc_is_busy(struct net_device *dev)
{
unsigned int i;
@@ -658,16 +652,15 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
int val;
dev_queue = netdev_get_tx_queue(dev, i);
- q = dev_queue->qdisc;
+ q = dev_queue->qdisc_sleeping;
root_lock = qdisc_lock(q);
- if (lock)
- spin_lock_bh(root_lock);
+ spin_lock_bh(root_lock);
- val = test_bit(__QDISC_STATE_RUNNING, &q->state);
+ val = (test_bit(__QDISC_STATE_RUNNING, &q->state) ||
+ test_bit(__QDISC_STATE_SCHED, &q->state));
- if (lock)
- spin_unlock_bh(root_lock);
+ spin_unlock_bh(root_lock);
if (val)
return true;
@@ -677,8 +670,6 @@ static bool some_qdisc_is_running(struct net_device *dev, int lock)
void dev_deactivate(struct net_device *dev)
{
- bool running;
-
netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);
dev_deactivate_queue(dev, &dev->rx_queue, &noop_qdisc);
@@ -688,25 +679,8 @@ void dev_deactivate(struct net_device *dev)
synchronize_rcu();
/* Wait for outstanding qdisc_run calls. */
- do {
- while (some_qdisc_is_running(dev, 0))
- yield();
-
- /*
- * Double-check inside queue lock to ensure that all effects
- * of the queue run are visible when we return.
- */
- running = some_qdisc_is_running(dev, 1);
-
- /*
- * The running flag should never be set at this point because
- * we've already set dev->qdisc to noop_qdisc *inside* the same
- * pair of spin locks. That is, if any qdisc_run starts after
- * our initial test it should see the noop_qdisc and then
- * clear the RUNNING bit before dropping the queue lock. So
- * if it is set here then we've found a bug.
- */
- } while (WARN_ON_ONCE(running));
+ while (some_qdisc_is_busy(dev))
+ yield();
}
static void dev_init_scheduler_queue(struct net_device *dev,
@@ -735,14 +709,10 @@ static void shutdown_scheduler_queue(struct net_device *dev,
struct Qdisc *qdisc_default = _qdisc_default;
if (qdisc) {
- spinlock_t *root_lock = qdisc_lock(qdisc);
-
- dev_queue->qdisc = qdisc_default;
+ rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
dev_queue->qdisc_sleeping = qdisc_default;
- spin_lock_bh(root_lock);
qdisc_destroy(qdisc);
- spin_unlock_bh(root_lock);
}
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c2b8d9cce3d..c1e77da8cd0 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1045,7 +1045,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch),
+ qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
return 0;
}
@@ -1104,7 +1104,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (tca[TCA_RATE])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch), tca[TCA_RATE]);
+ qdisc_root_sleeping_lock(sch), tca[TCA_RATE]);
*arg = (unsigned long)cl;
return 0;
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index be35422711a..97d4761cc31 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -577,7 +577,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.drops++;
cl->qstats.drops++;
}
- return NET_XMIT_DROP;
+ return ret;
} else {
cl->bstats.packets +=
skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
@@ -623,7 +623,7 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
sch->qstats.drops++;
cl->qstats.drops++;
}
- return NET_XMIT_DROP;
+ return ret;
} else
htb_activate(q, cl);
@@ -1279,7 +1279,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
/* delete from hash and active; remainder in destroy_class */
qdisc_class_hash_remove(&q->clhash, &cl->common);
- cl->parent->children--;
+ if (cl->parent)
+ cl->parent->children--;
if (cl->prio_activity)
htb_deactivate(q, cl);
@@ -1371,7 +1372,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
goto failure;
gen_new_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch),
+ qdisc_root_sleeping_lock(sch),
tca[TCA_RATE] ? : &est.nla);
cl->refcnt = 1;
cl->children = 0;
@@ -1426,7 +1427,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
} else {
if (tca[TCA_RATE])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- qdisc_root_lock(sch),
+ qdisc_root_sleeping_lock(sch),
tca[TCA_RATE]);
sch_tree_lock(sch);
}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index eac197610ed..a6697c686c7 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -113,11 +113,11 @@ prio_requeue(struct sk_buff *skb, struct Qdisc* sch)
if ((ret = qdisc->ops->requeue(skb, qdisc)) == NET_XMIT_SUCCESS) {
sch->q.qlen++;
sch->qstats.requeues++;
- return 0;
+ return NET_XMIT_SUCCESS;
}
if (net_xmit_drop_count(ret))
sch->qstats.drops++;
- return NET_XMIT_DROP;
+ return ret;
}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 7d3b7ff3bf0..94c61598b86 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -123,15 +123,8 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
struct tbf_sched_data *q = qdisc_priv(sch);
int ret;
- if (qdisc_pkt_len(skb) > q->max_size) {
- sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_ACT
- if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
-#endif
- kfree_skb(skb);
-
- return NET_XMIT_DROP;
- }
+ if (qdisc_pkt_len(skb) > q->max_size)
+ return qdisc_reshape_fail(skb, sch);
ret = qdisc_enqueue(skb, q->qdisc);
if (ret != 0) {
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 675a5c3e68a..52db5f60daa 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -80,6 +80,10 @@ static struct sctp_auth_bytes *sctp_auth_create_key(__u32 key_len, gfp_t gfp)
{
struct sctp_auth_bytes *key;
+ /* Verify that we are not going to overflow INT_MAX */
+ if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ return NULL;
+
/* Allocate the shared key */
key = kmalloc(sizeof(struct sctp_auth_bytes) + key_len, gfp);
if (!key)
@@ -782,6 +786,9 @@ int sctp_auth_ep_set_hmacs(struct sctp_endpoint *ep,
for (i = 0; i < hmacs->shmac_num_idents; i++) {
id = hmacs->shmac_idents[i];
+ if (id > SCTP_AUTH_HMAC_ID_MAX)
+ return -EOPNOTSUPP;
+
if (SCTP_AUTH_HMAC_ID_SHA1 == id)
has_sha1 = 1;
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index e39a0cdef18..4c8d9f45ce0 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -103,6 +103,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
/* Initialize the CHUNKS parameter */
auth_chunks->param_hdr.type = SCTP_PARAM_CHUNKS;
+ auth_chunks->param_hdr.length = htons(sizeof(sctp_paramhdr_t));
/* If the Add-IP functionality is enabled, we must
* authenticate, ASCONF and ASCONF-ACK chunks
@@ -110,8 +111,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
if (sctp_addip_enable) {
auth_chunks->chunks[0] = SCTP_CID_ASCONF;
auth_chunks->chunks[1] = SCTP_CID_ASCONF_ACK;
- auth_chunks->param_hdr.length =
- htons(sizeof(sctp_paramhdr_t) + 2);
+ auth_chunks->param_hdr.length += htons(2);
}
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 483a01d0740..47f91afa021 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -319,7 +319,8 @@ static void sctp_v6_get_saddr(struct sctp_sock *sk,
__func__, asoc, dst, NIP6(daddr->v6.sin6_addr));
if (!asoc) {
- ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL,
+ ipv6_dev_get_saddr(sock_net(sctp_opt2sk(sk)),
+ dst ? ip6_dst_idev(dst)->dev : NULL,
&daddr->v6.sin6_addr,
inet6_sk(&sk->inet.sk)->srcprefs,
&saddr->v6.sin6_addr);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index dbb79adf8f3..5ffb9dec1c3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3055,6 +3055,9 @@ static int sctp_setsockopt_auth_chunk(struct sock *sk,
{
struct sctp_authchunk val;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authchunk))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -3083,8 +3086,12 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
int optlen)
{
struct sctp_hmacalgo *hmacs;
+ u32 idents;
int err;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen < sizeof(struct sctp_hmacalgo))
return -EINVAL;
@@ -3097,8 +3104,9 @@ static int sctp_setsockopt_hmac_ident(struct sock *sk,
goto out;
}
- if (hmacs->shmac_num_idents == 0 ||
- hmacs->shmac_num_idents > SCTP_AUTH_NUM_HMACS) {
+ idents = hmacs->shmac_num_idents;
+ if (idents == 0 || idents > SCTP_AUTH_NUM_HMACS ||
+ (idents * sizeof(u16)) > (optlen - sizeof(struct sctp_hmacalgo))) {
err = -EINVAL;
goto out;
}
@@ -3123,6 +3131,9 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
struct sctp_association *asoc;
int ret;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen <= sizeof(struct sctp_authkey))
return -EINVAL;
@@ -3135,6 +3146,11 @@ static int sctp_setsockopt_auth_key(struct sock *sk,
goto out;
}
+ if (authkey->sca_keylength > optlen - sizeof(struct sctp_authkey)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
asoc = sctp_id2assoc(sk, authkey->sca_assoc_id);
if (!asoc && authkey->sca_assoc_id && sctp_style(sk, UDP)) {
ret = -EINVAL;
@@ -3160,6 +3176,9 @@ static int sctp_setsockopt_active_key(struct sock *sk,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -3185,6 +3204,9 @@ static int sctp_setsockopt_del_key(struct sock *sk,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (optlen != sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, optlen))
@@ -5197,19 +5219,29 @@ static int sctp_getsockopt_maxburst(struct sock *sk, int len,
static int sctp_getsockopt_hmac_ident(struct sock *sk, int len,
char __user *optval, int __user *optlen)
{
+ struct sctp_hmacalgo __user *p = (void __user *)optval;
struct sctp_hmac_algo_param *hmacs;
- __u16 param_len;
+ __u16 data_len = 0;
+ u32 num_idents;
+
+ if (!sctp_auth_enable)
+ return -EACCES;
hmacs = sctp_sk(sk)->ep->auth_hmacs_list;
- param_len = ntohs(hmacs->param_hdr.length);
+ data_len = ntohs(hmacs->param_hdr.length) - sizeof(sctp_paramhdr_t);
- if (len < param_len)
+ if (len < sizeof(struct sctp_hmacalgo) + data_len)
return -EINVAL;
+
+ len = sizeof(struct sctp_hmacalgo) + data_len;
+ num_idents = data_len / sizeof(u16);
+
if (put_user(len, optlen))
return -EFAULT;
- if (copy_to_user(optval, hmacs->hmac_ids, len))
+ if (put_user(num_idents, &p->shmac_num_idents))
+ return -EFAULT;
+ if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
return -EFAULT;
-
return 0;
}
@@ -5219,6 +5251,9 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
struct sctp_authkeyid val;
struct sctp_association *asoc;
+ if (!sctp_auth_enable)
+ return -EACCES;
+
if (len < sizeof(struct sctp_authkeyid))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(struct sctp_authkeyid)))
@@ -5233,6 +5268,12 @@ static int sctp_getsockopt_active_key(struct sock *sk, int len,
else
val.scact_keynumber = sctp_sk(sk)->ep->active_key_id;
+ len = sizeof(struct sctp_authkeyid);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (copy_to_user(optval, &val, len))
+ return -EFAULT;
+
return 0;
}
@@ -5243,13 +5284,16 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
- u32 num_chunks;
+ u32 num_chunks = 0;
char __user *to;
- if (len <= sizeof(struct sctp_authchunks))
+ if (!sctp_auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
@@ -5258,20 +5302,21 @@ static int sctp_getsockopt_peer_auth_chunks(struct sock *sk, int len,
return -EINVAL;
ch = asoc->peer.peer_chunks;
+ if (!ch)
+ goto num;
/* See if the user provided enough room for all the data */
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
if (len < num_chunks)
return -EINVAL;
- len = num_chunks;
- if (put_user(len, optlen))
+ if (copy_to_user(to, ch->chunks, num_chunks))
return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
+ if (put_user(len, optlen)) return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
- if (copy_to_user(to, ch->chunks, len))
- return -EFAULT;
-
return 0;
}
@@ -5282,13 +5327,16 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
struct sctp_authchunks val;
struct sctp_association *asoc;
struct sctp_chunks_param *ch;
- u32 num_chunks;
+ u32 num_chunks = 0;
char __user *to;
- if (len <= sizeof(struct sctp_authchunks))
+ if (!sctp_auth_enable)
+ return -EACCES;
+
+ if (len < sizeof(struct sctp_authchunks))
return -EINVAL;
- if (copy_from_user(&val, p, sizeof(struct sctp_authchunks)))
+ if (copy_from_user(&val, optval, sizeof(struct sctp_authchunks)))
return -EFAULT;
to = p->gauth_chunks;
@@ -5301,17 +5349,21 @@ static int sctp_getsockopt_local_auth_chunks(struct sock *sk, int len,
else
ch = sctp_sk(sk)->ep->auth_chunk_list;
+ if (!ch)
+ goto num;
+
num_chunks = ntohs(ch->param_hdr.length) - sizeof(sctp_paramhdr_t);
- if (len < num_chunks)
+ if (len < sizeof(struct sctp_authchunks) + num_chunks)
return -EINVAL;
- len = num_chunks;
+ if (copy_to_user(to, ch->chunks, num_chunks))
+ return -EFAULT;
+num:
+ len = sizeof(struct sctp_authchunks) + num_chunks;
if (put_user(len, optlen))
return -EFAULT;
if (put_user(num_chunks, &p->gauth_number_of_chunks))
return -EFAULT;
- if (copy_to_user(to, ch->chunks, len))
- return -EFAULT;
return 0;
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0326d3060bc..0747d8a9232 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -85,7 +85,7 @@ static struct top_srv topsrv = { 0 };
static u32 htohl(u32 in, int swap)
{
- return swap ? (u32)___constant_swab32(in) : in;
+ return swap ? swab32(in) : in;
}
/**
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index df5b3886c36..d98ffb75119 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1277,6 +1277,7 @@ static int rtnetlink_fill_iwinfo(struct sk_buff *skb, struct net_device *dev,
r->ifi_flags = dev_get_flags(dev);
r->ifi_change = 0; /* Wireless changes don't affect those flags */
+ NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
/* Add the wireless events in the netlink packet */
NLA_PUT(skb, IFLA_WIRELESS, event_len, event);
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 3f964db908a..ac25b4c0e98 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -112,16 +112,13 @@ error_nolock:
int xfrm_output_resume(struct sk_buff *skb, int err)
{
while (likely((err = xfrm_output_one(skb, err)) == 0)) {
- struct xfrm_state *x;
-
nf_reset(skb);
err = skb->dst->ops->local_out(skb);
if (unlikely(err != 1))
goto out;
- x = skb->dst->xfrm;
- if (!x)
+ if (!skb->dst->xfrm)
return dst_output(skb);
err = nf_hook(skb->dst->ops->family,