path: root/net/netfilter
author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 21:04:47 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-20 21:04:47 -0700
commit	3b59bf081622b6446db77ad06c93fe23677bc533 (patch)
tree	3f4bb5a27c90cc86994a1f6d3c53fbf9208003cb /net/netfilter
parent	e45836fafe157df137a837093037f741ad8f4c90 (diff)
parent	bbdb32cb5b73597386913d052165423b9d736145 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking merge from David Miller:

 "1) Move ixgbe driver over to purely page based buffering on receive. From Alexander Duyck.
  2) Add receive packet steering support to e1000e, from Bruce Allan.
  3) Convert TCP MD5 support over to RCU, from Eric Dumazet.
  4) Reduce cpu usage in handling out-of-order TCP packets on modern systems, also from Eric Dumazet.
  5) Support the IP{,V6}_UNICAST_IF socket options, making the wine folks happy, from Erich Hoover.
  6) Support VLAN trunking from guests in hyperv driver, from Haiyang Zhang.
  7) Support byte-queue-limits in r8169, from Igor Maravic.
  8) Outline code intended for IP_RECVTOS in IP_PKTOPTIONS existed but was never properly implemented, Jiri Benc fixed that.
  9) 64-bit statistics support in r8169 and 8139too, from Junchang Wang.
 10) Support kernel side dump filtering by ctmark in netfilter ctnetlink, from Pablo Neira Ayuso.
 11) Support byte-queue-limits in gianfar driver, from Paul Gortmaker.
 12) Add new peek socket options to assist with socket migration, from Pavel Emelyanov.
 13) Add sch_plug packet scheduler whose queue is controlled by userland daemons using explicit freeze and release commands. From Shriram Rajagopalan.
 14) Fix FCOE checksum offload handling on transmit, from Yi Zou."

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1846 commits)
  Fix pppol2tp getsockname()
  Remove printk from rds_sendmsg
  ipv6: fix incorrent ipv6 ipsec packet fragment
  cpsw: Hook up default ndo_change_mtu.
  net: qmi_wwan: fix build error due to cdc-wdm dependecy
  netdev: driver: ethernet: Add TI CPSW driver
  netdev: driver: ethernet: add cpsw address lookup engine support
  phy: add am79c874 PHY support
  mlx4_core: fix race on comm channel
  bonding: send igmp report for its master
  fs_enet: Add MPC5125 FEC support and PHY interface selection
  net: bpf_jit: fix BPF_S_LDX_B_MSH compilation
  net: update the usage of CHECKSUM_UNNECESSARY
  fcoe: use CHECKSUM_UNNECESSARY instead of CHECKSUM_PARTIAL on tx
  net: do not do gso for CHECKSUM_UNNECESSARY in netif_needs_gso
  ixgbe: Fix issues with SR-IOV loopback when flow control is disabled
  net/hyperv: Fix the code handling tx busy
  ixgbe: fix namespace issues when FCoE/DCB is not enabled
  rtlwifi: Remove unused ETH_ADDR_LEN defines
  igbvf: Use ETH_ALEN
  ...

Fix up fairly trivial conflicts in drivers/isdn/gigaset/interface.c and drivers/net/usb/{Kconfig,qmi_wwan.c} as per David.
Diffstat (limited to 'net/netfilter')
-rw-r--r--	net/netfilter/Kconfig	30
-rw-r--r--	net/netfilter/Makefile	3
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ip.c	4
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_ipmac.c	4
-rw-r--r--	net/netfilter/ipset/ip_set_bitmap_port.c	4
-rw-r--r--	net/netfilter/ipset/ip_set_core.c	26
-rw-r--r--	net/netfilter/ipset/ip_set_getport.c	4
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ip.c	18
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipport.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportip.c	10
-rw-r--r--	net/netfilter/ipset/ip_set_hash_ipportnet.c	147
-rw-r--r--	net/netfilter/ipset/ip_set_hash_net.c	89
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netiface.c	84
-rw-r--r--	net/netfilter/ipset/ip_set_hash_netport.c	150
-rw-r--r--	net/netfilter/ipset/ip_set_list_set.c	2
-rw-r--r--	net/netfilter/nf_conntrack_core.c	34
-rw-r--r--	net/netfilter/nf_conntrack_ecache.c	55
-rw-r--r--	net/netfilter/nf_conntrack_helper.c	54
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	218
-rw-r--r--	net/netfilter/nf_conntrack_proto_dccp.c	86
-rw-r--r--	net/netfilter/nf_conntrack_proto_generic.c	77
-rw-r--r--	net/netfilter/nf_conntrack_proto_gre.c	82
-rw-r--r--	net/netfilter/nf_conntrack_proto_sctp.c	83
-rw-r--r--	net/netfilter/nf_conntrack_proto_tcp.c	168
-rw-r--r--	net/netfilter/nf_conntrack_proto_udp.c	106
-rw-r--r--	net/netfilter/nf_conntrack_proto_udplite.c	103
-rw-r--r--	net/netfilter/nf_conntrack_timeout.c	60
-rw-r--r--	net/netfilter/nfnetlink_acct.c	6
-rw-r--r--	net/netfilter/nfnetlink_cttimeout.c	429
-rw-r--r--	net/netfilter/xt_CT.c	220
-rw-r--r--	net/netfilter/xt_LOG.c	925
31 files changed, 2996 insertions, 295 deletions
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index f8ac4ef0b79..0c6f67e8f2e 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -103,6 +103,16 @@ config NF_CONNTRACK_EVENTS
If unsure, say `N'.
+config NF_CONNTRACK_TIMEOUT
+ bool 'Connection tracking timeout'
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timeout
+ extension. This allows you to attach timeout policies to flow
+ via the CT target.
+
+ If unsure, say `N'.
+
config NF_CONNTRACK_TIMESTAMP
bool 'Connection tracking timestamping'
depends on NETFILTER_ADVANCED
@@ -314,6 +324,17 @@ config NF_CT_NETLINK
help
This option enables support for a netlink-based userspace interface
+config NF_CT_NETLINK_TIMEOUT
+ tristate 'Connection tracking timeout tuning via Netlink'
+ select NETFILTER_NETLINK
+ depends on NETFILTER_ADVANCED
+ help
+ This option enables support for connection tracking timeout
+ fine-grain tuning. This allows you to attach specific timeout
+ policies to flows, instead of using the global timeout policy.
+
+ If unsure, say `N'.
+
endif # NF_CONNTRACK
# transparent proxy support
@@ -524,6 +545,15 @@ config NETFILTER_XT_TARGET_LED
For more information on the LEDs available on your system, see
Documentation/leds/leds-class.txt
+config NETFILTER_XT_TARGET_LOG
+ tristate "LOG target support"
+ default m if NETFILTER_ADVANCED=n
+ help
+ This option adds a `LOG' target, which allows you to create rules in
+ any iptables table which records the packet header to the syslog.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_MARK
tristate '"MARK" target support'
depends on NETFILTER_ADVANCED
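
The two timeout Kconfig entries above describe the behaviour only in prose. Purely as an illustration of the lookup order they imply (a timeout policy attached to a flow via the CT target overrides the global per-protocol default), here is a small standalone sketch; none of these names are kernel APIs, and the 432000 s figure is quoted from memory as the stock established-TCP timeout:

#include <stdio.h>

struct flow {
	const unsigned int *policy;	/* attached timeout policy, or NULL */
};

/* Per-flow policy wins; otherwise fall back to the global default. */
static unsigned int effective_timeout(const struct flow *f,
				      unsigned int global_default)
{
	return f->policy ? *f->policy : global_default;
}

int main(void)
{
	unsigned int strict = 30;			/* seconds, per-flow policy */
	struct flow tuned = { .policy = &strict };
	struct flow plain = { .policy = NULL };

	printf("tuned flow: %u s\n", effective_timeout(&tuned, 432000));
	printf("plain flow: %u s\n", effective_timeout(&plain, 432000));
	return 0;
}
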
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 40f4c3d636c..ca3676586f5 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -1,6 +1,7 @@
netfilter-objs := core.o nf_log.o nf_queue.o nf_sockopt.o
nf_conntrack-y := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_expect.o nf_conntrack_helper.o nf_conntrack_proto.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o nf_conntrack_extend.o nf_conntrack_acct.o
+nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMEOUT) += nf_conntrack_timeout.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_TIMESTAMP) += nf_conntrack_timestamp.o
nf_conntrack-$(CONFIG_NF_CONNTRACK_EVENTS) += nf_conntrack_ecache.o
@@ -22,6 +23,7 @@ obj-$(CONFIG_NF_CT_PROTO_UDPLITE) += nf_conntrack_proto_udplite.o
# netlink interface for nf_conntrack
obj-$(CONFIG_NF_CT_NETLINK) += nf_conntrack_netlink.o
+obj-$(CONFIG_NF_CT_NETLINK_TIMEOUT) += nfnetlink_cttimeout.o
# connection tracking helpers
nf_conntrack_h323-objs := nf_conntrack_h323_main.o nf_conntrack_h323_asn1.o
@@ -58,6 +60,7 @@ obj-$(CONFIG_NETFILTER_XT_TARGET_CT) += xt_CT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_DSCP) += xt_DSCP.o
obj-$(CONFIG_NETFILTER_XT_TARGET_HL) += xt_HL.o
obj-$(CONFIG_NETFILTER_XT_TARGET_LED) += xt_LED.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_LOG) += xt_LOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NFQUEUE) += xt_NFQUEUE.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index e3e73997c3b..a72a4dff003 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -442,7 +442,7 @@ init_map_ip(struct ip_set *set, struct bitmap_ip *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_INET;
+ set->family = NFPROTO_IPV4;
return true;
}
@@ -550,7 +550,7 @@ static struct ip_set_type bitmap_ip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_INET,
+ .family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ip_create,
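
The hunks in this file also introduce the substitution repeated throughout the ipset changes below: AF_INET, AF_INET6 and AF_UNSPEC become NFPROTO_IPV4, NFPROTO_IPV6 and NFPROTO_UNSPEC. To the best of my knowledge this is a semantic cleanup only, because the NFPROTO_* constants in linux/netfilter.h are defined with the same numeric values as the corresponding AF_* constants; a throwaway check of that assumption (the NFPROTO_* values are restated locally rather than taken from the kernel header):

#include <assert.h>
#include <sys/socket.h>		/* AF_UNSPEC, AF_INET, AF_INET6 */

#define NFPROTO_UNSPEC	0	/* assumed to mirror linux/netfilter.h */
#define NFPROTO_IPV4	2
#define NFPROTO_IPV6	10

int main(void)
{
	assert(NFPROTO_UNSPEC == AF_UNSPEC);
	assert(NFPROTO_IPV4 == AF_INET);
	assert(NFPROTO_IPV6 == AF_INET6);
	return 0;
}
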
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 56096f54497..81324c12c5b 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -543,7 +543,7 @@ init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_INET;
+ set->family = NFPROTO_IPV4;
return true;
}
@@ -623,7 +623,7 @@ static struct ip_set_type bitmap_ipmac_type = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_MAC,
.dimension = IPSET_DIM_TWO,
- .family = AF_INET,
+ .family = NFPROTO_IPV4,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_ipmac_create,
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 29ba93bb94b..382ec28ba72 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -422,7 +422,7 @@ init_map_port(struct ip_set *set, struct bitmap_port *map,
map->timeout = IPSET_NO_TIMEOUT;
set->data = map;
- set->family = AF_UNSPEC;
+ set->family = NFPROTO_UNSPEC;
return true;
}
@@ -483,7 +483,7 @@ static struct ip_set_type bitmap_port_type = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_PORT,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = bitmap_port_create,
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 32dbf0fa89d..e6c1c9605a5 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -69,7 +69,7 @@ find_set_type(const char *name, u8 family, u8 revision)
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
- (type->family == family || type->family == AF_UNSPEC) &&
+ (type->family == family || type->family == NFPROTO_UNSPEC) &&
revision >= type->revision_min &&
revision <= type->revision_max)
return type;
@@ -149,7 +149,7 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
rcu_read_lock();
list_for_each_entry_rcu(type, &ip_set_type_list, list)
if (STREQ(type->name, name) &&
- (type->family == family || type->family == AF_UNSPEC)) {
+ (type->family == family || type->family == NFPROTO_UNSPEC)) {
found = true;
if (type->revision_min < *min)
*min = type->revision_min;
@@ -164,8 +164,8 @@ __find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
__find_set_type_minmax(name, family, min, max, true);
}
-#define family_name(f) ((f) == AF_INET ? "inet" : \
- (f) == AF_INET6 ? "inet6" : "any")
+#define family_name(f) ((f) == NFPROTO_IPV4 ? "inet" : \
+ (f) == NFPROTO_IPV6 ? "inet6" : "any")
/* Register a set type structure. The type is identified by
* the unique triple of name, family and revision.
@@ -354,7 +354,7 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
read_lock_bh(&set->lock);
@@ -387,7 +387,7 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
@@ -410,7 +410,7 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
pr_debug("set %s, index %u\n", set->name, index);
if (opt->dim < set->type->dimension ||
- !(opt->family == set->family || set->family == AF_UNSPEC))
+ !(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return 0;
write_lock_bh(&set->lock);
@@ -575,7 +575,7 @@ start_msg(struct sk_buff *skb, u32 pid, u32 seq, unsigned int flags,
return NULL;
nfmsg = nlmsg_data(nlh);
- nfmsg->nfgen_family = AF_INET;
+ nfmsg->nfgen_family = NFPROTO_IPV4;
nfmsg->version = NFNETLINK_V0;
nfmsg->res_id = 0;
@@ -1162,9 +1162,13 @@ ip_set_dump(struct sock *ctnl, struct sk_buff *skb,
if (unlikely(protocol_failed(attr)))
return -IPSET_ERR_PROTOCOL;
- return netlink_dump_start(ctnl, skb, nlh,
- ip_set_dump_start,
- ip_set_dump_done, 0);
+ {
+ struct netlink_dump_control c = {
+ .dump = ip_set_dump_start,
+ .done = ip_set_dump_done,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
}
/* Add, del and test */
diff --git a/net/netfilter/ipset/ip_set_getport.c b/net/netfilter/ipset/ip_set_getport.c
index 1f03556666f..6fdf88ae235 100644
--- a/net/netfilter/ipset/ip_set_getport.c
+++ b/net/netfilter/ipset/ip_set_getport.c
@@ -136,10 +136,10 @@ ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
u8 proto;
switch (pf) {
- case AF_INET:
+ case NFPROTO_IPV4:
ret = ip_set_get_ip4_port(skb, src, port, &proto);
break;
- case AF_INET6:
+ case NFPROTO_IPV6:
ret = ip_set_get_ip6_port(skb, src, port, &proto);
break;
default:
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c
index 4015fcaf87b..5139dea6019 100644
--- a/net/netfilter/ipset/ip_set_hash_ip.c
+++ b/net/netfilter/ipset/ip_set_hash_ip.c
@@ -366,11 +366,11 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u8 netmask, hbits;
struct ip_set_hash *h;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
- netmask = set->family == AF_INET ? 32 : 128;
+ netmask = set->family == NFPROTO_IPV4 ? 32 : 128;
pr_debug("Create set %s with family %s\n",
- set->name, set->family == AF_INET ? "inet" : "inet6");
+ set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_MAXELEM) ||
@@ -389,8 +389,8 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_NETMASK]) {
netmask = nla_get_u8(tb[IPSET_ATTR_NETMASK]);
- if ((set->family == AF_INET && netmask > 32) ||
- (set->family == AF_INET6 && netmask > 128) ||
+ if ((set->family == NFPROTO_IPV4 && netmask > 32) ||
+ (set->family == NFPROTO_IPV6 && netmask > 128) ||
netmask == 0)
return -IPSET_ERR_INVALID_NETMASK;
}
@@ -419,15 +419,15 @@ hash_ip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_tvariant : &hash_ip6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ip4_gc_init(set);
else
hash_ip6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ip4_variant : &hash_ip6_variant;
}
@@ -443,7 +443,7 @@ static struct ip_set_type hash_ip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = hash_ip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c
index 37d667e3f6f..9c27e249c17 100644
--- a/net/netfilter/ipset/ip_set_hash_ipport.c
+++ b/net/netfilter/ipset/ip_set_hash_ipport.c
@@ -450,7 +450,7 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -490,15 +490,15 @@ hash_ipport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_tvariant : &hash_ipport6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipport4_gc_init(set);
else
hash_ipport6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipport4_variant : &hash_ipport6_variant;
}
@@ -514,7 +514,7 @@ static struct ip_set_type hash_ipport_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipport_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c
index e69e2718fbe..9134057c072 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportip.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportip.c
@@ -468,7 +468,7 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -508,15 +508,15 @@ hash_ipportip_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_tvariant : &hash_ipportip6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipportip4_gc_init(set);
else
hash_ipportip6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportip4_variant : &hash_ipportip6_variant;
}
@@ -532,7 +532,7 @@ static struct ip_set_type hash_ipportip_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 1, /* SCTP and UDPLITE support added */
.create = hash_ipportip_create,
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c
index 64199b4e93c..5d05e696986 100644
--- a/net/netfilter/ipset/ip_set_hash_ipportnet.c
+++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c
@@ -41,12 +41,19 @@ hash_ipportnet_same_set(const struct ip_set *a, const struct ip_set *b);
/* The type variant functions: IPv4 */
+/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
+ * However this way we have to store internally cidr - 1,
+ * dancing back and forth.
+ */
+#define IP_SET_HASH_WITH_NETS_PACKED
+
/* Member elements without timeout */
struct hash_ipportnet4_elem {
__be32 ip;
__be32 ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
};
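
To spell out the comment above: since cidr 0 is not supported, the element stores cidr - 1 in a 7-bit field, which frees the eighth bit of the byte for the nomatch flag, and every consumer has to add the 1 back (the "+ 1" sprinkled through the list/tlist and kadt/uadt hunks below). A self-contained sketch of that round trip, using illustrative names rather than the kernel structures:

#include <assert.h>
#include <stdint.h>

struct packed_elem {
	uint8_t cidr:7;		/* stores real_cidr - 1 */
	uint8_t nomatch:1;	/* bit freed by the cidr - 1 trick */
};

static void set_netmask(struct packed_elem *e, uint8_t cidr)
{
	e->cidr = cidr - 1;	/* what the data_netmask() hunks now do */
}

static uint8_t real_cidr(const struct packed_elem *e)
{
	return e->cidr + 1;	/* what every dump/list path adds back */
}

int main(void)
{
	struct packed_elem e = { 0 };

	set_netmask(&e, 32);		/* HOST_MASK for IPv4 */
	e.nomatch = 1;
	assert(real_cidr(&e) == 32);	/* cidr survives sharing the byte */
	assert(e.nomatch == 1);
	return 0;
}
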
@@ -55,7 +62,8 @@ struct hash_ipportnet4_telem {
__be32 ip;
__be32 ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
@@ -86,10 +94,22 @@ hash_ipportnet4_data_copy(struct hash_ipportnet4_elem *dst,
}
static inline void
+hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr)
{
elem->ip2 &= ip_set_netmask(cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static inline void
@@ -102,11 +122,15 @@ static bool
hash_ipportnet4_data_list(struct sk_buff *skb,
const struct hash_ipportnet4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -119,14 +143,17 @@ hash_ipportnet4_data_tlist(struct sk_buff *skb,
{
const struct hash_ipportnet4_telem *tdata =
(const struct hash_ipportnet4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP2, tdata->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -158,13 +185,11 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
@@ -172,7 +197,7 @@ hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb,
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2);
- data.ip2 &= ip_set_netmask(data.cidr);
+ data.ip2 &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -183,17 +208,19 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
+ struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -208,9 +235,10 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (tb[IPSET_ATTR_CIDR2]) {
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
- if (!data.cidr)
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
@@ -236,12 +264,18 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
if (adt == IPSET_TEST ||
!(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports ||
tb[IPSET_ATTR_IP2_TO])) {
data.ip = htonl(ip);
- data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr));
+ data.ip2 = htonl(ip2_from & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
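
The block just added parks the userspace CADT flags in the upper 16 bits of the flags word passed to the add routine, while the lower half keeps carrying command flags such as IPSET_FLAG_EXIST (checked by ip_set_eexist() right below); the per-type data_flags() hook, which lives in ip_set_ahash.h and is not part of this diff, is then expected to recover the CADT half with a right shift by 16. That is at least the convention this standalone sketch assumes, with flag values restated locally for illustration:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_FLAG_EXIST	0x0001u	/* command flag, lower 16 bits */
#define EXAMPLE_FLAG_NOMATCH	0x0004u	/* CADT flag, parked in the upper 16 bits */

int main(void)
{
	uint32_t flags = EXAMPLE_FLAG_EXIST;		/* command flags from the request */
	uint32_t cadt_flags = EXAMPLE_FLAG_NOMATCH;	/* from IPSET_ATTR_CADT_FLAGS */

	if (cadt_flags & EXAMPLE_FLAG_NOMATCH)
		flags |= cadt_flags << 16;		/* the shift added above */

	assert(flags & EXAMPLE_FLAG_EXIST);		/* command half untouched */
	assert((flags >> 16) & EXAMPLE_FLAG_NOMATCH);	/* CADT half recoverable */
	return 0;
}
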
@@ -275,7 +309,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip2_from + UINT_MAX == ip2_to)
return -IPSET_ERR_HASH_RANGE;
} else {
- ip_set_mask_from_to(ip2_from, ip2_to, data.cidr);
+ ip_set_mask_from_to(ip2_from, ip2_to, data.cidr + 1);
}
if (retried)
@@ -290,7 +324,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
while (!after(ip2, ip2_to)) {
data.ip2 = htonl(ip2);
ip2_last = ip_set_range_to_cidr(ip2, ip2_to,
- &data.cidr);
+ &cidr);
+ data.cidr = cidr - 1;
ret = adtfn(set, &data, timeout, flags);
if (ret && !ip_set_eexist(ret, flags))
@@ -321,7 +356,8 @@ struct hash_ipportnet6_elem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
};
@@ -329,7 +365,8 @@ struct hash_ipportnet6_telem {
union nf_inet_addr ip;
union nf_inet_addr ip2;
__be16 port;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
u8 proto;
unsigned long timeout;
};
@@ -360,6 +397,18 @@ hash_ipportnet6_data_copy(struct hash_ipportnet6_elem *dst,
}
static inline void
+hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_ipportnet6_data_zero_out(struct hash_ipportnet6_elem *elem)
{
elem->proto = 0;
@@ -378,18 +427,22 @@ static inline void
hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip2, cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static bool
hash_ipportnet6_data_list(struct sk_buff *skb,
const struct hash_ipportnet6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -402,14 +455,17 @@ hash_ipportnet6_data_tlist(struct sk_buff *skb,
{
const struct hash_ipportnet6_telem *e =
(const struct hash_ipportnet6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP2, &data->ip2);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR2, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -438,13 +494,11 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet6_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
@@ -452,7 +506,7 @@ hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb,
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &data.ip2.in6);
- ip6_netmask(&data.ip2, data.cidr);
+ ip6_netmask(&data.ip2, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -463,16 +517,18 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_ipportnet6_elem data = { .cidr = HOST_MASK };
+ struct hash_ipportnet6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
tb[IPSET_ATTR_IP_TO] ||
tb[IPSET_ATTR_CIDR]))
return -IPSET_ERR_PROTOCOL;
@@ -490,13 +546,14 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
- if (tb[IPSET_ATTR_CIDR2])
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
-
- if (!data.cidr)
- return -IPSET_ERR_INVALID_CIDR;
+ if (tb[IPSET_ATTR_CIDR2]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
+ }
- ip6_netmask(&data.ip2, data.cidr);
+ ip6_netmask(&data.ip2, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -521,6 +578,12 @@ hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -554,7 +617,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -573,7 +636,7 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -596,16 +659,16 @@ hash_ipportnet_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_tvariant
: &hash_ipportnet6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_ipportnet4_gc_init(set);
else
hash_ipportnet6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_ipportnet4_variant : &hash_ipportnet6_variant;
}
@@ -621,10 +684,11 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2,
.dimension = IPSET_DIM_THREE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
- .revision_max = 2, /* Range as input support for IPv4 added */
+ /* 2 Range as input support for IPv4 added */
+ .revision_max = 3, /* nomatch flag support added */
.create = hash_ipportnet_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -643,6 +707,7 @@ static struct ip_set_type hash_ipportnet_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_CIDR2] = { .type = NLA_U8 },
[IPSET_ATTR_PROTO] = { .type = NLA_U8 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
},
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c
index 28988196775..7c3d945517c 100644
--- a/net/netfilter/ipset/ip_set_hash_net.c
+++ b/net/netfilter/ipset/ip_set_hash_net.c
@@ -43,7 +43,7 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b);
struct hash_net4_elem {
__be32 ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
};
@@ -51,7 +51,7 @@ struct hash_net4_elem {
struct hash_net4_telem {
__be32 ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
unsigned long timeout;
};
@@ -61,7 +61,8 @@ hash_net4_data_equal(const struct hash_net4_elem *ip1,
const struct hash_net4_elem *ip2,
u32 *multi)
{
- return ip1->ip == ip2->ip && ip1->cidr == ip2->cidr;
+ return ip1->ip == ip2->ip &&
+ ip1->cidr == ip2->cidr;
}
static inline bool
@@ -76,6 +77,19 @@ hash_net4_data_copy(struct hash_net4_elem *dst,
{
dst->ip = src->ip;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_net4_data_match(const struct hash_net4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -95,8 +109,12 @@ hash_net4_data_zero_out(struct hash_net4_elem *elem)
static bool
hash_net4_data_list(struct sk_buff *skb, const struct hash_net4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -108,11 +126,14 @@ hash_net4_data_tlist(struct sk_buff *skb, const struct hash_net4_elem *data)
{
const struct hash_net4_telem *tdata =
(const struct hash_net4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, tdata->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -167,7 +188,8 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -179,7 +201,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -189,6 +211,12 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
data.ip = htonl(ip & ip_set_hostmask(data.cidr));
ret = adtfn(set, &data, timeout, flags);
@@ -236,14 +264,14 @@ hash_net_same_set(const struct ip_set *a, const struct ip_set *b)
struct hash_net6_elem {
union nf_inet_addr ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
};
struct hash_net6_telem {
union nf_inet_addr ip;
u16 padding0;
- u8 padding1;
+ u8 nomatch;
u8 cidr;
unsigned long timeout;
};
@@ -269,6 +297,19 @@ hash_net6_data_copy(struct hash_net6_elem *dst,
{
dst->ip.in6 = src->ip.in6;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_net6_data_match(const struct hash_net6_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -296,8 +337,12 @@ hash_net6_data_netmask(struct hash_net6_elem *elem, u8 cidr)
static bool
hash_net6_data_list(struct sk_buff *skb, const struct hash_net6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -309,11 +354,14 @@ hash_net6_data_tlist(struct sk_buff *skb, const struct hash_net6_elem *data)
{
const struct hash_net6_telem *e =
(const struct hash_net6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, e->cidr);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -366,7 +414,8 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -381,7 +430,7 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
@@ -392,6 +441,12 @@ hash_net6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -406,7 +461,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
struct ip_set_hash *h;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -425,7 +480,7 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -448,15 +503,15 @@ hash_net_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_net4_tvariant : &hash_net6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_net4_gc_init(set);
else
hash_net6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_net4_variant : &hash_net6_variant;
}
@@ -472,9 +527,10 @@ static struct ip_set_type hash_net_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
- .revision_max = 1, /* Range as input support for IPv4 added */
+ /* = 1 Range as input support for IPv4 added */
+ .revision_max = 2, /* nomatch flag support added */
.create = hash_net_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -488,6 +544,7 @@ static struct ip_set_type hash_net_type __read_mostly = {
[IPSET_ATTR_IP_TO] = { .type = NLA_NESTED },
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c
index e13095deb50..f24037ff432 100644
--- a/net/netfilter/ipset/ip_set_hash_netiface.c
+++ b/net/netfilter/ipset/ip_set_hash_netiface.c
@@ -163,7 +163,8 @@ struct hash_netiface4_elem_hashed {
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface4_elem_hashed)
@@ -173,7 +174,8 @@ struct hash_netiface4_elem {
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
};
@@ -182,7 +184,8 @@ struct hash_netiface4_telem {
__be32 ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
unsigned long timeout;
};
@@ -207,11 +210,25 @@ hash_netiface4_data_isnull(const struct hash_netiface4_elem *elem)
static inline void
hash_netiface4_data_copy(struct hash_netiface4_elem *dst,
- const struct hash_netiface4_elem *src) {
+ const struct hash_netiface4_elem *src)
+{
dst->ip = src->ip;
dst->cidr = src->cidr;
dst->physdev = src->physdev;
dst->iface = src->iface;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_netiface4_data_match(const struct hash_netiface4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
@@ -233,11 +250,13 @@ hash_netiface4_data_list(struct sk_buff *skb,
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -252,11 +271,13 @@ hash_netiface4_data_tlist(struct sk_buff *skb,
(const struct hash_netiface4_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
@@ -361,7 +382,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR]) {
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
}
@@ -387,6 +408,8 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[],
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
+ if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
+ flags |= (cadt_flags << 16);
}
if (adt == IPSET_TEST || !tb[IPSET_ATTR_IP_TO]) {
@@ -440,7 +463,8 @@ struct hash_netiface6_elem_hashed {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
};
#define HKEY_DATALEN sizeof(struct hash_netiface6_elem_hashed)
@@ -449,7 +473,8 @@ struct hash_netiface6_elem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
};
@@ -457,7 +482,8 @@ struct hash_netiface6_telem {
union nf_inet_addr ip;
u8 physdev;
u8 cidr;
- u16 padding;
+ u8 nomatch;
+ u8 padding;
const char *iface;
unsigned long timeout;
};
@@ -488,8 +514,21 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,
}
static inline void
+hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)
+{
+ dst->nomatch = flags & IPSET_FLAG_NOMATCH;
+}
+
+static inline bool
+hash_netiface6_data_match(const struct hash_netiface6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)
{
+ elem->cidr = 0;
}
static inline void
@@ -514,11 +553,13 @@ hash_netiface6_data_list(struct sk_buff *skb,
{
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -533,11 +574,13 @@ hash_netiface6_data_tlist(struct sk_buff *skb,
(const struct hash_netiface6_telem *)data;
u32 flags = data->physdev ? IPSET_FLAG_PHYSDEV : 0;
+ if (data->nomatch)
+ flags |= IPSET_FLAG_NOMATCH;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
NLA_PUT_STRING(skb, IPSET_ATTR_IFACE, data->iface);
if (flags)
- NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, flags);
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
return 0;
@@ -636,7 +679,7 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
if (tb[IPSET_ATTR_CIDR])
data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ if (!data.cidr || data.cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
ip6_netmask(&data.ip, data.cidr);
@@ -662,6 +705,8 @@ hash_netiface6_uadt(struct ip_set *set, struct nlattr *tb[],
u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
if (cadt_flags & IPSET_FLAG_PHYSDEV)
data.physdev = 1;
+ if (adt == IPSET_ADD && (cadt_flags & IPSET_FLAG_NOMATCH))
+ flags |= (cadt_flags << 16);
}
ret = adtfn(set, &data, timeout, flags);
@@ -678,7 +723,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -697,7 +742,7 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -722,15 +767,15 @@ hash_netiface_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_tvariant : &hash_netiface6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_netiface4_gc_init(set);
else
hash_netiface6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netiface4_variant : &hash_netiface6_variant;
}
@@ -746,8 +791,9 @@ static struct ip_set_type hash_netiface_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_IFACE,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
+ .revision_max = 1, /* nomatch flag support added */
.create = hash_netiface_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c
index 8f9de7207ec..ce2e77100b6 100644
--- a/net/netfilter/ipset/ip_set_hash_netport.c
+++ b/net/netfilter/ipset/ip_set_hash_netport.c
@@ -40,12 +40,19 @@ hash_netport_same_set(const struct ip_set *a, const struct ip_set *b);
/* The type variant functions: IPv4 */
+/* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0
+ * However this way we have to store internally cidr - 1,
+ * dancing back and forth.
+ */
+#define IP_SET_HASH_WITH_NETS_PACKED
+
/* Member elements without timeout */
struct hash_netport4_elem {
__be32 ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
};
/* Member elements with timeout support */
@@ -53,7 +60,8 @@ struct hash_netport4_telem {
__be32 ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
unsigned long timeout;
};
@@ -82,13 +90,26 @@ hash_netport4_data_copy(struct hash_netport4_elem *dst,
dst->port = src->port;
dst->proto = src->proto;
dst->cidr = src->cidr;
+ dst->nomatch = src->nomatch;
+}
+
+static inline void
+hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_netport4_data_match(const struct hash_netport4_elem *elem)
+{
+ return !elem->nomatch;
}
static inline void
hash_netport4_data_netmask(struct hash_netport4_elem *elem, u8 cidr)
{
elem->ip &= ip_set_netmask(cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static inline void
@@ -101,10 +122,14 @@ static bool
hash_netport4_data_list(struct sk_buff *skb,
const struct hash_netport4_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -117,13 +142,16 @@ hash_netport4_data_tlist(struct sk_buff *skb,
{
const struct hash_netport4_telem *tdata =
(const struct hash_netport4_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR4(skb, IPSET_ATTR_IP, tdata->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, tdata->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(tdata->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
@@ -154,20 +182,18 @@ hash_netport4_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport4_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip);
- data.ip &= ip_set_netmask(data.cidr);
+ data.ip &= ip_set_netmask(data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -178,16 +204,18 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netport4_elem data = { .cidr = HOST_MASK };
+ struct hash_netport4_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to, p = 0, ip = 0, ip_to, last;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (tb[IPSET_ATTR_LINENO])
@@ -198,9 +226,10 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
return ret;
if (tb[IPSET_ATTR_CIDR]) {
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
}
if (tb[IPSET_ATTR_PORT])
@@ -227,8 +256,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
}
with_ports = with_ports && tb[IPSET_ATTR_PORT_TO];
+
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !(with_ports || tb[IPSET_ATTR_IP_TO])) {
- data.ip = htonl(ip & ip_set_hostmask(data.cidr));
+ data.ip = htonl(ip & ip_set_hostmask(data.cidr + 1));
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
}
@@ -248,14 +284,15 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[],
if (ip + UINT_MAX == ip_to)
return -IPSET_ERR_HASH_RANGE;
} else {
- ip_set_mask_from_to(ip, ip_to, data.cidr);
+ ip_set_mask_from_to(ip, ip_to, data.cidr + 1);
}
if (retried)
ip = h->next.ip;
while (!after(ip, ip_to)) {
data.ip = htonl(ip);
- last = ip_set_range_to_cidr(ip, ip_to, &data.cidr);
+ last = ip_set_range_to_cidr(ip, ip_to, &cidr);
+ data.cidr = cidr - 1;
p = retried && ip == h->next.ip ? h->next.port : port;
for (; p <= port_to; p++) {
data.port = htons(p);
@@ -288,14 +325,16 @@ struct hash_netport6_elem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
};
struct hash_netport6_telem {
union nf_inet_addr ip;
__be16 port;
u8 proto;
- u8 cidr;
+ u8 cidr:7;
+ u8 nomatch:1;
unsigned long timeout;
};
@@ -324,6 +363,18 @@ hash_netport6_data_copy(struct hash_netport6_elem *dst,
}
static inline void
+hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)
+{
+ dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);
+}
+
+static inline bool
+hash_netport6_data_match(const struct hash_netport6_elem *elem)
+{
+ return !elem->nomatch;
+}
+
+static inline void
hash_netport6_data_zero_out(struct hash_netport6_elem *elem)
{
elem->proto = 0;
@@ -342,17 +393,21 @@ static inline void
hash_netport6_data_netmask(struct hash_netport6_elem *elem, u8 cidr)
{
ip6_netmask(&elem->ip, cidr);
- elem->cidr = cidr;
+ elem->cidr = cidr - 1;
}
static bool
hash_netport6_data_list(struct sk_buff *skb,
const struct hash_netport6_elem *data)
{
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
+
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &data->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -365,13 +420,16 @@ hash_netport6_data_tlist(struct sk_buff *skb,
{
const struct hash_netport6_telem *e =
(const struct hash_netport6_telem *)data;
+ u32 flags = data->nomatch ? IPSET_FLAG_NOMATCH : 0;
NLA_PUT_IPADDR6(skb, IPSET_ATTR_IP, &e->ip);
NLA_PUT_NET16(skb, IPSET_ATTR_PORT, data->port);
- NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr);
+ NLA_PUT_U8(skb, IPSET_ATTR_CIDR, data->cidr + 1);
NLA_PUT_U8(skb, IPSET_ATTR_PROTO, data->proto);
NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT,
htonl(ip_set_timeout_get(e->timeout)));
+ if (flags)
+ NLA_PUT_NET32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags));
return 0;
nla_put_failure:
@@ -400,20 +458,18 @@ hash_netport6_kadt(struct ip_set *set, const struct sk_buff *skb,
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_netport6_elem data = {
- .cidr = h->nets[0].cidr ? h->nets[0].cidr : HOST_MASK
+ .cidr = h->nets[0].cidr ? h->nets[0].cidr - 1 : HOST_MASK - 1,
};
- if (data.cidr == 0)
- return -EINVAL;
if (adt == IPSET_TEST)
- data.cidr = HOST_MASK;
+ data.cidr = HOST_MASK - 1;
if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC,
&data.port, &data.proto))
return -EINVAL;
ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &data.ip.in6);
- ip6_netmask(&data.ip, data.cidr);
+ ip6_netmask(&data.ip, data.cidr + 1);
return adtfn(set, &data, opt_timeout(opt, h), opt->cmdflags);
}
@@ -424,16 +480,18 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
{
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
- struct hash_netport6_elem data = { .cidr = HOST_MASK };
+ struct hash_netport6_elem data = { .cidr = HOST_MASK - 1 };
u32 port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
+ u8 cidr;
int ret;
if (unlikely(!tb[IPSET_ATTR_IP] ||
!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
!ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
- !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
+ !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
return -IPSET_ERR_PROTOCOL;
if (unlikely(tb[IPSET_ATTR_IP_TO]))
return -IPSET_ERR_HASH_RANGE_UNSUPPORTED;
@@ -445,11 +503,13 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
if (ret)
return ret;
- if (tb[IPSET_ATTR_CIDR])
- data.cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
- if (!data.cidr)
- return -IPSET_ERR_INVALID_CIDR;
- ip6_netmask(&data.ip, data.cidr);
+ if (tb[IPSET_ATTR_CIDR]) {
+ cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);
+ if (!cidr || cidr > HOST_MASK)
+ return -IPSET_ERR_INVALID_CIDR;
+ data.cidr = cidr - 1;
+ }
+ ip6_netmask(&data.ip, data.cidr + 1);
if (tb[IPSET_ATTR_PORT])
data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
@@ -474,6 +534,12 @@ hash_netport6_uadt(struct ip_set *set, struct nlattr *tb[],
timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
}
+ if (tb[IPSET_ATTR_CADT_FLAGS] && adt == IPSET_ADD) {
+ u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
+ if (cadt_flags & IPSET_FLAG_NOMATCH)
+ flags |= (cadt_flags << 16);
+ }
+
if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
ret = adtfn(set, &data, timeout, flags);
return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -507,7 +573,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
u32 hashsize = IPSET_DEFAULT_HASHSIZE, maxelem = IPSET_DEFAULT_MAXELEM;
u8 hbits;
- if (!(set->family == AF_INET || set->family == AF_INET6))
+ if (!(set->family == NFPROTO_IPV4 || set->family == NFPROTO_IPV6))
return -IPSET_ERR_INVALID_FAMILY;
if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_HASHSIZE) ||
@@ -526,7 +592,7 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
h = kzalloc(sizeof(*h)
+ sizeof(struct ip_set_hash_nets)
- * (set->family == AF_INET ? 32 : 128), GFP_KERNEL);
+ * (set->family == NFPROTO_IPV4 ? 32 : 128), GFP_KERNEL);
if (!h)
return -ENOMEM;
@@ -549,15 +615,15 @@ hash_netport_create(struct ip_set *set, struct nlattr *tb[], u32 flags)
if (tb[IPSET_ATTR_TIMEOUT]) {
h->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_tvariant : &hash_netport6_tvariant;
- if (set->family == AF_INET)
+ if (set->family == NFPROTO_IPV4)
hash_netport4_gc_init(set);
else
hash_netport6_gc_init(set);
} else {
- set->variant = set->family == AF_INET
+ set->variant = set->family == NFPROTO_IPV4
? &hash_netport4_variant : &hash_netport6_variant;
}
@@ -573,10 +639,11 @@ static struct ip_set_type hash_netport_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_IP | IPSET_TYPE_PORT,
.dimension = IPSET_DIM_TWO,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
/* 1 SCTP and UDPLITE support added */
- .revision_max = 2, /* Range as input support for IPv4 added */
+ /* 2, Range as input support for IPv4 added */
+ .revision_max = 3, /* nomatch flag support added */
.create = hash_netport_create,
.create_policy = {
[IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 },
@@ -595,6 +662,7 @@ static struct ip_set_type hash_netport_type __read_mostly = {
[IPSET_ATTR_CIDR] = { .type = NLA_U8 },
[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
+ [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
},
.me = THIS_MODULE,
};
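
A note on the "cidr - 1" arithmetic scattered through the hunks above: storing the prefix length off by one lets a 1..128 value share a single byte with the new nomatch bit. A minimal sketch of that packing (hypothetical struct name; the real ipset element layout may differ in detail):

/* Sketch only: why the element stores cidr - 1 alongside the nomatch bit. */
struct netport_key_sketch {
	unsigned char cidr:7;		/* holds cidr - 1, so 0..127 encodes /1 .. /128 */
	unsigned char nomatch:1;	/* set when IPSET_FLAG_NOMATCH was requested */
};

static inline void netport_key_set_cidr(struct netport_key_sketch *k, unsigned char cidr)
{
	k->cidr = cidr - 1;		/* mirrored by the "cidr + 1" in the dump paths above */
}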
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 4d10819d462..7e095f9005f 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -575,7 +575,7 @@ static struct ip_set_type list_set_type __read_mostly = {
.protocol = IPSET_PROTOCOL,
.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
.dimension = IPSET_DIM_ONE,
- .family = AF_UNSPEC,
+ .family = NFPROTO_UNSPEC,
.revision_min = 0,
.revision_max = 0,
.create = list_set_create,
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index fa4b82c8ae8..7b48035826e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -44,6 +44,7 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
@@ -767,7 +768,8 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
struct sk_buff *skb,
- unsigned int dataoff, u32 hash)
+ unsigned int dataoff, u32 hash,
+ unsigned int *timeouts)
{
struct nf_conn *ct;
struct nf_conn_help *help;
@@ -786,7 +788,7 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
if (IS_ERR(ct))
return (struct nf_conntrack_tuple_hash *)ct;
- if (!l4proto->new(ct, skb, dataoff)) {
+ if (!l4proto->new(ct, skb, dataoff, timeouts)) {
nf_conntrack_free(ct);
pr_debug("init conntrack: can't track with proto module\n");
return NULL;
@@ -852,7 +854,8 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
struct nf_conntrack_l3proto *l3proto,
struct nf_conntrack_l4proto *l4proto,
int *set_reply,
- enum ip_conntrack_info *ctinfo)
+ enum ip_conntrack_info *ctinfo,
+ unsigned int *timeouts)
{
struct nf_conntrack_tuple tuple;
struct nf_conntrack_tuple_hash *h;
@@ -872,7 +875,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
h = __nf_conntrack_find_get(net, zone, &tuple, hash);
if (!h) {
h = init_conntrack(net, tmpl, &tuple, l3proto, l4proto,
- skb, dataoff, hash);
+ skb, dataoff, hash, timeouts);
if (!h)
return NULL;
if (IS_ERR(h))
@@ -913,6 +916,8 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
enum ip_conntrack_info ctinfo;
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
+ struct nf_conn_timeout *timeout_ext;
+ unsigned int *timeouts;
unsigned int dataoff;
u_int8_t protonum;
int set_reply = 0;
@@ -959,8 +964,19 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
goto out;
}
+ /* Decide what timeout policy we want to apply to this flow. */
+ if (tmpl) {
+ timeout_ext = nf_ct_timeout_find(tmpl);
+ if (timeout_ext)
+ timeouts = NF_CT_TIMEOUT_EXT_DATA(timeout_ext);
+ else
+ timeouts = l4proto->get_timeouts(net);
+ } else
+ timeouts = l4proto->get_timeouts(net);
+
ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
- l3proto, l4proto, &set_reply, &ctinfo);
+ l3proto, l4proto, &set_reply, &ctinfo,
+ timeouts);
if (!ct) {
/* Not valid part of a connection */
NF_CT_STAT_INC_ATOMIC(net, invalid);
@@ -977,7 +993,7 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
NF_CT_ASSERT(skb->nfct);
- ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum);
+ ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts);
if (ret <= 0) {
/* Invalid: inverse of the return code tells
* the netfilter core what to do */
@@ -1331,6 +1347,7 @@ static void nf_conntrack_cleanup_net(struct net *net)
}
nf_ct_free_hashtable(net->ct.hash, net->ct.htable_size);
+ nf_conntrack_timeout_fini(net);
nf_conntrack_ecache_fini(net);
nf_conntrack_tstamp_fini(net);
nf_conntrack_acct_fini(net);
@@ -1562,9 +1579,14 @@ static int nf_conntrack_init_net(struct net *net)
ret = nf_conntrack_ecache_init(net);
if (ret < 0)
goto err_ecache;
+ ret = nf_conntrack_timeout_init(net);
+ if (ret < 0)
+ goto err_timeout;
return 0;
+err_timeout:
+ nf_conntrack_timeout_fini(net);
err_ecache:
nf_conntrack_tstamp_fini(net);
err_tstamp:
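
The nf_conntrack_core.c changes above thread a timeouts array from nf_conntrack_in() down into the L4 trackers' new()/packet() callbacks. The selection logic reduces to a small helper, sketched here on the assumption that nf_ct_timeout_find() returns the template's timeout extension or NULL:

/* Sketch of the policy choice made in nf_conntrack_in() above: a timeout
 * extension attached to the conntrack template wins, otherwise fall back to
 * the protocol tracker's defaults via ->get_timeouts(). */
static unsigned int *pick_timeouts(struct net *net, struct nf_conn *tmpl,
				   struct nf_conntrack_l4proto *l4proto)
{
	struct nf_conn_timeout *ext = tmpl ? nf_ct_timeout_find(tmpl) : NULL;

	return ext ? NF_CT_TIMEOUT_EXT_DATA(ext) : l4proto->get_timeouts(net);
}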
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 14af6329bdd..5bd3047ddee 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -32,9 +32,11 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex);
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
- unsigned long events;
+ unsigned long events, missed;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
+ struct nf_ct_event item;
+ int ret;
rcu_read_lock();
notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
@@ -47,31 +49,32 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
events = xchg(&e->cache, 0);
- if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
- struct nf_ct_event item = {
- .ct = ct,
- .pid = 0,
- .report = 0
- };
- int ret;
- /* We make a copy of the missed event cache without taking
- * the lock, thus we may send missed events twice. However,
- * this does not harm and it happens very rarely. */
- unsigned long missed = e->missed;
-
- if (!((events | missed) & e->ctmask))
- goto out_unlock;
-
- ret = notify->fcn(events | missed, &item);
- if (unlikely(ret < 0 || missed)) {
- spin_lock_bh(&ct->lock);
- if (ret < 0)
- e->missed |= events;
- else
- e->missed &= ~missed;
- spin_unlock_bh(&ct->lock);
- }
- }
+ if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
+ goto out_unlock;
+
+ /* We make a copy of the missed event cache without taking
+ * the lock, thus we may send missed events twice. However,
+ * this does not harm and it happens very rarely. */
+ missed = e->missed;
+
+ if (!((events | missed) & e->ctmask))
+ goto out_unlock;
+
+ item.ct = ct;
+ item.pid = 0;
+ item.report = 0;
+
+ ret = notify->fcn(events | missed, &item);
+
+ if (likely(ret >= 0 && !missed))
+ goto out_unlock;
+
+ spin_lock_bh(&ct->lock);
+ if (ret < 0)
+ e->missed |= events;
+ else
+ e->missed &= ~missed;
+ spin_unlock_bh(&ct->lock);
out_unlock:
rcu_read_unlock();
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index bbe23baa19b..436b7cb79ba 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -181,6 +181,60 @@ void nf_ct_helper_destroy(struct nf_conn *ct)
}
}
+static LIST_HEAD(nf_ct_helper_expectfn_list);
+
+void nf_ct_helper_expectfn_register(struct nf_ct_helper_expectfn *n)
+{
+ spin_lock_bh(&nf_conntrack_lock);
+ list_add_rcu(&n->head, &nf_ct_helper_expectfn_list);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_register);
+
+void nf_ct_helper_expectfn_unregister(struct nf_ct_helper_expectfn *n)
+{
+ spin_lock_bh(&nf_conntrack_lock);
+ list_del_rcu(&n->head);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_unregister);
+
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_name(const char *name)
+{
+ struct nf_ct_helper_expectfn *cur;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
+ if (!strcmp(cur->name, name)) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? cur : NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_name);
+
+struct nf_ct_helper_expectfn *
+nf_ct_helper_expectfn_find_by_symbol(const void *symbol)
+{
+ struct nf_ct_helper_expectfn *cur;
+ bool found = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &nf_ct_helper_expectfn_list, head) {
+ if (cur->expectfn == symbol) {
+ found = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+ return found ? cur : NULL;
+}
+EXPORT_SYMBOL_GPL(nf_ct_helper_expectfn_find_by_symbol);
+
int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
{
unsigned int h = helper_hash(&me->tuple);
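
The expectfn list added to nf_conntrack_helper.c lets helpers publish their expectation callbacks by name, so ctnetlink can resolve the CTA_EXPECT_FN string (see the netlink changes below) back to a function pointer. A hypothetical caller, names illustrative only:

/* Hypothetical helper module registering its expectation callback; the name
 * is what userspace would pass in CTA_EXPECT_FN when creating an expectation. */
static void sample_expected(struct nf_conn *ct, struct nf_conntrack_expect *exp);

static struct nf_ct_helper_expectfn sample_expectfn = {
	.name		= "sample-nat",
	.expectfn	= sample_expected,
};

static int __init sample_init(void)
{
	nf_ct_helper_expectfn_register(&sample_expectfn);
	return 0;
}

static void __exit sample_exit(void)
{
	nf_ct_helper_expectfn_unregister(&sample_expectfn);
}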
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index b49da6c925b..ca7e8354e4f 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -110,15 +110,16 @@ ctnetlink_dump_tuples(struct sk_buff *skb,
struct nf_conntrack_l3proto *l3proto;
struct nf_conntrack_l4proto *l4proto;
+ rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
- if (unlikely(ret < 0))
- return ret;
-
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
- ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
-
+ if (ret >= 0) {
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
+ tuple->dst.protonum);
+ ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
+ }
+ rcu_read_unlock();
return ret;
}
@@ -691,9 +692,18 @@ static int ctnetlink_done(struct netlink_callback *cb)
{
if (cb->args[1])
nf_ct_put((struct nf_conn *)cb->args[1]);
+ if (cb->data)
+ kfree(cb->data);
return 0;
}
+struct ctnetlink_dump_filter {
+ struct {
+ u_int32_t val;
+ u_int32_t mask;
+ } mark;
+};
+
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
@@ -703,6 +713,10 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
struct hlist_nulls_node *n;
struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
u_int8_t l3proto = nfmsg->nfgen_family;
+ int res;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ const struct ctnetlink_dump_filter *filter = cb->data;
+#endif
spin_lock_bh(&nf_conntrack_lock);
last = (struct nf_conn *)cb->args[1];
@@ -723,11 +737,20 @@ restart:
continue;
cb->args[1] = 0;
}
- if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
- cb->nlh->nlmsg_seq,
- NFNL_MSG_TYPE(
- cb->nlh->nlmsg_type),
- ct) < 0) {
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (filter && !((ct->mark & filter->mark.mask) ==
+ filter->mark.val)) {
+ continue;
+ }
+#endif
+ rcu_read_lock();
+ res =
+ ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ ct);
+ rcu_read_unlock();
+ if (res < 0) {
nf_conntrack_get(&ct->ct_general);
cb->args[1] = (unsigned long)ct;
goto out;
@@ -894,6 +917,7 @@ static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
[CTA_NAT_DST] = { .type = NLA_NESTED },
[CTA_TUPLE_MASTER] = { .type = NLA_NESTED },
[CTA_ZONE] = { .type = NLA_U16 },
+ [CTA_MARK_MASK] = { .type = NLA_U32 },
};
static int
@@ -978,9 +1002,28 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
u16 zone;
int err;
- if (nlh->nlmsg_flags & NLM_F_DUMP)
- return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
- ctnetlink_done, 0);
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_dump_table,
+ .done = ctnetlink_done,
+ };
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ if (cda[CTA_MARK] && cda[CTA_MARK_MASK]) {
+ struct ctnetlink_dump_filter *filter;
+
+ filter = kzalloc(sizeof(struct ctnetlink_dump_filter),
+ GFP_ATOMIC);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK]));
+ filter->mark.mask =
+ ntohl(nla_get_be32(cda[CTA_MARK_MASK]));
+ c.data = filter;
+ }
+#endif
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
err = ctnetlink_parse_zone(cda[CTA_ZONE], &zone);
if (err < 0)
@@ -1610,14 +1653,16 @@ ctnetlink_exp_dump_mask(struct sk_buff *skb,
if (!nest_parms)
goto nla_put_failure;
+ rcu_read_lock();
l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
-
- if (unlikely(ret < 0))
- goto nla_put_failure;
-
- l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
+ if (ret >= 0) {
+ l4proto = __nf_ct_l4proto_find(tuple->src.l3num,
+ tuple->dst.protonum);
ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
+ }
+ rcu_read_unlock();
+
if (unlikely(ret < 0))
goto nla_put_failure;
@@ -1636,6 +1681,11 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
struct nf_conn *master = exp->master;
long timeout = ((long)exp->timeout.expires - (long)jiffies) / HZ;
struct nf_conn_help *help;
+#ifdef CONFIG_NF_NAT_NEEDED
+ struct nlattr *nest_parms;
+ struct nf_conntrack_tuple nat_tuple = {};
+#endif
+ struct nf_ct_helper_expectfn *expfn;
if (timeout < 0)
timeout = 0;
@@ -1649,9 +1699,29 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
CTA_EXPECT_MASTER) < 0)
goto nla_put_failure;
+#ifdef CONFIG_NF_NAT_NEEDED
+ if (exp->saved_ip || exp->saved_proto.all) {
+ nest_parms = nla_nest_start(skb, CTA_EXPECT_NAT | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ NLA_PUT_BE32(skb, CTA_EXPECT_NAT_DIR, htonl(exp->dir));
+
+ nat_tuple.src.l3num = nf_ct_l3num(master);
+ nat_tuple.src.u3.ip = exp->saved_ip;
+ nat_tuple.dst.protonum = nf_ct_protonum(master);
+ nat_tuple.src.u = exp->saved_proto;
+
+ if (ctnetlink_exp_dump_tuple(skb, &nat_tuple,
+ CTA_EXPECT_NAT_TUPLE) < 0)
+ goto nla_put_failure;
+ nla_nest_end(skb, nest_parms);
+ }
+#endif
NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));
NLA_PUT_BE32(skb, CTA_EXPECT_FLAGS, htonl(exp->flags));
+ NLA_PUT_BE32(skb, CTA_EXPECT_CLASS, htonl(exp->class));
help = nfct_help(master);
if (help) {
struct nf_conntrack_helper *helper;
@@ -1660,6 +1730,9 @@ ctnetlink_exp_dump_expect(struct sk_buff *skb,
if (helper)
NLA_PUT_STRING(skb, CTA_EXPECT_HELP_NAME, helper->name);
}
+ expfn = nf_ct_helper_expectfn_find_by_symbol(exp->expectfn);
+ if (expfn != NULL)
+ NLA_PUT_STRING(skb, CTA_EXPECT_FN, expfn->name);
return 0;
@@ -1817,6 +1890,9 @@ static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
[CTA_EXPECT_HELP_NAME] = { .type = NLA_NUL_STRING },
[CTA_EXPECT_ZONE] = { .type = NLA_U16 },
[CTA_EXPECT_FLAGS] = { .type = NLA_U32 },
+ [CTA_EXPECT_CLASS] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT] = { .type = NLA_NESTED },
+ [CTA_EXPECT_FN] = { .type = NLA_NUL_STRING },
};
static int
@@ -1834,9 +1910,11 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
int err;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- return netlink_dump_start(ctnl, skb, nlh,
- ctnetlink_exp_dump_table,
- ctnetlink_exp_done, 0);
+ struct netlink_dump_control c = {
+ .dump = ctnetlink_exp_dump_table,
+ .done = ctnetlink_exp_done,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
}
err = ctnetlink_parse_zone(cda[CTA_EXPECT_ZONE], &zone);
@@ -1990,6 +2068,41 @@ ctnetlink_change_expect(struct nf_conntrack_expect *x,
return -EOPNOTSUPP;
}
+static const struct nla_policy exp_nat_nla_policy[CTA_EXPECT_NAT_MAX+1] = {
+ [CTA_EXPECT_NAT_DIR] = { .type = NLA_U32 },
+ [CTA_EXPECT_NAT_TUPLE] = { .type = NLA_NESTED },
+};
+
+static int
+ctnetlink_parse_expect_nat(const struct nlattr *attr,
+ struct nf_conntrack_expect *exp,
+ u_int8_t u3)
+{
+#ifdef CONFIG_NF_NAT_NEEDED
+ struct nlattr *tb[CTA_EXPECT_NAT_MAX+1];
+ struct nf_conntrack_tuple nat_tuple = {};
+ int err;
+
+ nla_parse_nested(tb, CTA_EXPECT_NAT_MAX, attr, exp_nat_nla_policy);
+
+ if (!tb[CTA_EXPECT_NAT_DIR] || !tb[CTA_EXPECT_NAT_TUPLE])
+ return -EINVAL;
+
+ err = ctnetlink_parse_tuple((const struct nlattr * const *)tb,
+ &nat_tuple, CTA_EXPECT_NAT_TUPLE, u3);
+ if (err < 0)
+ return err;
+
+ exp->saved_ip = nat_tuple.src.u3.ip;
+ exp->saved_proto = nat_tuple.src.u;
+ exp->dir = ntohl(nla_get_be32(tb[CTA_EXPECT_NAT_DIR]));
+
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
static int
ctnetlink_create_expect(struct net *net, u16 zone,
const struct nlattr * const cda[],
@@ -2001,6 +2114,8 @@ ctnetlink_create_expect(struct net *net, u16 zone,
struct nf_conntrack_expect *exp;
struct nf_conn *ct;
struct nf_conn_help *help;
+ struct nf_conntrack_helper *helper = NULL;
+ u_int32_t class = 0;
int err = 0;
/* caller guarantees that those three CTA_EXPECT_* exist */
@@ -2019,6 +2134,40 @@ ctnetlink_create_expect(struct net *net, u16 zone,
if (!h)
return -ENOENT;
ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* Look for helper of this expectation */
+ if (cda[CTA_EXPECT_HELP_NAME]) {
+ const char *helpname = nla_data(cda[CTA_EXPECT_HELP_NAME]);
+
+ helper = __nf_conntrack_helper_find(helpname, nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper == NULL) {
+#ifdef CONFIG_MODULES
+ if (request_module("nfct-helper-%s", helpname) < 0) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ helper = __nf_conntrack_helper_find(helpname,
+ nf_ct_l3num(ct),
+ nf_ct_protonum(ct));
+ if (helper) {
+ err = -EAGAIN;
+ goto out;
+ }
+#endif
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+ }
+
+ if (cda[CTA_EXPECT_CLASS] && helper) {
+ class = ntohl(nla_get_be32(cda[CTA_EXPECT_CLASS]));
+ if (class > helper->expect_class_max) {
+ err = -EINVAL;
+ goto out;
+ }
+ }
exp = nf_ct_expect_alloc(ct);
if (!exp) {
err = -ENOMEM;
@@ -2045,18 +2194,35 @@ ctnetlink_create_expect(struct net *net, u16 zone,
} else
exp->flags = 0;
}
+ if (cda[CTA_EXPECT_FN]) {
+ const char *name = nla_data(cda[CTA_EXPECT_FN]);
+ struct nf_ct_helper_expectfn *expfn;
+
+ expfn = nf_ct_helper_expectfn_find_by_name(name);
+ if (expfn == NULL) {
+ err = -EINVAL;
+ goto err_out;
+ }
+ exp->expectfn = expfn->expectfn;
+ } else
+ exp->expectfn = NULL;
- exp->class = 0;
- exp->expectfn = NULL;
+ exp->class = class;
exp->master = ct;
- exp->helper = NULL;
+ exp->helper = helper;
memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
exp->mask.src.u.all = mask.src.u.all;
+ if (cda[CTA_EXPECT_NAT]) {
+ err = ctnetlink_parse_expect_nat(cda[CTA_EXPECT_NAT],
+ exp, u3);
+ if (err < 0)
+ goto err_out;
+ }
err = nf_ct_expect_related_report(exp, pid, report);
+err_out:
nf_ct_expect_put(exp);
-
out:
nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
return err;
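
The ctmark dump filter added above passes an entry only when its mark, masked, equals the requested value; CTA_MARK and CTA_MARK_MASK come from the dump request. The predicate, restated as a sketch:

/* Sketch of the test in ctnetlink_dump_table() above: dump the entry only if
 * (ct->mark & mask) == val. */
static bool ct_mark_matches(u32 mark, u32 val, u32 mask)
{
	return (mark & mask) == val;
}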
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index d6dde6dc09e..24fdce256cb 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -423,7 +423,7 @@ static bool dccp_invert_tuple(struct nf_conntrack_tuple *inv,
}
static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct dccp_net *dn;
@@ -472,12 +472,17 @@ static u64 dccp_ack_seq(const struct dccp_hdr *dh)
ntohl(dhack->dccph_ack_nr_low);
}
+static unsigned int *dccp_get_timeouts(struct net *net)
+{
+ return dccp_pernet(net)->dccp_timeout;
+}
+
static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
unsigned int dataoff, enum ip_conntrack_info ctinfo,
- u_int8_t pf, unsigned int hooknum)
+ u_int8_t pf, unsigned int hooknum,
+ unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
- struct dccp_net *dn;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
struct dccp_hdr _dh, *dh;
u_int8_t type, old_state, new_state;
@@ -559,8 +564,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
if (new_state != old_state)
nf_conntrack_event_cache(IPCT_PROTOINFO, ct);
- dn = dccp_pernet(net);
- nf_ct_refresh_acct(ct, ctinfo, skb, dn->dccp_timeout[new_state]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
return NF_ACCEPT;
}
@@ -702,8 +706,60 @@ static int dccp_nlattr_size(void)
return nla_total_size(0) /* CTA_PROTOINFO_DCCP */
+ nla_policy_len(dccp_nla_policy, CTA_PROTOINFO_DCCP_MAX + 1);
}
+
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int dccp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ struct dccp_net *dn = dccp_pernet(&init_net);
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default DCCP timeouts. */
+ for (i=0; i<CT_DCCP_MAX; i++)
+ timeouts[i] = dn->dccp_timeout[i];
+
+ /* there's a 1:1 mapping between attributes and protocol states. */
+ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++) {
+ if (tb[i]) {
+ timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
+ }
+ }
+ return 0;
+}
+
+static int
+dccp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+ int i;
+
+ for (i=CTA_TIMEOUT_DCCP_UNSPEC+1; i<CTA_TIMEOUT_DCCP_MAX+1; i++)
+ NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+dccp_timeout_nla_policy[CTA_TIMEOUT_DCCP_MAX+1] = {
+ [CTA_TIMEOUT_DCCP_REQUEST] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_RESPOND] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_PARTOPEN] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_OPEN] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_CLOSEREQ] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_CLOSING] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_DCCP_TIMEWAIT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
/* template, data assigned later */
static struct ctl_table dccp_sysctl_table[] = {
@@ -767,6 +823,7 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
+ .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
@@ -779,6 +836,15 @@ static struct nf_conntrack_l4proto dccp_proto4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
+ .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
+ .nla_policy = dccp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
@@ -789,6 +855,7 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.invert_tuple = dccp_invert_tuple,
.new = dccp_new,
.packet = dccp_packet,
+ .get_timeouts = dccp_get_timeouts,
.error = dccp_error,
.print_tuple = dccp_print_tuple,
.print_conntrack = dccp_print_conntrack,
@@ -801,6 +868,15 @@ static struct nf_conntrack_l4proto dccp_proto6 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = dccp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = dccp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_DCCP_MAX,
+ .obj_size = sizeof(unsigned int) * CT_DCCP_MAX,
+ .nla_policy = dccp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static __net_init int dccp_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
index e2091d0c7a2..835e24c58f0 100644
--- a/net/netfilter/nf_conntrack_proto_generic.c
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -40,25 +40,70 @@ static int generic_print_tuple(struct seq_file *s,
return 0;
}
+static unsigned int *generic_get_timeouts(struct net *net)
+{
+ return &nf_ct_generic_timeout;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
-static int packet(struct nf_conn *ct,
- const struct sk_buff *skb,
- unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- u_int8_t pf,
- unsigned int hooknum)
+static int generic_packet(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ u_int8_t pf,
+ unsigned int hooknum,
+ unsigned int *timeout)
{
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_generic_timeout);
+ nf_ct_refresh_acct(ct, ctinfo, skb, *timeout);
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
-static bool new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+static bool generic_new(struct nf_conn *ct, const struct sk_buff *skb,
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int generic_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeout = data;
+
+ if (tb[CTA_TIMEOUT_GENERIC_TIMEOUT])
+ *timeout =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GENERIC_TIMEOUT])) * HZ;
+ else {
+ /* Set default generic timeout. */
+ *timeout = nf_ct_generic_timeout;
+ }
+
+ return 0;
+}
+
+static int
+generic_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeout = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GENERIC_TIMEOUT, htonl(*timeout / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+generic_timeout_nla_policy[CTA_TIMEOUT_GENERIC_MAX+1] = {
+ [CTA_TIMEOUT_GENERIC_TIMEOUT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *generic_sysctl_header;
static struct ctl_table generic_sysctl_table[] = {
@@ -93,8 +138,18 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_generic __read_mostly =
.pkt_to_tuple = generic_pkt_to_tuple,
.invert_tuple = generic_invert_tuple,
.print_tuple = generic_print_tuple,
- .packet = packet,
- .new = new,
+ .packet = generic_packet,
+ .get_timeouts = generic_get_timeouts,
+ .new = generic_new,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = generic_timeout_nlattr_to_obj,
+ .obj_to_nlattr = generic_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_GENERIC_MAX,
+ .obj_size = sizeof(unsigned int),
+ .nla_policy = generic_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_header = &generic_sysctl_header,
.ctl_table = generic_sysctl_table,
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c
index f0338791b82..659648c4b14 100644
--- a/net/netfilter/nf_conntrack_proto_gre.c
+++ b/net/netfilter/nf_conntrack_proto_gre.c
@@ -41,8 +41,16 @@
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#include <linux/netfilter/nf_conntrack_pptp.h>
-#define GRE_TIMEOUT (30 * HZ)
-#define GRE_STREAM_TIMEOUT (180 * HZ)
+enum grep_conntrack {
+ GRE_CT_UNREPLIED,
+ GRE_CT_REPLIED,
+ GRE_CT_MAX
+};
+
+static unsigned int gre_timeouts[GRE_CT_MAX] = {
+ [GRE_CT_UNREPLIED] = 30*HZ,
+ [GRE_CT_REPLIED] = 180*HZ,
+};
static int proto_gre_net_id __read_mostly;
struct netns_proto_gre {
@@ -227,13 +235,19 @@ static int gre_print_conntrack(struct seq_file *s, struct nf_conn *ct)
(ct->proto.gre.stream_timeout / HZ));
}
+static unsigned int *gre_get_timeouts(struct net *net)
+{
+ return gre_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntrack */
static int gre_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is a GRE connection.
* Extend timeout. */
@@ -252,15 +266,15 @@ static int gre_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool gre_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
pr_debug(": ");
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
/* initialize to sane value. Ideally a conntrack helper
* (e.g. in case of pptp) is increasing them */
- ct->proto.gre.stream_timeout = GRE_STREAM_TIMEOUT;
- ct->proto.gre.timeout = GRE_TIMEOUT;
+ ct->proto.gre.stream_timeout = timeouts[GRE_CT_REPLIED];
+ ct->proto.gre.timeout = timeouts[GRE_CT_UNREPLIED];
return true;
}
@@ -278,6 +292,52 @@ static void gre_destroy(struct nf_conn *ct)
nf_ct_gre_keymap_destroy(master);
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int gre_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for GRE. */
+ timeouts[GRE_CT_UNREPLIED] = gre_timeouts[GRE_CT_UNREPLIED];
+ timeouts[GRE_CT_REPLIED] = gre_timeouts[GRE_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_GRE_UNREPLIED]) {
+ timeouts[GRE_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_GRE_REPLIED]) {
+ timeouts[GRE_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_GRE_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+gre_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_UNREPLIED,
+ htonl(timeouts[GRE_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_GRE_REPLIED,
+ htonl(timeouts[GRE_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+gre_timeout_nla_policy[CTA_TIMEOUT_GRE_MAX+1] = {
+ [CTA_TIMEOUT_GRE_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_GRE_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
/* protocol helper struct */
static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.l3proto = AF_INET,
@@ -287,6 +347,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.invert_tuple = gre_invert_tuple,
.print_tuple = gre_print_tuple,
.print_conntrack = gre_print_conntrack,
+ .get_timeouts = gre_get_timeouts,
.packet = gre_packet,
.new = gre_new,
.destroy = gre_destroy,
@@ -297,6 +358,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_gre4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = gre_timeout_nlattr_to_obj,
+ .obj_to_nlattr = gre_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_GRE_MAX,
+ .obj_size = sizeof(unsigned int) * GRE_CT_MAX,
+ .nla_policy = gre_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
};
static int proto_gre_net_init(struct net *net)
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
index afa69136061..72b5088592d 100644
--- a/net/netfilter/nf_conntrack_proto_sctp.c
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -279,13 +279,19 @@ static int sctp_new_state(enum ip_conntrack_dir dir,
return sctp_conntracks[dir][i][cur_state];
}
+static unsigned int *sctp_get_timeouts(struct net *net)
+{
+ return sctp_timeouts;
+}
+
/* Returns verdict for packet, or -NF_ACCEPT for invalid. */
static int sctp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
enum sctp_conntrack new_state, old_state;
enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
@@ -370,7 +376,7 @@ static int sctp_packet(struct nf_conn *ct,
}
spin_unlock_bh(&ct->lock);
- nf_ct_refresh_acct(ct, ctinfo, skb, sctp_timeouts[new_state]);
+ nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[new_state]);
if (old_state == SCTP_CONNTRACK_COOKIE_ECHOED &&
dir == IP_CT_DIR_REPLY &&
@@ -390,7 +396,7 @@ out:
/* Called when a new connection for this protocol found. */
static bool sctp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
enum sctp_conntrack new_state;
const struct sctphdr *sh;
@@ -543,6 +549,57 @@ static int sctp_nlattr_size(void)
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int sctp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default SCTP timeouts. */
+ for (i=0; i<SCTP_CONNTRACK_MAX; i++)
+ timeouts[i] = sctp_timeouts[i];
+
+ /* there's a 1:1 mapping between attributes and protocol states. */
+ for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++) {
+ if (tb[i]) {
+ timeouts[i] = ntohl(nla_get_be32(tb[i])) * HZ;
+ }
+ }
+ return 0;
+}
+
+static int
+sctp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+ int i;
+
+ for (i=CTA_TIMEOUT_SCTP_UNSPEC+1; i<CTA_TIMEOUT_SCTP_MAX+1; i++)
+ NLA_PUT_BE32(skb, i, htonl(timeouts[i] / HZ));
+
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+sctp_timeout_nla_policy[CTA_TIMEOUT_SCTP_MAX+1] = {
+ [CTA_TIMEOUT_SCTP_CLOSED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_COOKIE_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_COOKIE_ECHOED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_ESTABLISHED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_RECD] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_SCTP_SHUTDOWN_ACK_SENT] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
+
#ifdef CONFIG_SYSCTL
static unsigned int sctp_sysctl_table_users;
static struct ctl_table_header *sctp_sysctl_header;
@@ -664,6 +721,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
+ .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -675,6 +733,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp4 __read_mostly = {
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
+ .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
+ .nla_policy = sctp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
.ctl_table_header = &sctp_sysctl_header,
@@ -694,6 +761,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.print_tuple = sctp_print_tuple,
.print_conntrack = sctp_print_conntrack,
.packet = sctp_packet,
+ .get_timeouts = sctp_get_timeouts,
.new = sctp_new,
.me = THIS_MODULE,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -704,6 +772,15 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_sctp6 __read_mostly = {
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = sctp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = sctp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_SCTP_MAX,
+ .obj_size = sizeof(unsigned int) * SCTP_CONNTRACK_MAX,
+ .nla_policy = sctp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#endif
#ifdef CONFIG_SYSCTL
.ctl_table_users = &sctp_sysctl_table_users,
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 97b9f3ebf28..361eade62a0 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -64,13 +64,7 @@ static const char *const tcp_conntrack_names[] = {
#define HOURS * 60 MINS
#define DAYS * 24 HOURS
-/* RFC1122 says the R2 limit should be at least 100 seconds.
- Linux uses 15 packets as limit, which corresponds
- to ~13-30min depending on RTO. */
-static unsigned int nf_ct_tcp_timeout_max_retrans __read_mostly = 5 MINS;
-static unsigned int nf_ct_tcp_timeout_unacknowledged __read_mostly = 5 MINS;
-
-static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
+static unsigned int tcp_timeouts[TCP_CONNTRACK_TIMEOUT_MAX] __read_mostly = {
[TCP_CONNTRACK_SYN_SENT] = 2 MINS,
[TCP_CONNTRACK_SYN_RECV] = 60 SECS,
[TCP_CONNTRACK_ESTABLISHED] = 5 DAYS,
@@ -80,6 +74,11 @@ static unsigned int tcp_timeouts[TCP_CONNTRACK_MAX] __read_mostly = {
[TCP_CONNTRACK_TIME_WAIT] = 2 MINS,
[TCP_CONNTRACK_CLOSE] = 10 SECS,
[TCP_CONNTRACK_SYN_SENT2] = 2 MINS,
+/* RFC1122 says the R2 limit should be at least 100 seconds.
+ Linux uses 15 packets as limit, which corresponds
+ to ~13-30min depending on RTO. */
+ [TCP_CONNTRACK_RETRANS] = 5 MINS,
+ [TCP_CONNTRACK_UNACK] = 5 MINS,
};
#define sNO TCP_CONNTRACK_NONE
@@ -814,13 +813,19 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
+static unsigned int *tcp_get_timeouts(struct net *net)
+{
+ return tcp_timeouts;
+}
+
/* Returns verdict for packet, or -1 for invalid. */
static int tcp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
struct net *net = nf_ct_net(ct);
struct nf_conntrack_tuple *tuple;
@@ -1015,14 +1020,14 @@ static int tcp_packet(struct nf_conn *ct,
ct->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
if (ct->proto.tcp.retrans >= nf_ct_tcp_max_retrans &&
- tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans)
- timeout = nf_ct_tcp_timeout_max_retrans;
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
+ timeout = timeouts[TCP_CONNTRACK_RETRANS];
else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
- tcp_timeouts[new_state] > nf_ct_tcp_timeout_unacknowledged)
- timeout = nf_ct_tcp_timeout_unacknowledged;
+ timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
+ timeout = timeouts[TCP_CONNTRACK_UNACK];
else
- timeout = tcp_timeouts[new_state];
+ timeout = timeouts[new_state];
spin_unlock_bh(&ct->lock);
if (new_state != old_state)
@@ -1054,7 +1059,7 @@ static int tcp_packet(struct nf_conn *ct,
/* Called when a new connection for this protocol found. */
static bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
enum tcp_conntrack new_state;
const struct tcphdr *th;
@@ -1239,6 +1244,113 @@ static int tcp_nlattr_tuple_size(void)
}
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int tcp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+ int i;
+
+ /* set default TCP timeouts. */
+ for (i=0; i<TCP_CONNTRACK_TIMEOUT_MAX; i++)
+ timeouts[i] = tcp_timeouts[i];
+
+ if (tb[CTA_TIMEOUT_TCP_SYN_SENT]) {
+ timeouts[TCP_CONNTRACK_SYN_SENT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_SYN_RECV]) {
+ timeouts[TCP_CONNTRACK_SYN_RECV] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_RECV]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_ESTABLISHED]) {
+ timeouts[TCP_CONNTRACK_ESTABLISHED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_ESTABLISHED]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_FIN_WAIT]) {
+ timeouts[TCP_CONNTRACK_FIN_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_FIN_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]) {
+ timeouts[TCP_CONNTRACK_CLOSE_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_LAST_ACK]) {
+ timeouts[TCP_CONNTRACK_LAST_ACK] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_LAST_ACK]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_TIME_WAIT]) {
+ timeouts[TCP_CONNTRACK_TIME_WAIT] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_TIME_WAIT]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_CLOSE]) {
+ timeouts[TCP_CONNTRACK_CLOSE] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_CLOSE]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_SYN_SENT2]) {
+ timeouts[TCP_CONNTRACK_SYN_SENT2] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_SYN_SENT2]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_RETRANS]) {
+ timeouts[TCP_CONNTRACK_RETRANS] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_RETRANS]))*HZ;
+ }
+ if (tb[CTA_TIMEOUT_TCP_UNACK]) {
+ timeouts[TCP_CONNTRACK_UNACK] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_TCP_UNACK]))*HZ;
+ }
+ return 0;
+}
+
+static int
+tcp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_RECV,
+ htonl(timeouts[TCP_CONNTRACK_SYN_RECV] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_ESTABLISHED,
+ htonl(timeouts[TCP_CONNTRACK_ESTABLISHED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_FIN_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_FIN_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_LAST_ACK,
+ htonl(timeouts[TCP_CONNTRACK_LAST_ACK] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_TIME_WAIT,
+ htonl(timeouts[TCP_CONNTRACK_TIME_WAIT] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_CLOSE,
+ htonl(timeouts[TCP_CONNTRACK_CLOSE] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_SYN_SENT2,
+ htonl(timeouts[TCP_CONNTRACK_SYN_SENT2] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_RETRANS,
+ htonl(timeouts[TCP_CONNTRACK_RETRANS] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_TCP_UNACK,
+ htonl(timeouts[TCP_CONNTRACK_UNACK] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy tcp_timeout_nla_policy[CTA_TIMEOUT_TCP_MAX+1] = {
+ [CTA_TIMEOUT_TCP_SYN_SENT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_SYN_RECV] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_ESTABLISHED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_FIN_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_CLOSE_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_LAST_ACK] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_TIME_WAIT] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_CLOSE] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_TCP_SYN_SENT2] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int tcp_sysctl_table_users;
static struct ctl_table_header *tcp_sysctl_header;
@@ -1301,14 +1413,14 @@ static struct ctl_table tcp_sysctl_table[] = {
},
{
.procname = "nf_conntrack_tcp_timeout_max_retrans",
- .data = &nf_ct_tcp_timeout_max_retrans,
+ .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_tcp_timeout_unacknowledged",
- .data = &nf_ct_tcp_timeout_unacknowledged,
+ .data = &tcp_timeouts[TCP_CONNTRACK_UNACK],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -1404,7 +1516,7 @@ static struct ctl_table tcp_compat_sysctl_table[] = {
},
{
.procname = "ip_conntrack_tcp_timeout_max_retrans",
- .data = &nf_ct_tcp_timeout_max_retrans,
+ .data = &tcp_timeouts[TCP_CONNTRACK_RETRANS],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -1445,6 +1557,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
+ .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1456,6 +1569,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4 __read_mostly =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_TCP_MAX,
+ .obj_size = sizeof(unsigned int) *
+ TCP_CONNTRACK_TIMEOUT_MAX,
+ .nla_policy = tcp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
@@ -1477,6 +1600,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.print_tuple = tcp_print_tuple,
.print_conntrack = tcp_print_conntrack,
.packet = tcp_packet,
+ .get_timeouts = tcp_get_timeouts,
.new = tcp_new,
.error = tcp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -1488,6 +1612,16 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6 __read_mostly =
.nlattr_tuple_size = tcp_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = tcp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = tcp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_TCP_MAX,
+ .obj_size = sizeof(unsigned int) *
+ TCP_CONNTRACK_TIMEOUT_MAX,
+ .nla_policy = tcp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &tcp_sysctl_table_users,
.ctl_table_header = &tcp_sysctl_header,
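
A unit convention shared by all the nlattr converters above: cttimeout attributes carry seconds on the wire, while the kernel-side tables hold jiffies, hence the repeated "* HZ" on input and "/ HZ" on dump. Hypothetical helpers spelling that out:

/* Hypothetical helpers (not part of the patch) making the unit convention of
 * the converters above explicit: seconds in the attribute, jiffies in the
 * timeout tables. */
static inline unsigned int ct_timeout_attr_to_jiffies(const struct nlattr *attr)
{
	return ntohl(nla_get_be32(attr)) * HZ;
}

static inline __be32 ct_timeout_jiffies_to_attr(unsigned int timeout)
{
	return htonl(timeout / HZ);
}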
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 5f35757fbff..a9073dc1548 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -25,8 +25,16 @@
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
-static unsigned int nf_ct_udp_timeout __read_mostly = 30*HZ;
-static unsigned int nf_ct_udp_timeout_stream __read_mostly = 180*HZ;
+enum udp_conntrack {
+ UDP_CT_UNREPLIED,
+ UDP_CT_REPLIED,
+ UDP_CT_MAX
+};
+
+static unsigned int udp_timeouts[UDP_CT_MAX] = {
+ [UDP_CT_UNREPLIED] = 30*HZ,
+ [UDP_CT_REPLIED] = 180*HZ,
+};
static bool udp_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
@@ -63,30 +71,38 @@ static int udp_print_tuple(struct seq_file *s,
ntohs(tuple->dst.u.udp.port));
}
+static unsigned int *udp_get_timeouts(struct net *net)
+{
+ return udp_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntracktype */
static int udp_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout_stream);
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
- } else
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udp_timeout);
-
+ } else {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDP_CT_UNREPLIED]);
+ }
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
@@ -136,20 +152,66 @@ static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
return NF_ACCEPT;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for UDP. */
+ timeouts[UDP_CT_UNREPLIED] = udp_timeouts[UDP_CT_UNREPLIED];
+ timeouts[UDP_CT_REPLIED] = udp_timeouts[UDP_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) {
+ timeouts[UDP_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_UDP_REPLIED]) {
+ timeouts[UDP_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_UNREPLIED,
+ htonl(timeouts[UDP_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDP_REPLIED,
+ htonl(timeouts[UDP_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = {
+ [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int udp_sysctl_table_users;
static struct ctl_table_header *udp_sysctl_header;
static struct ctl_table udp_sysctl_table[] = {
{
.procname = "nf_conntrack_udp_timeout",
- .data = &nf_ct_udp_timeout,
+ .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udp_timeout_stream",
- .data = &nf_ct_udp_timeout_stream,
+ .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -160,14 +222,14 @@ static struct ctl_table udp_sysctl_table[] = {
static struct ctl_table udp_compat_sysctl_table[] = {
{
.procname = "ip_conntrack_udp_timeout",
- .data = &nf_ct_udp_timeout,
+ .data = &udp_timeouts[UDP_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "ip_conntrack_udp_timeout_stream",
- .data = &nf_ct_udp_timeout_stream,
+ .data = &udp_timeouts[UDP_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -186,6 +248,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
+ .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -194,6 +257,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDP_MAX,
+ .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+ .nla_policy = udp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
@@ -214,6 +286,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.invert_tuple = udp_invert_tuple,
.print_tuple = udp_print_tuple,
.packet = udp_packet,
+ .get_timeouts = udp_get_timeouts,
.new = udp_new,
.error = udp_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -222,6 +295,15 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly =
.nlattr_tuple_size = nf_ct_port_nlattr_tuple_size,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udp_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udp_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDP_MAX,
+ .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX,
+ .nla_policy = udp_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udp_sysctl_table_users,
.ctl_table_header = &udp_sysctl_header,
diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c
index f52ca118101..e0606392cda 100644
--- a/net/netfilter/nf_conntrack_proto_udplite.c
+++ b/net/netfilter/nf_conntrack_proto_udplite.c
@@ -24,8 +24,16 @@
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_log.h>
-static unsigned int nf_ct_udplite_timeout __read_mostly = 30*HZ;
-static unsigned int nf_ct_udplite_timeout_stream __read_mostly = 180*HZ;
+enum udplite_conntrack {
+ UDPLITE_CT_UNREPLIED,
+ UDPLITE_CT_REPLIED,
+ UDPLITE_CT_MAX
+};
+
+static unsigned int udplite_timeouts[UDPLITE_CT_MAX] = {
+ [UDPLITE_CT_UNREPLIED] = 30*HZ,
+ [UDPLITE_CT_REPLIED] = 180*HZ,
+};
static bool udplite_pkt_to_tuple(const struct sk_buff *skb,
unsigned int dataoff,
@@ -60,31 +68,38 @@ static int udplite_print_tuple(struct seq_file *s,
ntohs(tuple->dst.u.udp.port));
}
+static unsigned int *udplite_get_timeouts(struct net *net)
+{
+ return udplite_timeouts;
+}
+
/* Returns verdict for packet, and may modify conntracktype */
static int udplite_packet(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
u_int8_t pf,
- unsigned int hooknum)
+ unsigned int hooknum,
+ unsigned int *timeouts)
{
/* If we've seen traffic both ways, this is some kind of UDP
stream. Extend timeout. */
if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
nf_ct_refresh_acct(ct, ctinfo, skb,
- nf_ct_udplite_timeout_stream);
+ timeouts[UDPLITE_CT_REPLIED]);
/* Also, more likely to be important, and not a probe */
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
- } else
- nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_udplite_timeout);
-
+ } else {
+ nf_ct_refresh_acct(ct, ctinfo, skb,
+ timeouts[UDPLITE_CT_UNREPLIED]);
+ }
return NF_ACCEPT;
}
/* Called when a new connection for this protocol found. */
static bool udplite_new(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff)
+ unsigned int dataoff, unsigned int *timeouts)
{
return true;
}
@@ -141,20 +156,66 @@ static int udplite_error(struct net *net, struct nf_conn *tmpl,
return NF_ACCEPT;
}
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+static int udplite_timeout_nlattr_to_obj(struct nlattr *tb[], void *data)
+{
+ unsigned int *timeouts = data;
+
+ /* set default timeouts for UDPlite. */
+ timeouts[UDPLITE_CT_UNREPLIED] = udplite_timeouts[UDPLITE_CT_UNREPLIED];
+ timeouts[UDPLITE_CT_REPLIED] = udplite_timeouts[UDPLITE_CT_REPLIED];
+
+ if (tb[CTA_TIMEOUT_UDPLITE_UNREPLIED]) {
+ timeouts[UDPLITE_CT_UNREPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_UNREPLIED])) * HZ;
+ }
+ if (tb[CTA_TIMEOUT_UDPLITE_REPLIED]) {
+ timeouts[UDPLITE_CT_REPLIED] =
+ ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDPLITE_REPLIED])) * HZ;
+ }
+ return 0;
+}
+
+static int
+udplite_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data)
+{
+ const unsigned int *timeouts = data;
+
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_UNREPLIED,
+ htonl(timeouts[UDPLITE_CT_UNREPLIED] / HZ));
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_UDPLITE_REPLIED,
+ htonl(timeouts[UDPLITE_CT_REPLIED] / HZ));
+ return 0;
+
+nla_put_failure:
+ return -ENOSPC;
+}
+
+static const struct nla_policy
+udplite_timeout_nla_policy[CTA_TIMEOUT_UDPLITE_MAX+1] = {
+ [CTA_TIMEOUT_UDPLITE_UNREPLIED] = { .type = NLA_U32 },
+ [CTA_TIMEOUT_UDPLITE_REPLIED] = { .type = NLA_U32 },
+};
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
+
#ifdef CONFIG_SYSCTL
static unsigned int udplite_sysctl_table_users;
static struct ctl_table_header *udplite_sysctl_header;
static struct ctl_table udplite_sysctl_table[] = {
{
.procname = "nf_conntrack_udplite_timeout",
- .data = &nf_ct_udplite_timeout,
+ .data = &udplite_timeouts[UDPLITE_CT_UNREPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "nf_conntrack_udplite_timeout_stream",
- .data = &nf_ct_udplite_timeout_stream,
+ .data = &udplite_timeouts[UDPLITE_CT_REPLIED],
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
@@ -172,6 +233,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
+ .get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -180,6 +242,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4 __read_mostly =
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
+ .obj_size = sizeof(unsigned int) *
+ CTA_TIMEOUT_UDPLITE_MAX,
+ .nla_policy = udplite_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
@@ -196,6 +268,7 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.invert_tuple = udplite_invert_tuple,
.print_tuple = udplite_print_tuple,
.packet = udplite_packet,
+ .get_timeouts = udplite_get_timeouts,
.new = udplite_new,
.error = udplite_error,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
@@ -204,6 +277,16 @@ static struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6 __read_mostly =
.nlattr_to_tuple = nf_ct_port_nlattr_to_tuple,
.nla_policy = nf_ct_port_nla_policy,
#endif
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
+ .ctnl_timeout = {
+ .nlattr_to_obj = udplite_timeout_nlattr_to_obj,
+ .obj_to_nlattr = udplite_timeout_obj_to_nlattr,
+ .nlattr_max = CTA_TIMEOUT_UDPLITE_MAX,
+ .obj_size = sizeof(unsigned int) *
+ CTA_TIMEOUT_UDPLITE_MAX,
+ .nla_policy = udplite_timeout_nla_policy,
+ },
+#endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */
#ifdef CONFIG_SYSCTL
.ctl_table_users = &udplite_sysctl_table_users,
.ctl_table_header = &udplite_sysctl_header,
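
The udplite conversion above replaces the two timeout scalars with a state-indexed array, exposes it through the new ->get_timeouts() callback and adds the ctnl_timeout conversion helpers. A minimal sketch (not part of this diff) of how a conntrack-core caller is expected to combine the pieces, assuming the nf_ct_timeout_find() helper and the ctnl_timeout data layout from the timeout extension added further down:

/* Sketch only: pick the timeout array for a conntrack entry.  A timeout
 * extension attached via the CT target overrides the per-protocol
 * defaults returned by ->get_timeouts().
 */
static unsigned int *timeouts_for(struct nf_conn *ct, struct net *net,
				  struct nf_conntrack_l4proto *l4proto)
{
	struct nf_conn_timeout *timeout_ext = nf_ct_timeout_find(ct);

	if (timeout_ext)
		return (unsigned int *)&timeout_ext->timeout->data;

	return l4proto->get_timeouts(net);
}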
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
new file mode 100644
index 00000000000..a878ce5b252
--- /dev/null
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -0,0 +1,60 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+
+struct ctnl_timeout *
+(*nf_ct_timeout_find_get_hook)(const char *name) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_timeout_find_get_hook);
+
+void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout) __read_mostly;
+EXPORT_SYMBOL_GPL(nf_ct_timeout_put_hook);
+
+static struct nf_ct_ext_type timeout_extend __read_mostly = {
+ .len = sizeof(struct nf_conn_timeout),
+ .align = __alignof__(struct nf_conn_timeout),
+ .id = NF_CT_EXT_TIMEOUT,
+};
+
+int nf_conntrack_timeout_init(struct net *net)
+{
+ int ret = 0;
+
+ if (net_eq(net, &init_net)) {
+ ret = nf_ct_extend_register(&timeout_extend);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_ct_timeout: Unable to register "
+ "timeout extension.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+void nf_conntrack_timeout_fini(struct net *net)
+{
+ if (net_eq(net, &init_net))
+ nf_ct_extend_unregister(&timeout_extend);
+}
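
nf_conntrack_timeout.c only registers the conntrack extension slot and the two rcu-assigned hooks that nfnetlink_cttimeout fills in; the extension itself is just a pointer to a ctnl_timeout object. A rough sketch of what the companion header (include/net/netfilter/nf_conntrack_timeout.h, assumed here) is expected to provide:

/* Sketch of the assumed extension layout and attach helper. */
struct nf_conn_timeout {
	struct ctnl_timeout *timeout;
};

static inline struct nf_conn_timeout *
nf_ct_timeout_ext_add(struct nf_conn *ct, struct ctnl_timeout *timeout,
		      gfp_t gfp)
{
	struct nf_conn_timeout *timeout_ext;

	timeout_ext = nf_ct_ext_add(ct, NF_CT_EXT_TIMEOUT, gfp);
	if (timeout_ext == NULL)
		return NULL;

	timeout_ext->timeout = timeout;
	return timeout_ext;
}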
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 11ba013e47f..3eb348bfc4f 100644
--- a/net/netfilter/nfnetlink_acct.c
+++ b/net/netfilter/nfnetlink_acct.c
@@ -171,8 +171,10 @@ nfnl_acct_get(struct sock *nfnl, struct sk_buff *skb,
char *acct_name;
if (nlh->nlmsg_flags & NLM_F_DUMP) {
- return netlink_dump_start(nfnl, skb, nlh, nfnl_acct_dump,
- NULL, 0);
+ struct netlink_dump_control c = {
+ .dump = nfnl_acct_dump,
+ };
+ return netlink_dump_start(nfnl, skb, nlh, &c);
}
if (!tb[NFACCT_NAME])
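
The nfnetlink_acct hunk is a mechanical conversion to the new netlink_dump_start() signature, which now takes a struct netlink_dump_control instead of separate callback arguments. The same pattern applies to any other nfnetlink GET handler; a hypothetical sketch, with example_dump standing in for the handler's dump callback:

/* Hypothetical conversion sketch: only .dump is set, the remaining
 * netlink_dump_control fields stay zero-initialized.
 */
if (nlh->nlmsg_flags & NLM_F_DUMP) {
	struct netlink_dump_control c = {
		.dump = example_dump,	/* placeholder dump callback */
	};
	return netlink_dump_start(nfnl, skb, nlh, &c);
}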
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
new file mode 100644
index 00000000000..fec29a43de4
--- /dev/null
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -0,0 +1,429 @@
+/*
+ * (C) 2012 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2012 by Vyatta Inc. <http://www.vyatta.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation (or any later at your option).
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/rculist.h>
+#include <linux/rculist_nulls.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/security.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/netlink.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include <linux/netfilter.h>
+#include <net/netlink.h>
+#include <net/sock.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
+
+#include <linux/netfilter/nfnetlink.h>
+#include <linux/netfilter/nfnetlink_cttimeout.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tuning");
+
+static LIST_HEAD(cttimeout_list);
+
+static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
+ [CTA_TIMEOUT_NAME] = { .type = NLA_NUL_STRING },
+ [CTA_TIMEOUT_L3PROTO] = { .type = NLA_U16 },
+ [CTA_TIMEOUT_L4PROTO] = { .type = NLA_U8 },
+ [CTA_TIMEOUT_DATA] = { .type = NLA_NESTED },
+};
+
+static int
+ctnl_timeout_parse_policy(struct ctnl_timeout *timeout,
+ struct nf_conntrack_l4proto *l4proto,
+ const struct nlattr *attr)
+{
+ int ret = 0;
+
+ if (likely(l4proto->ctnl_timeout.nlattr_to_obj)) {
+ struct nlattr *tb[l4proto->ctnl_timeout.nlattr_max+1];
+
+ nla_parse_nested(tb, l4proto->ctnl_timeout.nlattr_max,
+ attr, l4proto->ctnl_timeout.nla_policy);
+
+ ret = l4proto->ctnl_timeout.nlattr_to_obj(tb, &timeout->data);
+ }
+ return ret;
+}
+
+static int
+cttimeout_new_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ __u16 l3num;
+ __u8 l4num;
+ struct nf_conntrack_l4proto *l4proto;
+ struct ctnl_timeout *timeout, *matching = NULL;
+ char *name;
+ int ret;
+
+ if (!cda[CTA_TIMEOUT_NAME] ||
+ !cda[CTA_TIMEOUT_L3PROTO] ||
+ !cda[CTA_TIMEOUT_L4PROTO] ||
+ !cda[CTA_TIMEOUT_DATA])
+ return -EINVAL;
+
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+ l3num = ntohs(nla_get_be16(cda[CTA_TIMEOUT_L3PROTO]));
+ l4num = nla_get_u8(cda[CTA_TIMEOUT_L4PROTO]);
+
+ list_for_each_entry(timeout, &cttimeout_list, head) {
+ if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+
+ matching = timeout;
+ break;
+ }
+
+ l4proto = __nf_ct_l4proto_find(l3num, l4num);
+
+ /* This protocol is not supported, skip. */
+ if (l4proto->l4proto != l4num)
+ return -EOPNOTSUPP;
+
+ if (matching) {
+ if (nlh->nlmsg_flags & NLM_F_REPLACE) {
+ /* You cannot replace one timeout policy by another of
+ * different kind, sorry.
+ */
+ if (matching->l3num != l3num ||
+ matching->l4num != l4num)
+ return -EINVAL;
+
+ ret = ctnl_timeout_parse_policy(matching, l4proto,
+ cda[CTA_TIMEOUT_DATA]);
+ return ret;
+ }
+ return -EBUSY;
+ }
+
+ timeout = kzalloc(sizeof(struct ctnl_timeout) +
+ l4proto->ctnl_timeout.obj_size, GFP_KERNEL);
+ if (timeout == NULL)
+ return -ENOMEM;
+
+ ret = ctnl_timeout_parse_policy(timeout, l4proto,
+ cda[CTA_TIMEOUT_DATA]);
+ if (ret < 0)
+ goto err;
+
+ strcpy(timeout->name, nla_data(cda[CTA_TIMEOUT_NAME]));
+ timeout->l3num = l3num;
+ timeout->l4num = l4num;
+ atomic_set(&timeout->refcnt, 1);
+ list_add_tail_rcu(&timeout->head, &cttimeout_list);
+
+ return 0;
+err:
+ kfree(timeout);
+ return ret;
+}
+
+static int
+ctnl_timeout_fill_info(struct sk_buff *skb, u32 pid, u32 seq, u32 type,
+ int event, struct ctnl_timeout *timeout)
+{
+ struct nlmsghdr *nlh;
+ struct nfgenmsg *nfmsg;
+ unsigned int flags = pid ? NLM_F_MULTI : 0;
+ struct nf_conntrack_l4proto *l4proto;
+
+ event |= NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8;
+ nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
+ if (nlh == NULL)
+ goto nlmsg_failure;
+
+ nfmsg = nlmsg_data(nlh);
+ nfmsg->nfgen_family = AF_UNSPEC;
+ nfmsg->version = NFNETLINK_V0;
+ nfmsg->res_id = 0;
+
+ NLA_PUT_STRING(skb, CTA_TIMEOUT_NAME, timeout->name);
+ NLA_PUT_BE16(skb, CTA_TIMEOUT_L3PROTO, htons(timeout->l3num));
+ NLA_PUT_U8(skb, CTA_TIMEOUT_L4PROTO, timeout->l4num);
+ NLA_PUT_BE32(skb, CTA_TIMEOUT_USE,
+ htonl(atomic_read(&timeout->refcnt)));
+
+ l4proto = __nf_ct_l4proto_find(timeout->l3num, timeout->l4num);
+
+ /* If the timeout object does not match the layer 4 protocol tracker,
+ * then skip dumping the data part since we don't know how to
+ * interpret it. This may happen for UDPlite, SCTP and DCCP since
+ * you can unload the module.
+ */
+ if (timeout->l4num != l4proto->l4proto)
+ goto out;
+
+ if (likely(l4proto->ctnl_timeout.obj_to_nlattr)) {
+ struct nlattr *nest_parms;
+ int ret;
+
+ nest_parms = nla_nest_start(skb,
+ CTA_TIMEOUT_DATA | NLA_F_NESTED);
+ if (!nest_parms)
+ goto nla_put_failure;
+
+ ret = l4proto->ctnl_timeout.obj_to_nlattr(skb, &timeout->data);
+ if (ret < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, nest_parms);
+ }
+out:
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nlmsg_failure:
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -1;
+}
+
+static int
+ctnl_timeout_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct ctnl_timeout *cur, *last;
+
+ if (cb->args[2])
+ return 0;
+
+ last = (struct ctnl_timeout *)cb->args[1];
+ if (cb->args[1])
+ cb->args[1] = 0;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(cur, &cttimeout_list, head) {
+ if (last && cur != last)
+ continue;
+
+ if (ctnl_timeout_fill_info(skb, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(cb->nlh->nlmsg_type),
+ IPCTNL_MSG_TIMEOUT_NEW, cur) < 0) {
+ cb->args[1] = (unsigned long)cur;
+ break;
+ }
+ }
+ if (!cb->args[1])
+ cb->args[2] = 1;
+ rcu_read_unlock();
+ return skb->len;
+}
+
+static int
+cttimeout_get_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ int ret = -ENOENT;
+ char *name;
+ struct ctnl_timeout *cur;
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ struct netlink_dump_control c = {
+ .dump = ctnl_timeout_dump,
+ };
+ return netlink_dump_start(ctnl, skb, nlh, &c);
+ }
+
+ if (!cda[CTA_TIMEOUT_NAME])
+ return -EINVAL;
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+
+ list_for_each_entry(cur, &cttimeout_list, head) {
+ struct sk_buff *skb2;
+
+ if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (skb2 == NULL) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = ctnl_timeout_fill_info(skb2, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq,
+ NFNL_MSG_TYPE(nlh->nlmsg_type),
+ IPCTNL_MSG_TIMEOUT_NEW, cur);
+ if (ret <= 0) {
+ kfree_skb(skb2);
+ break;
+ }
+ ret = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid,
+ MSG_DONTWAIT);
+ if (ret > 0)
+ ret = 0;
+
+ /* this avoids a loop in nfnetlink. */
+ return ret == -EAGAIN ? -ENOBUFS : ret;
+ }
+ return ret;
+}
+
+/* try to delete object, fail if it is still in use. */
+static int ctnl_timeout_try_del(struct ctnl_timeout *timeout)
+{
+ int ret = 0;
+
+ /* we want to avoid races with nf_ct_timeout_find_get. */
+ if (atomic_dec_and_test(&timeout->refcnt)) {
+ /* We are protected by nfnl mutex. */
+ list_del_rcu(&timeout->head);
+ kfree_rcu(timeout, rcu_head);
+ } else {
+ /* still in use, restore reference counter. */
+ atomic_inc(&timeout->refcnt);
+ ret = -EBUSY;
+ }
+ return ret;
+}
+
+static int
+cttimeout_del_timeout(struct sock *ctnl, struct sk_buff *skb,
+ const struct nlmsghdr *nlh,
+ const struct nlattr * const cda[])
+{
+ char *name;
+ struct ctnl_timeout *cur;
+ int ret = -ENOENT;
+
+ if (!cda[CTA_TIMEOUT_NAME]) {
+ list_for_each_entry(cur, &cttimeout_list, head)
+ ctnl_timeout_try_del(cur);
+
+ return 0;
+ }
+ name = nla_data(cda[CTA_TIMEOUT_NAME]);
+
+ list_for_each_entry(cur, &cttimeout_list, head) {
+ if (strncmp(cur->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ ret = ctnl_timeout_try_del(cur);
+ if (ret < 0)
+ return ret;
+
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+static struct ctnl_timeout *ctnl_timeout_find_get(const char *name)
+{
+ struct ctnl_timeout *timeout, *matching = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(timeout, &cttimeout_list, head) {
+ if (strncmp(timeout->name, name, CTNL_TIMEOUT_NAME_MAX) != 0)
+ continue;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err;
+
+ if (!atomic_inc_not_zero(&timeout->refcnt)) {
+ module_put(THIS_MODULE);
+ goto err;
+ }
+ matching = timeout;
+ break;
+ }
+err:
+ rcu_read_unlock();
+ return matching;
+}
+
+static void ctnl_timeout_put(struct ctnl_timeout *timeout)
+{
+ atomic_dec(&timeout->refcnt);
+ module_put(THIS_MODULE);
+}
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+
+static const struct nfnl_callback cttimeout_cb[IPCTNL_MSG_TIMEOUT_MAX] = {
+ [IPCTNL_MSG_TIMEOUT_NEW] = { .call = cttimeout_new_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_GET] = { .call = cttimeout_get_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+ [IPCTNL_MSG_TIMEOUT_DELETE] = { .call = cttimeout_del_timeout,
+ .attr_count = CTA_TIMEOUT_MAX,
+ .policy = cttimeout_nla_policy },
+};
+
+static const struct nfnetlink_subsystem cttimeout_subsys = {
+ .name = "conntrack_timeout",
+ .subsys_id = NFNL_SUBSYS_CTNETLINK_TIMEOUT,
+ .cb_count = IPCTNL_MSG_TIMEOUT_MAX,
+ .cb = cttimeout_cb,
+};
+
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_TIMEOUT);
+
+static int __init cttimeout_init(void)
+{
+ int ret;
+
+ ret = nfnetlink_subsys_register(&cttimeout_subsys);
+ if (ret < 0) {
+ pr_err("cttimeout_init: cannot register cttimeout with "
+ "nfnetlink.\n");
+ goto err_out;
+ }
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, ctnl_timeout_find_get);
+ RCU_INIT_POINTER(nf_ct_timeout_put_hook, ctnl_timeout_put);
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+ return 0;
+
+err_out:
+ return ret;
+}
+
+static void __exit cttimeout_exit(void)
+{
+ struct ctnl_timeout *cur, *tmp;
+
+ pr_info("cttimeout: unregistering from nfnetlink.\n");
+
+ nfnetlink_subsys_unregister(&cttimeout_subsys);
+ list_for_each_entry_safe(cur, tmp, &cttimeout_list, head) {
+ list_del_rcu(&cur->head);
+ /* We are sure that our objects have no clients at this point,
+ * it's safe to release them all without checking refcnt.
+ */
+ kfree_rcu(cur, rcu_head);
+ }
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ RCU_INIT_POINTER(nf_ct_timeout_find_get_hook, NULL);
+ RCU_INIT_POINTER(nf_ct_timeout_put_hook, NULL);
+#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
+}
+
+module_init(cttimeout_init);
+module_exit(cttimeout_exit);
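
Userspace drives the new cttimeout subsystem over NETLINK_NETFILTER; libnetfilter_cttimeout is the intended consumer. A hypothetical libmnl sketch (not part of this patch) that creates a named UDPlite policy matching the attribute policy above; the policy name and the 60/300 second values are made up:

/* Hypothetical userspace sketch: add a UDPlite timeout policy via
 * IPCTNL_MSG_TIMEOUT_NEW.  Error handling trimmed for brevity.
 */
#include <stdlib.h>
#include <time.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_cttimeout.h>

int main(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;
	struct nlattr *nest;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = (NFNL_SUBSYS_CTNETLINK_TIMEOUT << 8) |
			  IPCTNL_MSG_TIMEOUT_NEW;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_ACK;
	nlh->nlmsg_seq = time(NULL);

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version = NFNETLINK_V0;
	nfg->res_id = 0;

	mnl_attr_put_strz(nlh, CTA_TIMEOUT_NAME, "udplite-slow");
	mnl_attr_put_u16(nlh, CTA_TIMEOUT_L3PROTO, htons(AF_INET));
	mnl_attr_put_u8(nlh, CTA_TIMEOUT_L4PROTO, IPPROTO_UDPLITE);

	nest = mnl_attr_nest_start(nlh, CTA_TIMEOUT_DATA);
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDPLITE_UNREPLIED, htonl(60));
	mnl_attr_put_u32(nlh, CTA_TIMEOUT_UDPLITE_REPLIED, htonl(300));
	mnl_attr_nest_end(nlh, nest);

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (nl == NULL || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		exit(EXIT_FAILURE);
	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		exit(EXIT_FAILURE);
	mnl_socket_close(nl);
	return 0;
}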
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 0221d10de75..b873445df44 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -16,10 +16,11 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
-static unsigned int xt_ct_target(struct sk_buff *skb,
- const struct xt_action_param *par)
+static unsigned int xt_ct_target_v0(struct sk_buff *skb,
+ const struct xt_action_param *par)
{
const struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
@@ -35,6 +36,23 @@ static unsigned int xt_ct_target(struct sk_buff *skb,
return XT_CONTINUE;
}
+static unsigned int xt_ct_target_v1(struct sk_buff *skb,
+ const struct xt_action_param *par)
+{
+ const struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+
+ /* Previously seen (loopback)? Ignore. */
+ if (skb->nfct != NULL)
+ return XT_CONTINUE;
+
+ atomic_inc(&ct->ct_general.use);
+ skb->nfct = &ct->ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+
+ return XT_CONTINUE;
+}
+
static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
{
if (par->family == NFPROTO_IPV4) {
@@ -53,7 +71,7 @@ static u8 xt_ct_find_proto(const struct xt_tgchk_param *par)
return 0;
}
-static int xt_ct_tg_check(const struct xt_tgchk_param *par)
+static int xt_ct_tg_check_v0(const struct xt_tgchk_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conntrack_tuple t;
@@ -130,7 +148,137 @@ err1:
return ret;
}
-static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
+static int xt_ct_tg_check_v1(const struct xt_tgchk_param *par)
+{
+ struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conntrack_tuple t;
+ struct nf_conn_help *help;
+ struct nf_conn *ct;
+ int ret = 0;
+ u8 proto;
+
+ if (info->flags & ~XT_CT_NOTRACK)
+ return -EINVAL;
+
+ if (info->flags & XT_CT_NOTRACK) {
+ ct = nf_ct_untracked_get();
+ atomic_inc(&ct->ct_general.use);
+ goto out;
+ }
+
+#ifndef CONFIG_NF_CONNTRACK_ZONES
+ if (info->zone)
+ goto err1;
+#endif
+
+ ret = nf_ct_l3proto_try_module_get(par->family);
+ if (ret < 0)
+ goto err1;
+
+ memset(&t, 0, sizeof(t));
+ ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL);
+ ret = PTR_ERR(ct);
+ if (IS_ERR(ct))
+ goto err2;
+
+ ret = 0;
+ if ((info->ct_events || info->exp_events) &&
+ !nf_ct_ecache_ext_add(ct, info->ct_events, info->exp_events,
+ GFP_KERNEL))
+ goto err3;
+
+ if (info->helper[0]) {
+ ret = -ENOENT;
+ proto = xt_ct_find_proto(par);
+ if (!proto) {
+ pr_info("You must specify a L4 protocol, "
+ "and not use inversions on it.\n");
+ goto err3;
+ }
+
+ ret = -ENOMEM;
+ help = nf_ct_helper_ext_add(ct, GFP_KERNEL);
+ if (help == NULL)
+ goto err3;
+
+ ret = -ENOENT;
+ help->helper = nf_conntrack_helper_try_module_get(info->helper,
+ par->family,
+ proto);
+ if (help->helper == NULL) {
+ pr_info("No such helper \"%s\"\n", info->helper);
+ goto err3;
+ }
+ }
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ if (info->timeout) {
+ typeof(nf_ct_timeout_find_get_hook) timeout_find_get;
+ struct ctnl_timeout *timeout;
+ struct nf_conn_timeout *timeout_ext;
+
+ timeout_find_get =
+ rcu_dereference(nf_ct_timeout_find_get_hook);
+
+ if (timeout_find_get) {
+ const struct ipt_entry *e = par->entryinfo;
+
+ if (e->ip.invflags & IPT_INV_PROTO) {
+ ret = -EINVAL;
+ pr_info("You cannot use inversion on "
+ "L4 protocol\n");
+ goto err3;
+ }
+ timeout = timeout_find_get(info->timeout);
+ if (timeout == NULL) {
+ ret = -ENOENT;
+ pr_info("No such timeout policy \"%s\"\n",
+ info->timeout);
+ goto err3;
+ }
+ if (timeout->l3num != par->family) {
+ ret = -EINVAL;
+ pr_info("Timeout policy `%s' can only be "
+ "used by L3 protocol number %d\n",
+ info->timeout, timeout->l3num);
+ goto err3;
+ }
+ if (timeout->l4num != e->ip.proto) {
+ ret = -EINVAL;
+ pr_info("Timeout policy `%s' can only be "
+ "used by L4 protocol number %d\n",
+ info->timeout, timeout->l4num);
+ goto err3;
+ }
+ timeout_ext = nf_ct_timeout_ext_add(ct, timeout,
+ GFP_KERNEL);
+ if (timeout_ext == NULL) {
+ ret = -ENOMEM;
+ goto err3;
+ }
+ } else {
+ ret = -ENOENT;
+ pr_info("Timeout policy base is empty\n");
+ goto err3;
+ }
+ }
+#endif
+
+ __set_bit(IPS_TEMPLATE_BIT, &ct->status);
+ __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+out:
+ info->ct = ct;
+ return 0;
+
+err3:
+ nf_conntrack_free(ct);
+err2:
+ nf_ct_l3proto_module_put(par->family);
+err1:
+ return ret;
+}
+
+static void xt_ct_tg_destroy_v0(const struct xt_tgdtor_param *par)
{
struct xt_ct_target_info *info = par->targinfo;
struct nf_conn *ct = info->ct;
@@ -146,25 +294,67 @@ static void xt_ct_tg_destroy(const struct xt_tgdtor_param *par)
nf_ct_put(info->ct);
}
-static struct xt_target xt_ct_tg __read_mostly = {
- .name = "CT",
- .family = NFPROTO_UNSPEC,
- .targetsize = sizeof(struct xt_ct_target_info),
- .checkentry = xt_ct_tg_check,
- .destroy = xt_ct_tg_destroy,
- .target = xt_ct_target,
- .table = "raw",
- .me = THIS_MODULE,
+static void xt_ct_tg_destroy_v1(const struct xt_tgdtor_param *par)
+{
+ struct xt_ct_target_info_v1 *info = par->targinfo;
+ struct nf_conn *ct = info->ct;
+ struct nf_conn_help *help;
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ struct nf_conn_timeout *timeout_ext;
+ typeof(nf_ct_timeout_put_hook) timeout_put;
+#endif
+ if (!nf_ct_is_untracked(ct)) {
+ help = nfct_help(ct);
+ if (help)
+ module_put(help->helper->me);
+
+ nf_ct_l3proto_module_put(par->family);
+
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
+ timeout_put = rcu_dereference(nf_ct_timeout_put_hook);
+
+ if (timeout_put) {
+ timeout_ext = nf_ct_timeout_find(ct);
+ if (timeout_ext)
+ timeout_put(timeout_ext->timeout);
+ }
+#endif
+ }
+ nf_ct_put(info->ct);
+}
+
+static struct xt_target xt_ct_tg_reg[] __read_mostly = {
+ {
+ .name = "CT",
+ .family = NFPROTO_UNSPEC,
+ .targetsize = sizeof(struct xt_ct_target_info),
+ .checkentry = xt_ct_tg_check_v0,
+ .destroy = xt_ct_tg_destroy_v0,
+ .target = xt_ct_target_v0,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "CT",
+ .family = NFPROTO_UNSPEC,
+ .revision = 1,
+ .targetsize = sizeof(struct xt_ct_target_info_v1),
+ .checkentry = xt_ct_tg_check_v1,
+ .destroy = xt_ct_tg_destroy_v1,
+ .target = xt_ct_target_v1,
+ .table = "raw",
+ .me = THIS_MODULE,
+ },
};
static int __init xt_ct_tg_init(void)
{
- return xt_register_target(&xt_ct_tg);
+ return xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
static void __exit xt_ct_tg_exit(void)
{
- xt_unregister_target(&xt_ct_tg);
+ xt_unregister_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
}
module_init(xt_ct_tg_init);
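
Revision 1 of the CT target exists to carry the timeout policy name in its userspace-visible info structure, resolved at checkentry time through nf_ct_timeout_find_get_hook; the v0 paths are only renamed. A rough sketch of the assumed include/linux/netfilter/xt_CT.h addition (field sizes assumed):

/* Assumed layout of the revision-1 target info; the new "timeout" field
 * names a policy created through nfnetlink_cttimeout.
 */
struct xt_ct_target_info_v1 {
	__u16 flags;
	__u16 zone;
	__u32 ct_events;
	__u32 exp_events;
	char helper[16];
	char timeout[32];

	/* Used internally by the kernel */
	struct nf_conn *ct __attribute__((aligned(8)));
};

With matching iptables support, a raw-table rule along the lines of "iptables -t raw -A PREROUTING -p udplite -j CT --timeout udplite-slow" would attach such a policy to new conntrack entries (shown for illustration only).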
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
new file mode 100644
index 00000000000..f99f8dee238
--- /dev/null
+++ b/net/netfilter/xt_LOG.c
@@ -0,0 +1,925 @@
+/*
+ * This is a module which is used for logging packets.
+ */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <net/netfilter/nf_log.h>
+#include <net/netfilter/xt_log.h>
+
+static struct nf_loginfo default_loginfo = {
+ .type = NF_LOG_TYPE_LOG,
+ .u = {
+ .log = {
+ .level = 5,
+ .logflags = NF_LOG_MASK,
+ },
+ },
+};
+
+static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
+ u8 proto, int fragment, unsigned int offset)
+{
+ struct udphdr _udph;
+ const struct udphdr *uh;
+
+ if (proto == IPPROTO_UDP)
+ /* Max length: 10 "PROTO=UDP " */
+ sb_add(m, "PROTO=UDP ");
+ else /* Max length: 14 "PROTO=UDPLITE " */
+ sb_add(m, "PROTO=UDPLITE ");
+
+ if (fragment)
+ goto out;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+
+ return 1;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
+ ntohs(uh->len));
+
+out:
+ return 0;
+}
+
+static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
+ u8 proto, int fragment, unsigned int offset,
+ unsigned int logflags)
+{
+ struct tcphdr _tcph;
+ const struct tcphdr *th;
+
+ /* Max length: 10 "PROTO=TCP " */
+ sb_add(m, "PROTO=TCP ");
+
+ if (fragment)
+ return 0;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+ return 1;
+ }
+
+ /* Max length: 20 "SPT=65535 DPT=65535 " */
+ sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
+ /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
+ if (logflags & XT_LOG_TCPSEQ)
+ sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
+
+ /* Max length: 13 "WINDOW=65535 " */
+ sb_add(m, "WINDOW=%u ", ntohs(th->window));
+ /* Max length: 9 "RES=0x3C " */
+ sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
+ TCP_RESERVED_BITS) >> 22));
+ /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
+ if (th->cwr)
+ sb_add(m, "CWR ");
+ if (th->ece)
+ sb_add(m, "ECE ");
+ if (th->urg)
+ sb_add(m, "URG ");
+ if (th->ack)
+ sb_add(m, "ACK ");
+ if (th->psh)
+ sb_add(m, "PSH ");
+ if (th->rst)
+ sb_add(m, "RST ");
+ if (th->syn)
+ sb_add(m, "SYN ");
+ if (th->fin)
+ sb_add(m, "FIN ");
+ /* Max length: 11 "URGP=65535 " */
+ sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
+
+ if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
+ u_int8_t _opt[60 - sizeof(struct tcphdr)];
+ const u_int8_t *op;
+ unsigned int i;
+ unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
+
+ op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
+ optsize, _opt);
+ if (op == NULL) {
+ sb_add(m, "OPT (TRUNCATED)");
+ return 1;
+ }
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ sb_add(m, "OPT (");
+ for (i = 0; i < optsize; i++)
+ sb_add(m, "%02X", op[i]);
+
+ sb_add(m, ") ");
+ }
+
+ return 0;
+}
+
+/* One level of recursion won't kill us */
+static void dump_ipv4_packet(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb,
+ unsigned int iphoff)
+{
+ struct iphdr _iph;
+ const struct iphdr *ih;
+ unsigned int logflags;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_MASK;
+
+ ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
+ if (ih == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Important fields:
+ * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
+ /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
+ sb_add(m, "SRC=%pI4 DST=%pI4 ",
+ &ih->saddr, &ih->daddr);
+
+ /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+ sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+ ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
+ ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
+
+ /* Max length: 6 "CE DF MF " */
+ if (ntohs(ih->frag_off) & IP_CE)
+ sb_add(m, "CE ");
+ if (ntohs(ih->frag_off) & IP_DF)
+ sb_add(m, "DF ");
+ if (ntohs(ih->frag_off) & IP_MF)
+ sb_add(m, "MF ");
+
+ /* Max length: 11 "FRAG:65535 " */
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
+
+ if ((logflags & XT_LOG_IPOPT) &&
+ ih->ihl * 4 > sizeof(struct iphdr)) {
+ const unsigned char *op;
+ unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
+ unsigned int i, optsize;
+
+ optsize = ih->ihl * 4 - sizeof(struct iphdr);
+ op = skb_header_pointer(skb, iphoff+sizeof(_iph),
+ optsize, _opt);
+ if (op == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 127 "OPT (" 15*4*2chars ") " */
+ sb_add(m, "OPT (");
+ for (i = 0; i < optsize; i++)
+ sb_add(m, "%02X", op[i]);
+ sb_add(m, ") ");
+ }
+
+ switch (ih->protocol) {
+ case IPPROTO_TCP:
+ if (dump_tcp_header(m, skb, ih->protocol,
+ ntohs(ih->frag_off) & IP_OFFSET,
+ iphoff+ih->ihl*4, logflags))
+ return;
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ if (dump_udp_header(m, skb, ih->protocol,
+ ntohs(ih->frag_off) & IP_OFFSET,
+ iphoff+ih->ihl*4))
+ return;
+ break;
+ case IPPROTO_ICMP: {
+ struct icmphdr _icmph;
+ const struct icmphdr *ich;
+ static const size_t required_len[NR_ICMP_TYPES+1]
+ = { [ICMP_ECHOREPLY] = 4,
+ [ICMP_DEST_UNREACH]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_SOURCE_QUENCH]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_REDIRECT]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_ECHO] = 4,
+ [ICMP_TIME_EXCEEDED]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_PARAMETERPROB]
+ = 8 + sizeof(struct iphdr),
+ [ICMP_TIMESTAMP] = 20,
+ [ICMP_TIMESTAMPREPLY] = 20,
+ [ICMP_ADDRESS] = 12,
+ [ICMP_ADDRESSREPLY] = 12 };
+
+ /* Max length: 11 "PROTO=ICMP " */
+ sb_add(m, "PROTO=ICMP ");
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
+ sizeof(_icmph), &_icmph);
+ if (ich == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Max length: 18 "TYPE=255 CODE=255 " */
+ sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ if (ich->type <= NR_ICMP_TYPES &&
+ required_len[ich->type] &&
+ skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ switch (ich->type) {
+ case ICMP_ECHOREPLY:
+ case ICMP_ECHO:
+ /* Max length: 19 "ID=65535 SEQ=65535 " */
+ sb_add(m, "ID=%u SEQ=%u ",
+ ntohs(ich->un.echo.id),
+ ntohs(ich->un.echo.sequence));
+ break;
+
+ case ICMP_PARAMETERPROB:
+ /* Max length: 14 "PARAMETER=255 " */
+ sb_add(m, "PARAMETER=%u ",
+ ntohl(ich->un.gateway) >> 24);
+ break;
+ case ICMP_REDIRECT:
+ /* Max length: 24 "GATEWAY=255.255.255.255 " */
+ sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
+ /* Fall through */
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_TIME_EXCEEDED:
+ /* Max length: 3+maxlen */
+ if (!iphoff) { /* Only recurse once. */
+ sb_add(m, "[");
+ dump_ipv4_packet(m, info, skb,
+ iphoff + ih->ihl*4+sizeof(_icmph));
+ sb_add(m, "] ");
+ }
+
+ /* Max length: 10 "MTU=65535 " */
+ if (ich->type == ICMP_DEST_UNREACH &&
+ ich->code == ICMP_FRAG_NEEDED)
+ sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
+ }
+ break;
+ }
+ /* Max Length */
+ case IPPROTO_AH: {
+ struct ip_auth_hdr _ahdr;
+ const struct ip_auth_hdr *ah;
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 9 "PROTO=AH " */
+ sb_add(m, "PROTO=AH ");
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
+ sizeof(_ahdr), &_ahdr);
+ if (ah == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
+ break;
+ }
+ case IPPROTO_ESP: {
+ struct ip_esp_hdr _esph;
+ const struct ip_esp_hdr *eh;
+
+ /* Max length: 10 "PROTO=ESP " */
+ sb_add(m, "PROTO=ESP ");
+
+ if (ntohs(ih->frag_off) & IP_OFFSET)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
+ sizeof(_esph), &_esph);
+ if (eh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ",
+ skb->len - iphoff - ih->ihl*4);
+ break;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 " */
+ sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
+ break;
+ }
+ /* Max length: 10 "PROTO 255 " */
+ default:
+ sb_add(m, "PROTO=%u ", ih->protocol);
+ }
+
+ /* Max length: 15 "UID=4294967295 " */
+ if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) {
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ sb_add(m, "UID=%u GID=%u ",
+ skb->sk->sk_socket->file->f_cred->fsuid,
+ skb->sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ }
+
+ /* Max length: 16 "MARK=0xFFFFFFFF " */
+ if (!iphoff && skb->mark)
+ sb_add(m, "MARK=0x%x ", skb->mark);
+
+ /* Proto Max log string length */
+ /* IP: 40+46+6+11+127 = 230 */
+ /* TCP: 10+max(25,20+30+13+9+32+11+127) = 252 */
+ /* UDP: 10+max(25,20) = 35 */
+ /* UDPLITE: 14+max(25,20) = 39 */
+ /* ICMP: 11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
+ /* ESP: 10+max(25)+15 = 50 */
+ /* AH: 9+max(25)+15 = 49 */
+ /* unknown: 10 */
+
+ /* (ICMP allows recursion one level deep) */
+ /* maxlen = IP + ICMP + IP + max(TCP,UDP,ICMP,unknown) */
+ /* maxlen = 230+ 91 + 230 + 252 = 803 */
+}
+
+static void dump_ipv4_mac_header(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ unsigned int logflags = 0;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+
+ if (!(logflags & XT_LOG_MACDECODE))
+ goto fallback;
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+ sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+ break;
+ }
+
+fallback:
+ sb_add(m, "MAC=");
+ if (dev->hard_header_len &&
+ skb->mac_header != skb->network_header) {
+ const unsigned char *p = skb_mac_header(skb);
+ unsigned int i;
+
+ sb_add(m, "%02x", *p++);
+ for (i = 1; i < dev->hard_header_len; i++, p++)
+ sb_add(m, ":%02x", *p);
+ }
+ sb_add(m, " ");
+}
+
+static void
+log_packet_common(struct sbuff *m,
+ u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level,
+ prefix,
+ in ? in->name : "",
+ out ? out->name : "");
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (skb->nf_bridge) {
+ const struct net_device *physindev;
+ const struct net_device *physoutdev;
+
+ physindev = skb->nf_bridge->physindev;
+ if (physindev && in != physindev)
+ sb_add(m, "PHYSIN=%s ", physindev->name);
+ physoutdev = skb->nf_bridge->physoutdev;
+ if (physoutdev && out != physoutdev)
+ sb_add(m, "PHYSOUT=%s ", physoutdev->name);
+ }
+#endif
+}
+
+
+static void
+ipt_log_packet(u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ struct sbuff *m = sb_open();
+
+ if (!loginfo)
+ loginfo = &default_loginfo;
+
+ log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
+
+ if (in != NULL)
+ dump_ipv4_mac_header(m, loginfo, skb);
+
+ dump_ipv4_packet(m, loginfo, skb, 0);
+
+ sb_close(m);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* One level of recursion won't kill us */
+static void dump_ipv6_packet(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb, unsigned int ip6hoff,
+ int recurse)
+{
+ u_int8_t currenthdr;
+ int fragment;
+ struct ipv6hdr _ip6h;
+ const struct ipv6hdr *ih;
+ unsigned int ptr;
+ unsigned int hdrlen = 0;
+ unsigned int logflags;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+ else
+ logflags = NF_LOG_MASK;
+
+ ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
+ if (ih == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+ sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
+
+ /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
+ sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
+ ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
+ (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
+ ih->hop_limit,
+ (ntohl(*(__be32 *)ih) & 0x000fffff));
+
+ fragment = 0;
+ ptr = ip6hoff + sizeof(struct ipv6hdr);
+ currenthdr = ih->nexthdr;
+ while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
+ struct ipv6_opt_hdr _hdr;
+ const struct ipv6_opt_hdr *hp;
+
+ hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
+ if (hp == NULL) {
+ sb_add(m, "TRUNCATED");
+ return;
+ }
+
+ /* Max length: 48 "OPT (...) " */
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, "OPT ( ");
+
+ switch (currenthdr) {
+ case IPPROTO_FRAGMENT: {
+ struct frag_hdr _fhdr;
+ const struct frag_hdr *fh;
+
+ sb_add(m, "FRAG:");
+ fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
+ &_fhdr);
+ if (fh == NULL) {
+ sb_add(m, "TRUNCATED ");
+ return;
+ }
+
+ /* Max length: 6 "65535 " */
+ sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
+
+ /* Max length: 11 "INCOMPLETE " */
+ if (fh->frag_off & htons(0x0001))
+ sb_add(m, "INCOMPLETE ");
+
+ sb_add(m, "ID:%08x ", ntohl(fh->identification));
+
+ if (ntohs(fh->frag_off) & 0xFFF8)
+ fragment = 1;
+
+ hdrlen = 8;
+
+ break;
+ }
+ case IPPROTO_DSTOPTS:
+ case IPPROTO_ROUTING:
+ case IPPROTO_HOPOPTS:
+ if (fragment) {
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, ")");
+ return;
+ }
+ hdrlen = ipv6_optlen(hp);
+ break;
+ /* Max Length */
+ case IPPROTO_AH:
+ if (logflags & XT_LOG_IPOPT) {
+ struct ip_auth_hdr _ahdr;
+ const struct ip_auth_hdr *ah;
+
+ /* Max length: 3 "AH " */
+ sb_add(m, "AH ");
+
+ if (fragment) {
+ sb_add(m, ")");
+ return;
+ }
+
+ ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
+ &_ahdr);
+ if (ah == NULL) {
+ /*
+ * Max length: 26 "INCOMPLETE [65535
+ * bytes] )"
+ */
+ sb_add(m, "INCOMPLETE [%u bytes] )",
+ skb->len - ptr);
+ return;
+ }
+
+ /* Length: 15 "SPI=0xF1234567 */
+ sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
+
+ }
+
+ hdrlen = (hp->hdrlen+2)<<2;
+ break;
+ case IPPROTO_ESP:
+ if (logflags & XT_LOG_IPOPT) {
+ struct ip_esp_hdr _esph;
+ const struct ip_esp_hdr *eh;
+
+ /* Max length: 4 "ESP " */
+ sb_add(m, "ESP ");
+
+ if (fragment) {
+ sb_add(m, ")");
+ return;
+ }
+
+ /*
+ * Max length: 26 "INCOMPLETE [65535 bytes] )"
+ */
+ eh = skb_header_pointer(skb, ptr, sizeof(_esph),
+ &_esph);
+ if (eh == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] )",
+ skb->len - ptr);
+ return;
+ }
+
+ /* Length: 16 "SPI=0xF1234567 )" */
+ sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
+
+ }
+ return;
+ default:
+ /* Max length: 20 "Unknown Ext Hdr 255" */
+ sb_add(m, "Unknown Ext Hdr %u", currenthdr);
+ return;
+ }
+ if (logflags & XT_LOG_IPOPT)
+ sb_add(m, ") ");
+
+ currenthdr = hp->nexthdr;
+ ptr += hdrlen;
+ }
+
+ switch (currenthdr) {
+ case IPPROTO_TCP:
+ if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
+ logflags))
+ return;
+ break;
+ case IPPROTO_UDP:
+ case IPPROTO_UDPLITE:
+ if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
+ return;
+ break;
+ case IPPROTO_ICMPV6: {
+ struct icmp6hdr _icmp6h;
+ const struct icmp6hdr *ic;
+
+ /* Max length: 13 "PROTO=ICMPv6 " */
+ sb_add(m, "PROTO=ICMPv6 ");
+
+ if (fragment)
+ break;
+
+ /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+ ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
+ if (ic == NULL) {
+ sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
+ return;
+ }
+
+ /* Max length: 18 "TYPE=255 CODE=255 " */
+ sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
+
+ switch (ic->icmp6_type) {
+ case ICMPV6_ECHO_REQUEST:
+ case ICMPV6_ECHO_REPLY:
+ /* Max length: 19 "ID=65535 SEQ=65535 " */
+ sb_add(m, "ID=%u SEQ=%u ",
+ ntohs(ic->icmp6_identifier),
+ ntohs(ic->icmp6_sequence));
+ break;
+ case ICMPV6_MGM_QUERY:
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MGM_REDUCTION:
+ break;
+
+ case ICMPV6_PARAMPROB:
+ /* Max length: 17 "POINTER=ffffffff " */
+ sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
+ /* Fall through */
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ /* Max length: 3+maxlen */
+ if (recurse) {
+ sb_add(m, "[");
+ dump_ipv6_packet(m, info, skb,
+ ptr + sizeof(_icmp6h), 0);
+ sb_add(m, "] ");
+ }
+
+ /* Max length: 10 "MTU=65535 " */
+ if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
+ sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
+ }
+ break;
+ }
+ /* Max length: 10 "PROTO=255 " */
+ default:
+ sb_add(m, "PROTO=%u ", currenthdr);
+ }
+
+ /* Max length: 15 "UID=4294967295 " */
+ if ((logflags & XT_LOG_UID) && recurse && skb->sk) {
+ read_lock_bh(&skb->sk->sk_callback_lock);
+ if (skb->sk->sk_socket && skb->sk->sk_socket->file)
+ sb_add(m, "UID=%u GID=%u ",
+ skb->sk->sk_socket->file->f_cred->fsuid,
+ skb->sk->sk_socket->file->f_cred->fsgid);
+ read_unlock_bh(&skb->sk->sk_callback_lock);
+ }
+
+ /* Max length: 16 "MARK=0xFFFFFFFF " */
+ if (!recurse && skb->mark)
+ sb_add(m, "MARK=0x%x ", skb->mark);
+}
+
+static void dump_ipv6_mac_header(struct sbuff *m,
+ const struct nf_loginfo *info,
+ const struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ unsigned int logflags = 0;
+
+ if (info->type == NF_LOG_TYPE_LOG)
+ logflags = info->u.log.logflags;
+
+ if (!(logflags & XT_LOG_MACDECODE))
+ goto fallback;
+
+ switch (dev->type) {
+ case ARPHRD_ETHER:
+ sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+ eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+ ntohs(eth_hdr(skb)->h_proto));
+ return;
+ default:
+ break;
+ }
+
+fallback:
+ sb_add(m, "MAC=");
+ if (dev->hard_header_len &&
+ skb->mac_header != skb->network_header) {
+ const unsigned char *p = skb_mac_header(skb);
+ unsigned int len = dev->hard_header_len;
+ unsigned int i;
+
+ if (dev->type == ARPHRD_SIT) {
+ p -= ETH_HLEN;
+
+ if (p < skb->head)
+ p = NULL;
+ }
+
+ if (p != NULL) {
+ sb_add(m, "%02x", *p++);
+ for (i = 1; i < len; i++)
+ sb_add(m, ":%02x", *p++);
+ }
+ sb_add(m, " ");
+
+ if (dev->type == ARPHRD_SIT) {
+ const struct iphdr *iph =
+ (struct iphdr *)skb_mac_header(skb);
+ sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
+ &iph->daddr);
+ }
+ } else
+ sb_add(m, " ");
+}
+
+static void
+ip6t_log_packet(u_int8_t pf,
+ unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct nf_loginfo *loginfo,
+ const char *prefix)
+{
+ struct sbuff *m = sb_open();
+
+ if (!loginfo)
+ loginfo = &default_loginfo;
+
+ log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
+
+ if (in != NULL)
+ dump_ipv6_mac_header(m, loginfo, skb);
+
+ dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+
+ sb_close(m);
+}
+#endif
+
+static unsigned int
+log_tg(struct sk_buff *skb, const struct xt_action_param *par)
+{
+ const struct xt_log_info *loginfo = par->targinfo;
+ struct nf_loginfo li;
+
+ li.type = NF_LOG_TYPE_LOG;
+ li.u.log.level = loginfo->level;
+ li.u.log.logflags = loginfo->logflags;
+
+ if (par->family == NFPROTO_IPV4)
+ ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in,
+ par->out, &li, loginfo->prefix);
+#if IS_ENABLED(CONFIG_IPV6)
+ else if (par->family == NFPROTO_IPV6)
+ ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in,
+ par->out, &li, loginfo->prefix);
+#endif
+ else
+ WARN_ON_ONCE(1);
+
+ return XT_CONTINUE;
+}
+
+static int log_tg_check(const struct xt_tgchk_param *par)
+{
+ const struct xt_log_info *loginfo = par->targinfo;
+
+ if (par->family != NFPROTO_IPV4 && par->family != NFPROTO_IPV6)
+ return -EINVAL;
+
+ if (loginfo->level >= 8) {
+ pr_debug("level %u >= 8\n", loginfo->level);
+ return -EINVAL;
+ }
+
+ if (loginfo->prefix[sizeof(loginfo->prefix)-1] != '\0') {
+ pr_debug("prefix is not null-terminated\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct xt_target log_tg_regs[] __read_mostly = {
+ {
+ .name = "LOG",
+ .family = NFPROTO_IPV4,
+ .target = log_tg,
+ .targetsize = sizeof(struct xt_log_info),
+ .checkentry = log_tg_check,
+ .me = THIS_MODULE,
+ },
+#if IS_ENABLED(CONFIG_IPV6)
+ {
+ .name = "LOG",
+ .family = NFPROTO_IPV6,
+ .target = log_tg,
+ .targetsize = sizeof(struct xt_log_info),
+ .checkentry = log_tg_check,
+ .me = THIS_MODULE,
+ },
+#endif
+};
+
+static struct nf_logger ipt_log_logger __read_mostly = {
+ .name = "ipt_LOG",
+ .logfn = &ipt_log_packet,
+ .me = THIS_MODULE,
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct nf_logger ip6t_log_logger __read_mostly = {
+ .name = "ip6t_LOG",
+ .logfn = &ip6t_log_packet,
+ .me = THIS_MODULE,
+};
+#endif
+
+static int __init log_tg_init(void)
+{
+ int ret;
+
+ ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
+ if (ret < 0)
+ return ret;
+
+ nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
+#endif
+ return 0;
+}
+
+static void __exit log_tg_exit(void)
+{
+ nf_log_unregister(&ipt_log_logger);
+#if IS_ENABLED(CONFIG_IPV6)
+ nf_log_unregister(&ip6t_log_logger);
+#endif
+ xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
+}
+
+module_init(log_tg_init);
+module_exit(log_tg_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_AUTHOR("Jan Rekorajski <baggins@pld.org.pl>");
+MODULE_DESCRIPTION("Xtables: IPv4/IPv6 packet logging");
+MODULE_ALIAS("ipt_LOG");
+MODULE_ALIAS("ip6t_LOG");