network: add brcm fullcone support from ASUS Merlin kernel 4.19

coolsnowwolf 2022-12-06 00:26:03 +08:00
parent cb6d8ad182
commit 193db6f186
9 changed files with 1075 additions and 2 deletions

View File

@@ -9,7 +9,7 @@
 include $(TOPDIR)/rules.mk
 
 PKG_NAME:=firewall
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
 PKG_SOURCE_PROTO:=git
 PKG_SOURCE_URL=$(PROJECT_GIT)/project/firewall3.git

View File

@@ -0,0 +1,60 @@
--- a/defaults.c
+++ b/defaults.c
@@ -49,7 +49,7 @@ const struct fw3_option fw3_flag_opts[]
FW3_OPT("synflood_rate", limit, defaults, syn_flood_rate),
FW3_OPT("synflood_burst", int, defaults, syn_flood_rate.burst),
- FW3_OPT("fullcone", bool, defaults, fullcone),
+ FW3_OPT("fullcone", int, defaults, fullcone),
FW3_OPT("tcp_syncookies", bool, defaults, tcp_syncookies),
FW3_OPT("tcp_ecn", int, defaults, tcp_ecn),
--- a/options.h
+++ b/options.h
@@ -98,6 +98,13 @@ enum fw3_reject_code
__FW3_REJECT_CODE_MAX
};
+enum fullcone_code
+{
+ FULLCONE_DISABLED = 0,
+ FULLCONE_CHION = 1,
+ FULLCONE_BCM = 2,
+};
+
extern const char *fw3_flag_names[__FW3_FLAG_MAX];
@@ -297,7 +304,7 @@ struct fw3_defaults
enum fw3_reject_code any_reject_code;
bool syn_flood;
- bool fullcone;
+ int fullcone;
struct fw3_limit syn_flood_rate;
bool tcp_syncookies;
--- a/zones.c
+++ b/zones.c
@@ -757,7 +757,7 @@ print_zone_rule(struct fw3_ipt_handle *h
r = fw3_ipt_rule_new(handle);
fw3_ipt_rule_src_dest(r, msrc, mdest);
/*FIXME: Workaround for FULLCONE-NAT*/
- if(defs->fullcone)
+ if(defs->fullcone == FULLCONE_CHION)
{
warn("%s will enable FULLCONE-NAT", zone->name);
fw3_ipt_rule_target(r, "FULLCONENAT");
@@ -767,6 +767,12 @@ print_zone_rule(struct fw3_ipt_handle *h
fw3_ipt_rule_target(r, "FULLCONENAT");
fw3_ipt_rule_append(r, "zone_%s_prerouting", zone->name);
}
+ else if (defs->fullcone == FULLCONE_BCM)
+ {
+ fw3_ipt_rule_target(r, "MASQUERADE");
+ fw3_ipt_rule_extra(r, "--mode fullcone");
+ fw3_ipt_rule_append(r, "zone_%s_postrouting", zone->name);
+ }
else
{
fw3_ipt_rule_target(r, "MASQUERADE");

View File

@@ -10,7 +10,7 @@ include $(INCLUDE_DIR)/kernel.mk
 
 PKG_NAME:=iptables
 PKG_VERSION:=1.8.7
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 
 PKG_SOURCE_URL:=https://netfilter.org/projects/iptables/files
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2

View File

@@ -0,0 +1,73 @@
--- a/extensions/libipt_MASQUERADE.c
+++ b/extensions/libipt_MASQUERADE.c
@@ -11,6 +11,7 @@
enum {
O_TO_PORTS = 0,
O_RANDOM,
+ O_MODE,
O_RANDOM_FULLY,
};
@@ -23,13 +24,16 @@ static void MASQUERADE_help(void)
" --random\n"
" Randomize source port.\n"
" --random-fully\n"
-" Fully randomize source port.\n");
+" Fully randomize source port.\n"
+" --mode <fullcone|symmetric>\n"
+" NAT mode.\n");
}
static const struct xt_option_entry MASQUERADE_opts[] = {
{.name = "to-ports", .id = O_TO_PORTS, .type = XTTYPE_STRING},
{.name = "random", .id = O_RANDOM, .type = XTTYPE_NONE},
{.name = "random-fully", .id = O_RANDOM_FULLY, .type = XTTYPE_NONE},
+ {.name = "mode", .id = O_MODE, .type = XTTYPE_STRING},
XTOPT_TABLEEND,
};
@@ -90,6 +94,8 @@ static void MASQUERADE_parse(struct xt_o
else
portok = 0;
+ mr->range[0].min_ip = 0;
+
xtables_option_parse(cb);
switch (cb->entry->id) {
case O_TO_PORTS:
@@ -104,6 +110,15 @@ static void MASQUERADE_parse(struct xt_o
case O_RANDOM_FULLY:
mr->range[0].flags |= NF_NAT_RANGE_PROTO_RANDOM_FULLY;
break;
+ case O_MODE:
+ if (strcasecmp(cb->arg, "fullcone") == 0)
+ mr->range[0].min_ip = 1;
+ else if (strcasecmp(cb->arg, "symmetric") == 0)
+ mr->range[0].min_ip = 0;
+ else
+ xtables_error(PARAMETER_PROBLEM,
+ "Unknown mode %s", cb->arg);
+ break;
}
}
@@ -126,6 +141,9 @@ MASQUERADE_print(const void *ip, const s
if (r->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY)
printf(" random-fully");
+
+ if (r->min_ip == 1)
+ printf(" mode: fullcone");
}
static void
@@ -145,6 +163,9 @@ MASQUERADE_save(const void *ip, const st
if (r->flags & NF_NAT_RANGE_PROTO_RANDOM_FULLY)
printf(" --random-fully");
+
+ if (r->min_ip == 1)
+ printf(" --mode fullcone");
}
static int MASQUERADE_xlate(struct xt_xlate *xl,
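
With the patched iptables binary the mode is selected per rule; a usage sketch (the interface name is illustrative):

iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE --mode fullcone

Per MASQUERADE_save above, iptables-save and iptables -S reproduce the rule with --mode fullcone appended. Note that the mode is smuggled through mr->range[0].min_ip (1 = fullcone, 0 = symmetric) rather than a new flag bit, so the xtables data layout stays unchanged; the kernel side of the patch reads it back via range->min_addr.ip, a field that stock MASQUERADE ignores.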

View File

@@ -0,0 +1,235 @@
--- a/net/netfilter/nf_nat_masquerade.c
+++ b/net/netfilter/nf_nat_masquerade.c
@@ -8,6 +8,9 @@
#include <linux/netfilter_ipv6.h>
#include <net/netfilter/nf_nat_masquerade.h>
+#include <net/netfilter/nf_conntrack_zones.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
struct masq_dev_work {
struct work_struct work;
@@ -23,6 +26,129 @@ static DEFINE_MUTEX(masq_mutex);
static unsigned int masq_refcnt __read_mostly;
static atomic_t masq_worker_count __read_mostly;
+static void bcm_nat_expect(struct nf_conn *ct,
+ struct nf_conntrack_expect *exp)
+{
+ struct nf_nat_range2 range;
+
+ /* This must be a fresh one. */
+ BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+ /* Change src to where new ct comes from */
+ range.flags = NF_NAT_RANGE_MAP_IPS;
+ range.min_addr = range.max_addr =
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3;
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
+
+ /* For DST manip, map port here to where it's expected. */
+ range.flags = (NF_NAT_RANGE_MAP_IPS | NF_NAT_RANGE_PROTO_SPECIFIED);
+ range.min_proto = range.max_proto = exp->saved_proto;
+ range.min_addr = range.max_addr = exp->saved_addr;
+ nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);
+}
+
+/****************************************************************************/
+static int bcm_nat_help(struct sk_buff *skb, unsigned int protoff,
+ struct nf_conn *ct, enum ip_conntrack_info ctinfo)
+{
+ int dir = CTINFO2DIR(ctinfo);
+ struct nf_conn_help *help = nfct_help(ct);
+ struct nf_conntrack_expect *exp;
+
+ if (dir != IP_CT_DIR_ORIGINAL ||
+ help->expecting[NF_CT_EXPECT_CLASS_DEFAULT])
+ return NF_ACCEPT;
+
+ pr_debug("bcm_nat: packet[%d bytes] ", skb->len);
+ nf_ct_dump_tuple(&ct->tuplehash[dir].tuple);
+ pr_debug("reply: ");
+ nf_ct_dump_tuple(&ct->tuplehash[!dir].tuple);
+
+ /* Create expect */
+ if ((exp = nf_ct_expect_alloc(ct)) == NULL)
+ return NF_ACCEPT;
+
+ nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, AF_INET, NULL,
+ &ct->tuplehash[!dir].tuple.dst.u3, IPPROTO_UDP,
+ NULL, &ct->tuplehash[!dir].tuple.dst.u.udp.port);
+ exp->flags = NF_CT_EXPECT_PERMANENT;
+ exp->saved_addr = ct->tuplehash[dir].tuple.src.u3;
+ exp->saved_proto.udp.port = ct->tuplehash[dir].tuple.src.u.udp.port;
+ exp->dir = !dir;
+ exp->expectfn = bcm_nat_expect;
+
+ /* Setup expect */
+ nf_ct_expect_related(exp, 0);
+ nf_ct_expect_put(exp);
+ pr_debug("bcm_nat: expect setup\n");
+
+ return NF_ACCEPT;
+}
+
+/****************************************************************************/
+static struct nf_conntrack_expect_policy bcm_nat_exp_policy __read_mostly = {
+ .max_expected = 1000,
+ .timeout = 240,
+};
+
+/****************************************************************************/
+static struct nf_conntrack_helper nf_conntrack_helper_bcm_nat __read_mostly = {
+ .name = "BCM-NAT",
+ .me = THIS_MODULE,
+ .tuple.src.l3num = AF_INET,
+ .tuple.dst.protonum = IPPROTO_UDP,
+ .expect_policy = &bcm_nat_exp_policy,
+ .expect_class_max = 1,
+ .help = bcm_nat_help,
+};
+
+/****************************************************************************/
+static inline int find_exp(__be32 ip, __be16 port, struct nf_conn *ct)
+{
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_expect *i = NULL;
+
+
+ memset(&tuple, 0, sizeof(tuple));
+ tuple.src.l3num = AF_INET;
+ tuple.dst.protonum = IPPROTO_UDP;
+ tuple.dst.u3.ip = ip;
+ tuple.dst.u.udp.port = port;
+
+ rcu_read_lock();
+ i = __nf_ct_expect_find(nf_ct_net(ct), nf_ct_zone(ct), &tuple);
+ rcu_read_unlock();
+
+ return i != NULL;
+}
+
+/****************************************************************************/
+static inline struct nf_conntrack_expect *find_fullcone_exp(struct nf_conn *ct)
+{
+ struct nf_conntrack_tuple * tp =
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
+ struct nf_conntrack_expect * exp = NULL;
+ struct nf_conntrack_expect * i;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < nf_ct_expect_hsize; h++) {
+ hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
+ if (nf_inet_addr_cmp(&i->saved_addr, &tp->src.u3) &&
+ i->saved_proto.all == tp->src.u.all &&
+ i->tuple.dst.protonum == tp->dst.protonum &&
+ i->tuple.src.u3.ip == 0 &&
+ i->tuple.src.u.udp.port == 0) {
+ exp = i;
+ break;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ return exp;
+}
+
unsigned int
nf_nat_masquerade_ipv4(struct sk_buff *skb, unsigned int hooknum,
const struct nf_nat_range2 *range,
@@ -60,6 +186,72 @@ nf_nat_masquerade_ipv4(struct sk_buff *s
if (nat)
nat->masq_index = out->ifindex;
+/* RFC 4787 - 4.2.2. Port Parity
+ i.e., an even port will be mapped to an even port, and an odd port will be mapped to an odd port.
+*/
+#define CHECK_PORT_PARITY(a, b) ((a%2)==(b%2))
+ if (range->min_addr.ip != 0 /* nat_mode == full cone */
+ && (nfct_help(ct) == NULL || nfct_help(ct)->helper == NULL)
+ && nf_ct_protonum(ct) == IPPROTO_UDP) {
+ unsigned int ret;
+ u_int16_t minport;
+ u_int16_t maxport;
+ struct nf_conntrack_expect *exp;
+
+ pr_debug("bcm_nat: need full cone NAT\n");
+
+ /* Choose port */
+ spin_lock_bh(&nf_conntrack_expect_lock);
+ /* Look for existing expectation */
+ exp = find_fullcone_exp(ct);
+ if (exp) {
+ minport = maxport = exp->tuple.dst.u.udp.port;
+ pr_debug("bcm_nat: existing mapped port = %hu\n",
+ ntohs(minport));
+ } else { /* no previous expect */
+ u_int16_t newport, tmpport, orgport;
+
+ minport = range->min_proto.all == 0?
+ ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.
+ u.udp.port : range->min_proto.all;
+ maxport = range->max_proto.all == 0?
+ htons(65535) : range->max_proto.all;
+ orgport = ntohs(minport);
+ for (newport = ntohs(minport),tmpport = ntohs(maxport);
+ newport <= tmpport; newport++) {
+ if (CHECK_PORT_PARITY(orgport, newport) && !find_exp(newsrc, htons(newport), ct)) {
+ pr_debug("bcm_nat: new mapped port = "
+ "%hu\n", newport);
+ minport = maxport = htons(newport);
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&nf_conntrack_expect_lock);
+
+
+ memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
+ memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
+
+ newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS |
+ NF_NAT_RANGE_PROTO_SPECIFIED;
+ newrange.max_addr.ip = newrange.min_addr.ip = newsrc;
+ newrange.min_proto.udp.port = newrange.max_proto.udp.port = minport;
+
+ /* Set ct helper */
+ ret = nf_nat_setup_info(ct, &newrange, NF_NAT_MANIP_SRC);
+ if (ret == NF_ACCEPT) {
+ struct nf_conn_help *help = nfct_help(ct);
+ if (help == NULL)
+ help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
+ if (help != NULL) {
+ help->helper = &nf_conntrack_helper_bcm_nat;
+ pr_debug("bcm_nat: helper set\n");
+ }
+ }
+ return ret;
+ }
+
/* Transfer from original range. */
memset(&newrange.min_addr, 0, sizeof(newrange.min_addr));
memset(&newrange.max_addr, 0, sizeof(newrange.max_addr));
@@ -347,6 +539,7 @@ EXPORT_SYMBOL_GPL(nf_nat_masquerade_inet
void nf_nat_masquerade_inet_unregister_notifiers(void)
{
+ nf_conntrack_helper_unregister(&nf_conntrack_helper_bcm_nat);
mutex_lock(&masq_mutex);
/* check if the notifiers still have clients */
if (--masq_refcnt > 0)
--- a/net/netfilter/xt_MASQUERADE.c
+++ b/net/netfilter/xt_MASQUERADE.c
@@ -42,6 +42,9 @@ masquerade_tg(struct sk_buff *skb, const
range.min_proto = mr->range[0].min;
range.max_proto = mr->range[0].max;
+ range.min_addr.ip = mr->range[0].min_ip;
+ range.max_addr.ip = mr->range[0].max_ip;
+
return nf_nat_masquerade_ipv4(skb, xt_hooknum(par), &range,
xt_out(par));
}
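
Taken together, the kernel side works as follows: for the first outbound UDP flow of a mapping, nf_nat_masquerade_ipv4 scans for a free external port with the same parity as the original source port (the RFC 4787 rule quoted above), SNATs to it, and attaches the BCM-NAT helper. The helper then installs a permanent expectation with wildcard source for that external address and port, and bcm_nat_expect DNATs any later inbound packet matching it back to the saved internal address and port, whatever the remote peer — the full cone property. On a running system the mappings can be inspected through the expectation table, assuming conntrack-tools is installed:

conntrack -L expect    # lists the permanent UDP expectations installed by the BCM-NAT helper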
