IPFW(4): Remove FreeBSD import

This firewall was not ported to RTEMS and is just dead code which may
cause trouble during FreeBSD baseline updates.  It also increases the
compile time of the library for no benefit.

Update #3472.
Sebastian Huber
2018-08-21 10:42:25 +02:00
parent f62c62d2fe
commit 63084c1988
39 changed files with 1 addition and 30557 deletions

View File

@@ -41,7 +41,6 @@ dpaa = on
 evdev = on
 fdt = on
 in_cksum = on
-ipfw = on
 mdnsresponder = on
 mghttpd = on
 mmc = on

View File

@@ -1,154 +0,0 @@
/*-
* Copyright (c) 2015 Yandex LLC
* Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _NETINET6_IP_FW_NAT64_H_
#define _NETINET6_IP_FW_NAT64_H_
struct ipfw_nat64stl_stats {
uint64_t opcnt64; /* 6to4 of packets translated */
uint64_t opcnt46; /* 4to6 of packets translated */
uint64_t ofrags; /* number of fragments generated */
uint64_t ifrags; /* number of fragments received */
uint64_t oerrors; /* number of output errors */
uint64_t noroute4;
uint64_t noroute6;
uint64_t noproto; /* Protocol not supported */
uint64_t nomem; /* mbuf allocation failed */
uint64_t dropped; /* dropped due to some errors */
};
struct ipfw_nat64lsn_stats {
uint64_t opcnt64; /* 6to4 of packets translated */
uint64_t opcnt46; /* 4to6 of packets translated */
uint64_t ofrags; /* number of fragments generated */
uint64_t ifrags; /* number of fragments received */
uint64_t oerrors; /* number of output errors */
uint64_t noroute4;
uint64_t noroute6;
uint64_t noproto; /* Protocol not supported */
uint64_t nomem; /* mbuf allocation failed */
uint64_t dropped; /* dropped due to some errors */
uint64_t nomatch4; /* No addr/port match */
uint64_t jcalls; /* Number of job handler calls */
uint64_t jrequests; /* Number of job requests */
uint64_t jhostsreq; /* Number of job host requests */
uint64_t jportreq; /* Number of portgroup requests */
uint64_t jhostfails; /* Number of failed host allocs */
uint64_t jportfails; /* Number of failed portgroup allocs */
uint64_t jreinjected; /* Number of packets reinjected to q */
uint64_t jmaxlen; /* Max queue length reached */
uint64_t jnomem; /* No memory to alloc queue item */
uint64_t screated; /* Number of states created */
uint64_t sdeleted; /* Number of states deleted */
uint64_t spgcreated; /* Number of portgroups created */
uint64_t spgdeleted; /* Number of portgroups deleted */
uint64_t hostcount; /* Number of hosts */
uint64_t tcpchunks; /* Number of TCP chunks */
uint64_t udpchunks; /* Number of UDP chunks */
uint64_t icmpchunks; /* Number of ICMP chunks */
uint64_t _reserved[4];
};
#define NAT64_LOG 0x0001 /* Enable logging via BPF */
typedef struct _ipfw_nat64stl_cfg {
char name[64]; /* NAT name */
ipfw_obj_ntlv ntlv6; /* object name tlv */
ipfw_obj_ntlv ntlv4; /* object name tlv */
struct in6_addr prefix6; /* NAT64 prefix */
uint8_t plen6; /* Prefix length */
uint8_t set; /* Named instance set [0..31] */
uint8_t spare[2];
uint32_t flags;
} ipfw_nat64stl_cfg;
/*
* NAT64LSN default configuration values
*/
#define NAT64LSN_MAX_PORTS 2048 /* Max number of ports per host */
#define NAT64LSN_JMAXLEN 2048 /* Max outstanding requests. */
#define NAT64LSN_TCP_SYN_AGE 10 /* State's TTL after SYN received. */
#define NAT64LSN_TCP_EST_AGE (2 * 3600) /* TTL for established connection */
#define NAT64LSN_TCP_FIN_AGE 180 /* State's TTL after FIN/RST received */
#define NAT64LSN_UDP_AGE 120 /* TTL for UDP states */
#define NAT64LSN_ICMP_AGE 60 /* TTL for ICMP states */
#define NAT64LSN_HOST_AGE 3600 /* TTL for stale host entry */
#define NAT64LSN_PG_AGE 900 /* TTL for stale ports groups */
typedef struct _ipfw_nat64lsn_cfg {
char name[64]; /* NAT name */
uint32_t flags;
uint32_t max_ports; /* Max ports per client */
uint32_t agg_prefix_len; /* Prefix length to count */
uint32_t agg_prefix_max; /* Max hosts per agg prefix */
struct in_addr prefix4;
uint16_t plen4; /* Prefix length */
uint16_t plen6; /* Prefix length */
struct in6_addr prefix6; /* NAT64 prefix */
uint32_t jmaxlen; /* Max jobqueue length */
uint16_t min_port; /* Min port group # to use */
uint16_t max_port; /* Max port group # to use */
uint16_t nh_delete_delay;/* Stale host delete delay */
uint16_t pg_delete_delay;/* Stale portgroup delete delay */
uint16_t st_syn_ttl; /* TCP syn expire */
uint16_t st_close_ttl; /* TCP fin expire */
uint16_t st_estab_ttl; /* TCP established expire */
uint16_t st_udp_ttl; /* UDP expire */
uint16_t st_icmp_ttl; /* ICMP expire */
uint8_t set; /* Named instance set [0..31] */
uint8_t spare;
} ipfw_nat64lsn_cfg;
typedef struct _ipfw_nat64lsn_state {
struct in_addr daddr; /* Remote IPv4 address */
uint16_t dport; /* Remote destination port */
uint16_t aport; /* Local alias port */
uint16_t sport; /* Source port */
uint8_t flags; /* State flags */
uint8_t spare[3];
uint16_t idle; /* Last used time */
} ipfw_nat64lsn_state;
typedef struct _ipfw_nat64lsn_stg {
uint64_t next_idx; /* next state index */
struct in_addr alias4; /* IPv4 alias address */
uint8_t proto; /* protocol */
uint8_t flags;
uint16_t spare;
struct in6_addr host6; /* Bound IPv6 host */
uint32_t count; /* Number of states */
uint32_t spare2;
} ipfw_nat64lsn_stg;
#endif /* _NETINET6_IP_FW_NAT64_H_ */

View File

@@ -1,51 +0,0 @@
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _NETINET6_IP_FW_NPTV6_H_
#define _NETINET6_IP_FW_NPTV6_H_
struct ipfw_nptv6_stats {
uint64_t in2ex; /* Int->Ext packets translated */
uint64_t ex2in; /* Ext->Int packets translated */
uint64_t dropped; /* dropped due to some errors */
uint64_t reserved[5];
};
typedef struct _ipfw_nptv6_cfg {
char name[64]; /* NPTv6 instance name */
struct in6_addr internal; /* NPTv6 internal prefix */
struct in6_addr external; /* NPTv6 external prefix */
uint8_t plen; /* Prefix length */
uint8_t set; /* Named instance set [0..31] */
uint8_t spare[2];
uint32_t flags;
} ipfw_nptv6_cfg;
#endif /* _NETINET6_IP_FW_NPTV6_H_ */

View File

@@ -1,167 +0,0 @@
/*-
* Copyright (C) 2016 Centre for Advanced Internet Architectures,
* Swinburne University of Technology, Melbourne, Australia.
* Portions of this code were made possible in part by a gift from
* The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* API for writing an Active Queue Management algorithm for Dummynet
*
* $FreeBSD$
*/
#ifndef _IP_DN_AQM_H
#define _IP_DN_AQM_H
/* NOW is the current time in millisecond*/
#define NOW ((dn_cfg.curr_time * tick) / 1000)
#define AQM_UNOW (dn_cfg.curr_time * tick)
#define AQM_TIME_1US ((aqm_time_t)(1))
#define AQM_TIME_1MS ((aqm_time_t)(1000))
#define AQM_TIME_1S ((aqm_time_t)(AQM_TIME_1MS * 1000))
/* aqm time can store up to 4294 seconds */
typedef uint32_t aqm_time_t;
typedef int32_t aqm_stime_t;
#define DN_AQM_MTAG_TS 55345
/* Macro for variable bounding */
#define BOUND_VAR(x,l,h) ((x) > (h)? (h) : ((x) > (l)? (x) : (l)))
/* sysctl variable to count number of dropped packets */
extern unsigned long io_pkt_drop;
/*
* Structure for holding data and function pointers that together represent a
* AQM algorithm.
*/
struct dn_aqm {
#define DN_AQM_NAME_MAX 50
char name[DN_AQM_NAME_MAX]; /* name of AQM algorithm */
uint32_t type; /* AQM type number */
/* Methods implemented by AQM algorithm:
*
* enqueue enqueue packet 'm' on queue 'q'.
* Return 0 on success, 1 on drop.
*
* dequeue dequeue a packet from queue 'q'.
* Return a packet, NULL if no packet available.
*
* config configure AQM algorithm
* If required, this function should allocate space to store
* the configurations and set 'fs->aqmcfg' to point to this space.
* 'dn_extra_parms' includes an array of parameters sent
* from the ipfw userland command.
* Return 0 on success, non-zero otherwise.
*
* deconfig deconfigure AQM algorithm.
* The allocated configuration memory space should be freed here.
* Return 0 on success, non-zero otherwise.
*
* init initialise AQM status variables of queue 'q'
* This function is used to allocate space and init AQM status for a
* queue and set q->aqm_status to point to this space.
* Return 0 on success, non-zero otherwise.
*
* cleanup cleanup AQM status variables of queue 'q'
* The allocated memory space for AQM status should be freed here.
* Return 0 on success, non-zero otherwise.
*
* getconfig retrieve AQM configurations
* This function is used to return AQM parameters to userland
* command. The function should fill 'dn_extra_parms' struct with
* the AQM configurations using 'par' array.
*
*/
int (*enqueue)(struct dn_queue *, struct mbuf *);
struct mbuf * (*dequeue)(struct dn_queue *);
int (*config)(struct dn_fsk *, struct dn_extra_parms *ep, int);
int (*deconfig)(struct dn_fsk *);
int (*init)(struct dn_queue *);
int (*cleanup)(struct dn_queue *);
int (*getconfig)(struct dn_fsk *, struct dn_extra_parms *);
int ref_count; /* Number of queue instances in the system */
int cfg_ref_count; /*Number of AQM instances in the system */
SLIST_ENTRY (dn_aqm) next; /* Next AQM in the list */
};
/* Helper function to update queue and scheduler statistics.
* negative len + drop -> drop
* negative len -> dequeue
* positive len -> enqueue
* positive len + drop -> drop during enqueue
*/
__inline static void
update_stats(struct dn_queue *q, int len, int drop)
{
int inc = 0;
struct dn_flow *sni;
struct dn_flow *qni;
sni = &q->_si->ni;
qni = &q->ni;
if (len < 0)
inc = -1;
else if(len > 0)
inc = 1;
if (drop) {
qni->drops++;
sni->drops++;
io_pkt_drop++;
} else {
/*update queue stats */
qni->length += inc;
qni->len_bytes += len;
/*update scheduler instance stats */
sni->length += inc;
sni->len_bytes += len;
}
/* tot_pkts is updated in dn_enqueue function */
}
/* kernel module related function */
int
dn_aqm_modevent(module_t mod, int cmd, void *arg);
#define DECLARE_DNAQM_MODULE(name, dnaqm) \
static moduledata_t name##_mod = { \
#name, dn_aqm_modevent, dnaqm \
}; \
DECLARE_MODULE(name, name##_mod, \
SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY); \
MODULE_DEPEND(name, dummynet, 3, 3, 3)
#endif
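
For readers skimming the removed header above, here is a minimal, self-contained userland C sketch (illustrative only, not part of the removed sources) of how the BOUND_VAR() clamp and the 32-bit microsecond clock behave; the macro body is copied from the header, everything else is made up for the example.

#include <stdio.h>
#include <stdint.h>

/* Copied from the removed header above: clamp x into the range [l, h]. */
#define BOUND_VAR(x,l,h) ((x) > (h)? (h) : ((x) > (l)? (x) : (l)))

int
main(void)
{
	/* Values below the lower bound snap to l, above the upper bound to h. */
	printf("%d %d %d\n",
	    BOUND_VAR(5, 10, 100),	/* -> 10 */
	    BOUND_VAR(50, 10, 100),	/* -> 50 */
	    BOUND_VAR(500, 10, 100));	/* -> 100 */

	/* aqm_time_t is a 32-bit microsecond counter: 2^32 us ~= 4294.97 s,
	 * which is where the "4294 seconds" comment above comes from. */
	printf("aqm_time_t wraps after about %.2f seconds\n",
	    (double)UINT32_MAX / 1e6);
	return 0;
}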

View File

@@ -1,222 +0,0 @@
/*
* Codel - The Controlled-Delay Active Queue Management algorithm.
*
* $FreeBSD$
*
* Copyright (C) 2016 Centre for Advanced Internet Architectures,
* Swinburne University of Technology, Melbourne, Australia.
* Portions of this code were made possible in part by a gift from
* The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
*
* Copyright (C) 2011-2014 Kathleen Nichols <nichols@pollere.com>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* o Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
*
* o Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* o The names of the authors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General Public
* License ("GPL") version 2, in which case the provisions of the GPL
* apply INSTEAD OF those given above.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IP_DN_AQM_CODEL_H
#define _IP_DN_AQM_CODEL_H
// XXX How to choose MTAG?
#define FIX_POINT_BITS 16
enum {
CODEL_ECN_ENABLED = 1
};
/* Codel parameters */
struct dn_aqm_codel_parms {
aqm_time_t target;
aqm_time_t interval;
uint32_t flags;
};
/* codel status variables */
struct codel_status {
uint32_t count; /* number of dropped pkts since entering drop state */
uint16_t dropping; /* dropping state */
aqm_time_t drop_next_time; /* time for next drop */
aqm_time_t first_above_time; /* time for first ts over target we observed */
uint16_t isqrt; /* last isqrt for control law */
uint16_t maxpkt_size; /* max packet size seen so far */
};
struct mbuf *codel_extract_head(struct dn_queue *, aqm_time_t *);
aqm_time_t control_law(struct codel_status *,
struct dn_aqm_codel_parms *, aqm_time_t );
__inline static struct mbuf *
codel_dodequeue(struct dn_queue *q, aqm_time_t now, uint16_t *ok_to_drop)
{
struct mbuf * m;
struct dn_aqm_codel_parms *cprms;
struct codel_status *cst;
aqm_time_t pkt_ts, sojourn_time;
*ok_to_drop = 0;
m = codel_extract_head(q, &pkt_ts);
cst = q->aqm_status;
if (m == NULL) {
/* queue is empty - we can't be above target */
cst->first_above_time= 0;
return m;
}
cprms = q->fs->aqmcfg;
/* To span a large range of bandwidths, CoDel runs two
* different AQMs in parallel. One is sojourn-time-based
* and takes effect when the time to send an MTU-sized
* packet is less than target. The 1st term of the "if"
* below does this. The other is backlog-based and takes
* effect when the time to send an MTU-sized packet is >=
* target. The goal here is to keep the output link
* utilization high by never allowing the queue to get
* smaller than the amount that arrives in a typical
* interarrival time (MTU-sized packets arriving spaced
* by the amount of time it takes to send such a packet on
* the bottleneck). The 2nd term of the "if" does this.
*/
sojourn_time = now - pkt_ts;
if (sojourn_time < cprms->target || q->ni.len_bytes <= cst->maxpkt_size) {
/* went below - stay below for at least interval */
cst->first_above_time = 0;
} else {
if (cst->first_above_time == 0) {
/* just went above from below. if still above at
* first_above_time, will say it's ok to drop. */
cst->first_above_time = now + cprms->interval;
} else if (now >= cst->first_above_time) {
*ok_to_drop = 1;
}
}
return m;
}
/*
* Dequeue a packet from queue 'q'
*/
__inline static struct mbuf *
codel_dequeue(struct dn_queue *q)
{
struct mbuf *m;
struct dn_aqm_codel_parms *cprms;
struct codel_status *cst;
aqm_time_t now;
uint16_t ok_to_drop;
cst = q->aqm_status;
cprms = q->fs->aqmcfg;
now = AQM_UNOW;
m = codel_dodequeue(q, now, &ok_to_drop);
if (cst->dropping) {
if (!ok_to_drop) {
/* sojourn time below target - leave dropping state */
cst->dropping = false;
}
/*
* Time for the next drop. Drop current packet and dequeue
* next. If the dequeue doesn't take us out of dropping
* state, schedule the next drop. A large backlog might
* result in drop rates so high that the next drop should
* happen now, hence the 'while' loop.
*/
while (now >= cst->drop_next_time && cst->dropping) {
/* mark the packet */
if (cprms->flags & CODEL_ECN_ENABLED && ecn_mark(m)) {
cst->count++;
/* schedule the next mark. */
cst->drop_next_time = control_law(cst, cprms,
cst->drop_next_time);
return m;
}
/* drop the packet */
update_stats(q, 0, 1);
FREE_PKT(m);
m = codel_dodequeue(q, now, &ok_to_drop);
if (!ok_to_drop) {
/* leave dropping state */
cst->dropping = false;
} else {
cst->count++;
/* schedule the next drop. */
cst->drop_next_time = control_law(cst, cprms,
cst->drop_next_time);
}
}
/* If we get here we're not in dropping state. The 'ok_to_drop'
* return from dodequeue means that the sojourn time has been
* above 'target' for 'interval' so enter dropping state.
*/
} else if (ok_to_drop) {
/* if ECN option is disabled or the packet cannot be marked,
* drop the packet and extract another.
*/
if (!(cprms->flags & CODEL_ECN_ENABLED) || !ecn_mark(m)) {
update_stats(q, 0, 1);
FREE_PKT(m);
m = codel_dodequeue(q, now, &ok_to_drop);
}
cst->dropping = true;
/* If min went above target close to when it last went
* below, assume that the drop rate that controlled the
* queue on the last cycle is a good starting point to
* control it now. ('drop_next' will be at most 'interval'
* later than the time of the last drop so 'now - drop_next'
* is a good approximation of the time from the last drop
* until now.)
*/
cst->count = (cst->count > 2 && ((aqm_stime_t)now -
(aqm_stime_t)cst->drop_next_time) < 8* cprms->interval)?
cst->count - 2 : 1;
/* we don't have to set initial guess for Newton's method isqrt as
* we initialize isqrt in control_law function when count == 1 */
cst->drop_next_time = control_law(cst, cprms, now);
}
return m;
}
#endif
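
The control_law() declared above computes CoDel's next-drop schedule; per the published CoDel algorithm this is t + interval / sqrt(count), and the kernel code approximates the square root with a fixed-point Newton-method isqrt, as the comments note. A minimal floating-point userland sketch (illustrative only, not the removed implementation; link with -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t aqm_time_t;	/* microseconds, as in the removed headers */

/* Illustrative only: the removed kernel code uses a fixed-point inverse
 * square root, not libm's sqrt(). */
static aqm_time_t
control_law_sketch(aqm_time_t t, aqm_time_t interval, uint32_t count)
{
	return t + (aqm_time_t)(interval / sqrt((double)count));
}

int
main(void)
{
	aqm_time_t interval = 100 * 1000;	/* example CoDel interval: 100 ms */

	/* As the drop count grows, the gap to the next drop shrinks as 1/sqrt(count). */
	for (uint32_t count = 1; count <= 4; count++)
		printf("count=%u -> next drop %u us after the last one\n",
		    (unsigned)count,
		    (unsigned)control_law_sketch(0, interval, count));
	return 0;
}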

View File

@@ -1,153 +0,0 @@
/*
* PIE - Proportional Integral controller Enhanced AQM algorithm.
*
* $FreeBSD$
*
* Copyright (C) 2016 Centre for Advanced Internet Architectures,
* Swinburne University of Technology, Melbourne, Australia.
* Portions of this code were made possible in part by a gift from
* The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _IP_DN_AQM_PIE_H
#define _IP_DN_AQM_PIE_H
#define DN_AQM_PIE 2
#define PIE_DQ_THRESHOLD_BITS 14
/* 2^14 =16KB */
#define PIE_DQ_THRESHOLD (1L << PIE_DQ_THRESHOLD_BITS)
#define MEAN_PKTSIZE 800
/* 31-bits because random() generates range from 0->(2**31)-1 */
#define PIE_PROB_BITS 31
#define PIE_MAX_PROB ((1LL<<PIE_PROB_BITS) -1)
/* for 16-bits, we have 3-bits for integer part and 13-bits for fraction */
#define PIE_FIX_POINT_BITS 13
#define PIE_SCALE (1L<<PIE_FIX_POINT_BITS)
/* PIE options */
enum {
PIE_ECN_ENABLED =1,
PIE_CAPDROP_ENABLED = 2,
PIE_ON_OFF_MODE_ENABLED = 4,
PIE_DEPRATEEST_ENABLED = 8,
PIE_DERAND_ENABLED = 16
};
/* PIE parameters */
struct dn_aqm_pie_parms {
aqm_time_t qdelay_ref; /* AQM Latency Target (default: 15ms) */
aqm_time_t tupdate; /* a period to calculate drop probability (default:15ms) */
aqm_time_t max_burst; /* AQM Max Burst Allowance (default: 150ms) */
uint16_t max_ecnth; /*AQM Max ECN Marking Threshold (default: 10%) */
uint16_t alpha; /* (default: 1/8) */
uint16_t beta; /* (default: 1+1/4) */
uint32_t flags; /* PIE options */
};
/* PIE status variables */
struct pie_status{
struct callout aqm_pie_callout;
aqm_time_t burst_allowance;
uint32_t drop_prob;
aqm_time_t current_qdelay;
aqm_time_t qdelay_old;
uint64_t accu_prob;
aqm_time_t measurement_start;
aqm_time_t avg_dq_time;
uint32_t dq_count;
uint32_t sflags;
struct dn_aqm_pie_parms *parms; /* pointer to PIE configurations */
/* pointer to parent queue of FQ-PIE sub-queues, or queue of owner fs. */
struct dn_queue *pq;
struct mtx lock_mtx;
uint32_t one_third_q_size; /* 1/3 of queue size, for speed optimization */
};
enum {
ENQUE = 1,
DROP,
MARKECN
};
/* PIE current state */
enum {
PIE_ACTIVE = 1,
PIE_INMEASUREMENT = 2
};
/*
* Check whether enqueue should drop the packet to control delay,
* based on the PIE algorithm.
* Return DROP if it is time to drop or ENQUE otherwise.
* This function is used by PIE and FQ-PIE.
*/
__inline static int
drop_early(struct pie_status *pst, uint32_t qlen)
{
struct dn_aqm_pie_parms *pprms;
pprms = pst->parms;
/* queue is not congested */
if ((pst->qdelay_old < (pprms->qdelay_ref >> 1)
&& pst->drop_prob < PIE_MAX_PROB / 5 )
|| qlen <= 2 * MEAN_PKTSIZE)
return ENQUE;
if (pst->drop_prob == 0)
pst->accu_prob = 0;
/* increment accu_prob */
if (pprms->flags & PIE_DERAND_ENABLED)
pst->accu_prob += pst->drop_prob;
/* De-randomize option
* if accu_prob < 0.85 -> enqueue
* if accu_prob>8.5 ->drop
* between 0.85 and 8.5 || !De-randomize --> drop on prob
*
* (0.85 = 17/20 ,8.5 = 17/2)
*/
if (pprms->flags & PIE_DERAND_ENABLED) {
if(pst->accu_prob < (uint64_t) (PIE_MAX_PROB * 17 / 20))
return ENQUE;
if( pst->accu_prob >= (uint64_t) (PIE_MAX_PROB * 17 / 2))
return DROP;
}
if (random() < pst->drop_prob) {
pst->accu_prob = 0;
return DROP;
}
return ENQUE;
}
#endif
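
A small self-contained sketch of how the constants above map ordinary values to their scaled representations, including the 17/20 and 17/2 de-randomization thresholds used by drop_early(). Illustrative only; that alpha and beta are stored in the 3.13 fixed-point format is an assumption based on the comment above, not taken from the removed sources.

#include <stdint.h>
#include <stdio.h>

/* Constants copied from the removed header above. */
#define PIE_PROB_BITS		31
#define PIE_MAX_PROB		((1LL << PIE_PROB_BITS) - 1)
#define PIE_FIX_POINT_BITS	13
#define PIE_SCALE		(1L << PIE_FIX_POINT_BITS)

int
main(void)
{
	/* Assumption: alpha and beta use the 3.13 fixed-point format, so the
	 * documented defaults 1/8 and 1 + 1/4 become: */
	long alpha = PIE_SCALE / 8;			/* 0.125 -> 1024 */
	long beta = PIE_SCALE + PIE_SCALE / 4;		/* 1.25  -> 10240 */

	/* Drop probability is scaled to PIE_MAX_PROB (31 bits); drop_early()
	 * compares accu_prob against 0.85 and 8.5 of it (17/20 and 17/2). */
	long long lo_th = (long long)PIE_MAX_PROB * 17 / 20;
	long long hi_th = (long long)PIE_MAX_PROB * 17 / 2;

	printf("alpha=%ld beta=%ld\n", alpha, beta);
	printf("derandomization: enqueue below %lld, drop above %lld\n",
	    lo_th, hi_th);
	return 0;
}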

View File

@@ -1,191 +0,0 @@
/*-
* Copyright (c) 1998-2010 Luigi Rizzo, Universita` di Pisa
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Binary heap and hash tables, header file
*
* $FreeBSD$
*/
#ifndef _IP_DN_HEAP_H
#define _IP_DN_HEAP_H
#define DN_KEY_LT(a,b) ((int64_t)((a)-(b)) < 0)
#define DN_KEY_LEQ(a,b) ((int64_t)((a)-(b)) <= 0)
/*
* This module implements a binary heap supporting random extraction.
*
* A heap entry contains an uint64_t key and a pointer to object.
* DN_KEY_LT(a,b) returns true if key 'a' is smaller than 'b'
*
* The heap is a struct dn_heap plus a dynamically allocated
* array of dn_heap_entry entries. 'size' represents the size of
* the array, 'elements' count entries in use. The topmost
* element has the smallest key.
* The heap supports ordered insert, and extract from the top.
* To extract an object from the middle of the heap, the object
* must reserve an 'int32_t' to store the position of the object
* in the heap itself, and the location of this field must be
* passed as an argument to heap_init() -- use -1 if the feature
* is not used.
*/
struct dn_heap_entry {
uint64_t key; /* sorting key, smallest comes first */
void *object; /* object pointer */
};
struct dn_heap {
int size; /* the size of the array */
int elements; /* elements in use */
int ofs; /* offset in the object of heap index */
struct dn_heap_entry *p; /* array of "size" entries */
};
enum {
HEAP_SCAN_DEL = 1,
HEAP_SCAN_END = 2,
};
/*
* heap_init() reinitializes the heap setting the size and the offset
* of the index for random extraction (use -1 if not used).
* The 'elements' counter is set to 0.
*
* SET_HEAP_OFS() indicates where, in the object, is stored the index
* for random extractions from the heap.
*
* heap_free() frees the memory associated to a heap.
*
* heap_insert() adds a key-pointer pair to the heap
*
* HEAP_TOP() returns a pointer to the top element of the heap,
* but makes no checks on its existence (XXX should we change ?)
*
* heap_extract() removes the entry at the top, returning the pointer.
* (the key should have been read before).
*
* heap_scan() invokes a callback on each entry of the heap.
* The callback can return a combination of HEAP_SCAN_DEL and
* HEAP_SCAN_END. HEAP_SCAN_DEL means the current element must
* be removed, and HEAP_SCAN_END means to terminate the scan.
* heap_scan() returns the number of elements removed.
* Because the order is not guaranteed, we should use heap_scan()
* only as a last resort mechanism.
*/
#define HEAP_TOP(h) ((h)->p)
#define SET_HEAP_OFS(h, n) do { (h)->ofs = n; } while (0)
int heap_init(struct dn_heap *h, int size, int ofs);
int heap_insert(struct dn_heap *h, uint64_t key1, void *p);
void heap_extract(struct dn_heap *h, void *obj);
void heap_free(struct dn_heap *h);
int heap_scan(struct dn_heap *, int (*)(void *, uintptr_t), uintptr_t);
/*------------------------------------------------------
* This module implements a generic hash table with support for
* running callbacks on the entire table. To avoid allocating
* memory during hash table operations, objects must reserve
* space for a link field. XXX if the heap is moderately full,
* an SLIST suffices, and we can tolerate the cost of a hash
* computation on each removal.
*
* dn_ht_init() initializes the table, setting the number of
* buckets, the offset of the link field, the main callbacks.
* Callbacks are:
*
* hash(key, flags, arg) called to return a bucket index.
* match(obj, key, flags, arg) called to determine if key
* matches the current 'obj' in the heap
* newh(key, flags, arg) optional, used to allocate a new
* object during insertions.
*
* dn_ht_free() frees the heap or unlinks elements.
* DNHT_REMOVE unlinks elements, 0 frees the heap.
* You need two calls to do both.
*
* dn_ht_find() is the main lookup function, which can also be
* used to insert or delete elements in the hash table.
* The final 'arg' is passed to all callbacks.
*
* dn_ht_scan() is used to invoke a callback on all entries of
* the heap, or possibly on just one bucket. The callback
* is invoked with a pointer to the object, and must return
* one of DNHT_SCAN_DEL or DNHT_SCAN_END to request the
* removal of the object from the heap and the end of the
* scan, respectively.
*
* dn_ht_scan_bucket() is similar to dn_ht_scan(), except that it scans
* only the specified bucket of the table. The bucket is an in-out
* parameter and returns a valid bucket number if the original
* is invalid.
*
* A combination of flags can be used to modify the operation
* of the dn_ht_find(), and of the callbacks:
*
* DNHT_KEY_IS_OBJ means the key is the object pointer.
* It is usually of interest for the hash and match functions.
*
* DNHT_MATCH_PTR during a lookup, match pointers instead
* of calling match(). Normally used when removing specific
* entries. Does not imply KEY_IS_OBJ as the latter _is_ used
* by the match function.
*
* DNHT_INSERT insert the element if not found.
* Calls newh() to allocate a new object unless
* DNHT_KEY_IS_OBJ is set.
*
* DNHT_UNIQUE only insert if object not found.
* XXX should it imply DNHT_INSERT ?
*
* DNHT_REMOVE remove objects if we find them.
*/
struct dn_ht; /* should be opaque */
struct dn_ht *dn_ht_init(struct dn_ht *, int buckets, int ofs,
uint32_t (*hash)(uintptr_t, int, void *),
int (*match)(void *, uintptr_t, int, void *),
void *(*newh)(uintptr_t, int, void *));
void dn_ht_free(struct dn_ht *, int flags);
void *dn_ht_find(struct dn_ht *, uintptr_t, int, void *);
int dn_ht_scan(struct dn_ht *, int (*)(void *, void *), void *);
int dn_ht_scan_bucket(struct dn_ht *, int * , int (*)(void *, void *), void *);
int dn_ht_entries(struct dn_ht *);
enum { /* flags values.
* first two are returned by the scan callback to indicate
* to delete the matching element or to end the scan
*/
DNHT_SCAN_DEL = 0x0001,
DNHT_SCAN_END = 0x0002,
DNHT_KEY_IS_OBJ = 0x0004, /* key is the obj pointer */
DNHT_MATCH_PTR = 0x0008, /* match by pointer, not match() */
DNHT_INSERT = 0x0010, /* insert if not found */
DNHT_UNIQUE = 0x0020, /* report error if already there */
DNHT_REMOVE = 0x0040, /* remove on find or dn_ht_free */
};
#endif /* _IP_DN_HEAP_H */
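
The DN_KEY_LT()/DN_KEY_LEQ() macros above compare keys through a signed 64-bit difference so that the ordering survives counter wrap-around (assuming the keys differ by less than 2^63). A self-contained demonstration, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Copied from the removed header above. */
#define DN_KEY_LT(a,b)	((int64_t)((a)-(b)) < 0)
#define DN_KEY_LEQ(a,b)	((int64_t)((a)-(b)) <= 0)

int
main(void)
{
	uint64_t before_wrap = UINT64_MAX - 5;	/* key just before the counter wraps */
	uint64_t after_wrap = 10;		/* key just after the wrap */

	/* A plain '<' on the raw values would order the later key first;
	 * the signed-difference trick keeps the pre-wrap key first. */
	printf("plain   <  : %d\n", after_wrap < before_wrap);			/* 1 (wrong order) */
	printf("DN_KEY_LT  : %d\n", DN_KEY_LT(before_wrap, after_wrap));	/* 1 (correct) */
	printf("DN_KEY_LEQ : %d\n", DN_KEY_LEQ(before_wrap, after_wrap));	/* 1 */
	return 0;
}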

View File

@@ -1,201 +0,0 @@
/*
* Copyright (c) 2010 Riccardo Panicucci, Luigi Rizzo, Universita` di Pisa
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* The API to write a packet scheduling algorithm for dummynet.
*
* $FreeBSD$
*/
#ifndef _DN_SCHED_H
#define _DN_SCHED_H
#define DN_MULTIQUEUE 0x01
/*
* Descriptor for a scheduling algorithm.
* Contains all function pointers for a given scheduler
* This is typically created when a module is loaded, and stored
* in a global list of schedulers.
*/
struct dn_alg {
uint32_t type; /* the scheduler type */
const char *name; /* scheduler name */
uint32_t flags; /* DN_MULTIQUEUE if supports multiple queues */
/*
* The following define the size of 3 optional data structures
* that may need to be allocated at runtime, and are appended
* to each of the base data structures: scheduler, sched.inst,
* and queue. We don't have a per-flowset structure.
*/
/* + parameters attached to the template, e.g.
* default queue sizes, weights, quantum size, and so on;
*/
size_t schk_datalen;
/* + per-instance parameters, such as timestamps,
* containers for queues, etc;
*/
size_t si_datalen;
size_t q_datalen; /* per-queue parameters (e.g. S,F) */
/*
* Methods implemented by the scheduler:
* enqueue enqueue packet 'm' on scheduler 's', queue 'q'.
* q is NULL for !MULTIQUEUE.
* Return 0 on success, 1 on drop (packet consumed anyways).
* Note that q should be interpreted only as a hint
* on the flow that the mbuf belongs to: while a
* scheduler will normally enqueue m into q, it is ok
* to leave q alone and put the mbuf elsewhere.
* This function is called in two cases:
* - when a new packet arrives to the scheduler;
* - when a scheduler is reconfigured. In this case the
* call is issued by the new_queue callback, with a
* non empty queue (q) and m pointing to the first
* mbuf in the queue. For this reason, the function
* should internally check for (m != q->mq.head)
* before calling dn_enqueue().
*
* dequeue Called when scheduler instance 's' can
* dequeue a packet. Return NULL if none are available.
* XXX what about non work-conserving ?
*
* config called on 'sched X config ...', normally writes
* in the area of size sch_arg
*
* destroy called on 'sched delete', frees everything
* in sch_arg (other parts are handled by more specific
* functions)
*
* new_sched called when a new instance is created, e.g.
* to create the local queue for !MULTIQUEUE, set V or
* copy parameters for WFQ, and so on.
*
* free_sched called when deleting an instance, cleans
* extra data in the per-instance area.
*
* new_fsk called when a flowset is linked to a scheduler,
* e.g. to validate parameters such as weights etc.
* free_fsk when a flowset is unlinked from a scheduler.
* (probably unnecessary)
*
* new_queue called to set the per-queue parameters,
* e.g. S and F, adjust sum of weights in the parent, etc.
*
* The new_queue callback is normally called when creating a
* new queue. In some cases (such as a scheduler change or
* reconfiguration) it can be called with a non-empty queue.
* In that case the new_queue callback may need to call the
* enqueue function; the callback should eventually call
* enqueue() passing as m the first element in the queue.
*
* free_queue actions related to a queue removal, e.g. undo
* all the above. If the queue has data in it, also remove
* from the scheduler. This can e.g. happen during a reconfigure.
*/
int (*enqueue)(struct dn_sch_inst *, struct dn_queue *,
struct mbuf *);
struct mbuf * (*dequeue)(struct dn_sch_inst *);
int (*config)(struct dn_schk *);
int (*destroy)(struct dn_schk*);
int (*new_sched)(struct dn_sch_inst *);
int (*free_sched)(struct dn_sch_inst *);
int (*new_fsk)(struct dn_fsk *f);
int (*free_fsk)(struct dn_fsk *f);
int (*new_queue)(struct dn_queue *q);
int (*free_queue)(struct dn_queue *q);
#ifdef NEW_AQM
/* Getting scheduler extra parameters */
int (*getconfig)(struct dn_schk *, struct dn_extra_parms *);
#endif
/* run-time fields */
int ref_count; /* XXX number of instances in the system */
SLIST_ENTRY(dn_alg) next; /* Next scheduler in the list */
};
/* MSVC does not support initializers so we need this ugly macro */
#ifdef _WIN32
#define _SI(fld)
#else
#define _SI(fld) fld
#endif
/*
* Additionally, dummynet exports some functions and macros
* to be used by schedulers:
*/
void dn_free_pkts(struct mbuf *mnext);
int dn_enqueue(struct dn_queue *q, struct mbuf* m, int drop);
/* bound a variable between min and max */
int ipdn_bound_var(int *v, int dflt, int lo, int hi, const char *msg);
/*
* Extract the head of a queue, update stats. Must be the very last
* thing done on a dequeue as the queue itself may go away.
*/
static __inline struct mbuf*
dn_dequeue(struct dn_queue *q)
{
struct mbuf *m = q->mq.head;
if (m == NULL)
return NULL;
#ifdef NEW_AQM
/* Call AQM dequeue function */
if (q->fs->aqmfp && q->fs->aqmfp->dequeue )
return q->fs->aqmfp->dequeue(q);
#endif
q->mq.head = m->m_nextpkt;
q->mq.count--;
/* Update stats for the queue */
q->ni.length--;
q->ni.len_bytes -= m->m_pkthdr.len;
if (q->_si) {
q->_si->ni.length--;
q->_si->ni.len_bytes -= m->m_pkthdr.len;
}
if (q->ni.length == 0) /* queue is now idle */
q->q_time = dn_cfg.curr_time;
return m;
}
int dn_sched_modevent(module_t mod, int cmd, void *arg);
#define DECLARE_DNSCHED_MODULE(name, dnsched) \
static moduledata_t name##_mod = { \
#name, dn_sched_modevent, dnsched \
}; \
DECLARE_MODULE(name, name##_mod, \
SI_SUB_PROTO_FIREWALL, SI_ORDER_ANY); \
MODULE_DEPEND(name, dummynet, 3, 3, 3)
#endif /* _DN_SCHED_H */

View File

@@ -1,167 +0,0 @@
/*-
* Copyright (C) 2016 Centre for Advanced Internet Architectures,
* Swinburne University of Technology, Melbourne, Australia.
* Portions of this code were made possible in part by a gift from
* The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* FQ_Codel Structures and helper functions
*
* $FreeBSD$
*/
#ifndef _IP_DN_SCHED_FQ_CODEL_H
#define _IP_DN_SCHED_FQ_CODEL_H
/* list of queues */
STAILQ_HEAD(fq_codel_list, fq_codel_flow) ;
/* fq_codel parameters including codel */
struct dn_sch_fq_codel_parms {
struct dn_aqm_codel_parms ccfg; /* CoDel Parameters */
/* FQ_CODEL Parameters */
uint32_t flows_cnt; /* number of flows */
uint32_t limit; /* hard limit of fq_codel queue size*/
uint32_t quantum;
}; /* defaults */
/* flow (sub-queue) stats */
struct flow_stats {
uint64_t tot_pkts; /* statistics counters */
uint64_t tot_bytes;
uint32_t length; /* Queue length, in packets */
uint32_t len_bytes; /* Queue length, in bytes */
uint32_t drops;
};
/* A flow of packets (sub-queue).*/
struct fq_codel_flow {
struct mq mq; /* list of packets */
struct flow_stats stats; /* statistics */
int deficit;
int active; /* 1: flow is active (in a list) */
struct codel_status cst;
STAILQ_ENTRY(fq_codel_flow) flowchain;
};
/* extra fq_codel scheduler configurations */
struct fq_codel_schk {
struct dn_sch_fq_codel_parms cfg;
};
/* fq_codel scheduler instance */
struct fq_codel_si {
struct dn_sch_inst _si; /* standard scheduler instance */
struct dn_queue main_q; /* main queue is after si directly */
struct fq_codel_flow *flows; /* array of flows (queues) */
uint32_t perturbation; /* random value */
struct fq_codel_list newflows; /* list of new queues */
struct fq_codel_list oldflows; /* list of old queues */
};
/* Helper function to update queue&main-queue and scheduler statistics.
* negative len + drop -> drop
* negative len -> dequeue
* positive len -> enqueue
* positive len + drop -> drop during enqueue
*/
__inline static void
fq_update_stats(struct fq_codel_flow *q, struct fq_codel_si *si, int len,
int drop)
{
int inc = 0;
if (len < 0)
inc = -1;
else if (len > 0)
inc = 1;
if (drop) {
si->main_q.ni.drops ++;
q->stats.drops ++;
si->_si.ni.drops ++;
io_pkt_drop ++;
}
if (!drop || (drop && len < 0)) {
/* Update stats for the main queue */
si->main_q.ni.length += inc;
si->main_q.ni.len_bytes += len;
/*update sub-queue stats */
q->stats.length += inc;
q->stats.len_bytes += len;
/*update scheduler instance stats */
si->_si.ni.length += inc;
si->_si.ni.len_bytes += len;
}
if (inc > 0) {
si->main_q.ni.tot_bytes += len;
si->main_q.ni.tot_pkts ++;
q->stats.tot_bytes +=len;
q->stats.tot_pkts++;
si->_si.ni.tot_bytes +=len;
si->_si.ni.tot_pkts ++;
}
}
/* extract the head of fq_codel sub-queue */
__inline static struct mbuf *
fq_codel_extract_head(struct fq_codel_flow *q, aqm_time_t *pkt_ts, struct fq_codel_si *si)
{
struct mbuf *m = q->mq.head;
if (m == NULL)
return m;
q->mq.head = m->m_nextpkt;
fq_update_stats(q, si, -m->m_pkthdr.len, 0);
if (si->main_q.ni.length == 0) /* queue is now idle */
si->main_q.q_time = dn_cfg.curr_time;
/* extract packet timestamp*/
struct m_tag *mtag;
mtag = m_tag_locate(m, MTAG_ABI_COMPAT, DN_AQM_MTAG_TS, NULL);
if (mtag == NULL){
D("timestamp tag is not found!");
*pkt_ts = 0;
} else {
*pkt_ts = *(aqm_time_t *)(mtag + 1);
m_tag_delete(m,mtag);
}
return m;
}
#endif

View File

@@ -1,187 +0,0 @@
/*
* Codel - The Controlled-Delay Active Queue Management algorithm.
*
* $FreeBSD$
*
* Copyright (C) 2016 Centre for Advanced Internet Architectures,
* Swinburne University of Technology, Melbourne, Australia.
* Portions of this code were made possible in part by a gift from
* The Comcast Innovation Fund.
* Implemented by Rasool Al-Saadi <ralsaadi@swin.edu.au>
*
* Copyright (C) 2011-2014 Kathleen Nichols <nichols@pollere.com>.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* o Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
*
* o Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* o The names of the authors may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General Public
* License ("GPL") version 2, in which case the provisions of the GPL
* apply INSTEAD OF those given above.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _IP_DN_SCHED_FQ_CODEL_HELPER_H
#define _IP_DN_SCHED_FQ_CODEL_HELPER_H
__inline static struct mbuf *
fqc_dodequeue(struct fq_codel_flow *q, aqm_time_t now, uint16_t *ok_to_drop,
struct fq_codel_si *si)
{
struct mbuf * m;
struct fq_codel_schk *schk = (struct fq_codel_schk *)(si->_si.sched+1);
aqm_time_t pkt_ts, sojourn_time;
*ok_to_drop = 0;
m = fq_codel_extract_head(q, &pkt_ts, si);
if (m == NULL) {
/*queue is empty - we can't be above target*/
q->cst.first_above_time= 0;
return m;
}
/* To span a large range of bandwidths, CoDel runs two
* different AQMs in parallel. One is sojourn-time-based
* and takes effect when the time to send an MTU-sized
* packet is less than target. The 1st term of the "if"
* below does this. The other is backlog-based and takes
* effect when the time to send an MTU-sized packet is >=
* target. The goal here is to keep the output link
* utilization high by never allowing the queue to get
* smaller than the amount that arrives in a typical
* interarrival time (MTU-sized packets arriving spaced
* by the amount of time it takes to send such a packet on
* the bottleneck). The 2nd term of the "if" does this.
*/
sojourn_time = now - pkt_ts;
if (sojourn_time < schk->cfg.ccfg.target || q->stats.len_bytes <= q->cst.maxpkt_size) {
/* went below - stay below for at least interval */
q->cst.first_above_time = 0;
} else {
if (q->cst.first_above_time == 0) {
/* just went above from below. if still above at
* first_above_time, will say it's ok to drop. */
q->cst.first_above_time = now + schk->cfg.ccfg.interval;
} else if (now >= q->cst.first_above_time) {
*ok_to_drop = 1;
}
}
return m;
}
/* Codel dequeue function */
__inline static struct mbuf *
fqc_codel_dequeue(struct fq_codel_flow *q, struct fq_codel_si *si)
{
struct mbuf *m;
struct dn_aqm_codel_parms *cprms;
struct codel_status *cst;
aqm_time_t now;
uint16_t ok_to_drop;
struct fq_codel_schk *schk = (struct fq_codel_schk *)(si->_si.sched+1);
cst = &q->cst;
cprms = &schk->cfg.ccfg;
now = AQM_UNOW;
m = fqc_dodequeue(q, now, &ok_to_drop, si);
if (cst->dropping) {
if (!ok_to_drop) {
/* sojourn time below target - leave dropping state */
cst->dropping = false;
}
/* Time for the next drop. Drop current packet and dequeue
* next. If the dequeue doesn't take us out of dropping
* state, schedule the next drop. A large backlog might
* result in drop rates so high that the next drop should
* happen now, hence the 'while' loop.
*/
while (now >= cst->drop_next_time && cst->dropping) {
/* mark the packet */
if (cprms->flags & CODEL_ECN_ENABLED && ecn_mark(m)) {
cst->count++;
/* schedule the next mark. */
cst->drop_next_time = control_law(cst, cprms, cst->drop_next_time);
return m;
}
/* drop the packet */
fq_update_stats(q, si, 0, 1);
m_freem(m);
m = fqc_dodequeue(q, now, &ok_to_drop, si);
if (!ok_to_drop) {
/* leave dropping state */
cst->dropping = false;
} else {
cst->count++;
/* schedule the next drop. */
cst->drop_next_time = control_law(cst, cprms, cst->drop_next_time);
}
}
/* If we get here we're not in dropping state. The 'ok_to_drop'
* return from dodequeue means that the sojourn time has been
* above 'target' for 'interval' so enter dropping state.
*/
} else if (ok_to_drop) {
/* if ECN option is disabled or the packet cannot be marked,
* drop the packet and extract another.
*/
if (!(cprms->flags & CODEL_ECN_ENABLED) || !ecn_mark(m)) {
fq_update_stats(q, si, 0, 1);
m_freem(m);
m = fqc_dodequeue(q, now, &ok_to_drop,si);
}
cst->dropping = true;
/* If min went above target close to when it last went
* below, assume that the drop rate that controlled the
* queue on the last cycle is a good starting point to
* control it now. ('drop_next' will be at most 'interval'
* later than the time of the last drop so 'now - drop_next'
* is a good approximation of the time from the last drop
* until now.)
*/
cst->count = (cst->count > 2 && ((aqm_stime_t)now -
(aqm_stime_t)cst->drop_next_time) < 8* cprms->interval)? cst->count - 2 : 1;
/* we don't have to set initial guess for Newton's method isqrt as
* we initialize isqrt in control_law function when count == 1 */
cst->drop_next_time = control_law(cst, cprms, now);
}
return m;
}
#endif

View File

@@ -1,463 +0,0 @@
/*-
* Copyright (c) 2010 Luigi Rizzo, Riccardo Panicucci, Universita` di Pisa
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* internal dummynet APIs.
*
* $FreeBSD$
*/
#ifndef _IP_DN_PRIVATE_H
#define _IP_DN_PRIVATE_H
/* debugging support
* use ND() to remove debugging, D() to print a line,
* DX(level, ...) to print above a certain level
* If you redefine D() you are expected to redefine all.
*/
#ifndef D
#define ND(fmt, ...) do {} while (0)
#define D1(fmt, ...) do {} while (0)
#define D(fmt, ...) printf("%-10s " fmt "\n", \
__FUNCTION__, ## __VA_ARGS__)
#define DX(lev, fmt, ...) do { \
if (dn_cfg.debug > lev) D(fmt, ## __VA_ARGS__); } while (0)
#endif
MALLOC_DECLARE(M_DUMMYNET);
#ifndef __linux__
#define div64(a, b) ((int64_t)(a) / (int64_t)(b))
#endif
#define DN_LOCK_INIT() do { \
mtx_init(&dn_cfg.uh_mtx, "dn_uh", NULL, MTX_DEF); \
mtx_init(&dn_cfg.bh_mtx, "dn_bh", NULL, MTX_DEF); \
} while (0)
#define DN_LOCK_DESTROY() do { \
mtx_destroy(&dn_cfg.uh_mtx); \
mtx_destroy(&dn_cfg.bh_mtx); \
} while (0)
#if 0 /* not used yet */
#define DN_UH_RLOCK() mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_WLOCK() mtx_lock(&dn_cfg.uh_mtx)
#define DN_UH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
#define DN_UH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
#endif
#define DN_BH_RLOCK() mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_RUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_WLOCK() mtx_lock(&dn_cfg.uh_mtx)
#define DN_BH_WUNLOCK() mtx_unlock(&dn_cfg.uh_mtx)
#define DN_BH_LOCK_ASSERT() mtx_assert(&dn_cfg.uh_mtx, MA_OWNED)
SLIST_HEAD(dn_schk_head, dn_schk);
SLIST_HEAD(dn_sch_inst_head, dn_sch_inst);
SLIST_HEAD(dn_fsk_head, dn_fsk);
SLIST_HEAD(dn_queue_head, dn_queue);
SLIST_HEAD(dn_alg_head, dn_alg);
#ifdef NEW_AQM
SLIST_HEAD(dn_aqm_head, dn_aqm); /* for new AQMs */
#endif
struct mq { /* a basic queue of packets*/
struct mbuf *head, *tail;
int count;
};
static inline void
set_oid(struct dn_id *o, int type, int len)
{
o->type = type;
o->len = len;
o->subtype = 0;
}
/*
* configuration and global data for a dummynet instance
*
* When a configuration is modified from userland, 'id' is incremented
* so we can use the value to check for stale pointers.
*/
struct dn_parms {
uint32_t id; /* configuration version */
/* defaults (sysctl-accessible) */
int red_lookup_depth;
int red_avg_pkt_size;
int red_max_pkt_size;
int hash_size;
int max_hash_size;
long byte_limit; /* max queue sizes */
long slot_limit;
int io_fast;
int debug;
/* timekeeping */
struct timeval prev_t; /* last time dummynet_tick ran */
struct dn_heap evheap; /* scheduled events */
/* counters of objects -- used for reporting space */
int schk_count;
int si_count;
int fsk_count;
int queue_count;
/* ticks and other stuff */
uint64_t curr_time;
/* flowsets and schedulers are in hash tables, with 'hash_size'
* buckets. fshash is looked up at every packet arrival
* so better be generous if we expect many entries.
*/
struct dn_ht *fshash;
struct dn_ht *schedhash;
/* list of flowsets without a scheduler -- use sch_chain */
struct dn_fsk_head fsu; /* list of unlinked flowsets */
struct dn_alg_head schedlist; /* list of algorithms */
#ifdef NEW_AQM
struct dn_aqm_head aqmlist; /* list of AQMs */
#endif
/* Store the fs/sch to scan when draining. The value is the
* bucket number of the hash table. Expire can be disabled
* with net.inet.ip.dummynet.expire=0, or it happens every
* expire ticks.
**/
int drain_fs;
int drain_sch;
uint32_t expire;
uint32_t expire_cycle; /* tick count */
int init_done;
/* if the upper half is busy doing something long,
* can set the busy flag and we will enqueue packets in
* a queue for later processing.
*/
int busy;
struct mq pending;
#ifdef _KERNEL
/*
* This file is normally used in the kernel, unless we do
* some userland tests, in which case we do not need a mtx.
* uh_mtx arbitrates between system calls and also
* protects fshash, schedhash and fsunlinked.
* These structures are readonly for the lower half.
* bh_mtx protects all other structures which may be
* modified upon packet arrivals
*/
#if defined( __linux__ ) || defined( _WIN32 )
spinlock_t uh_mtx;
spinlock_t bh_mtx;
#else
struct mtx uh_mtx;
struct mtx bh_mtx;
#endif
#endif /* _KERNEL */
};
/*
* Delay line, contains all packets on output from a link.
* Every scheduler instance has one.
*/
struct delay_line {
struct dn_id oid;
struct dn_sch_inst *si;
struct mq mq;
};
/*
* The kernel side of a flowset. It is linked in a hash table
* of flowsets, and in a list of children of their parent scheduler.
* qht is either the queue or (if HAVE_MASK) a hash table of queues.
* Note that the mask to use is the (flow_mask|sched_mask), which
* changes as we attach/detach schedulers. So we store it here.
*
* XXX If we want to add scheduler-specific parameters, we need to
* put them in external storage because the scheduler may not be
* available when the fsk is created.
*/
struct dn_fsk { /* kernel side of a flowset */
struct dn_fs fs;
SLIST_ENTRY(dn_fsk) fsk_next; /* hash chain for fshash */
struct ipfw_flow_id fsk_mask;
/* qht is a hash table of queues, or just a single queue
* a bit in fs.flags tells us which one
*/
struct dn_ht *qht;
struct dn_schk *sched; /* Sched we are linked to */
SLIST_ENTRY(dn_fsk) sch_chain; /* list of fsk attached to sched */
/* bucket index used by drain routine to drain queues for this
* flowset
*/
int drain_bucket;
/* Parameters related to RED / GRED */
/* original values are in dn_fs*/
int w_q ; /* queue weight (scaled) */
int max_th ; /* maximum threshold for queue (scaled) */
int min_th ; /* minimum threshold for queue (scaled) */
int max_p ; /* maximum value for p_b (scaled) */
u_int c_1 ; /* max_p/(max_th-min_th) (scaled) */
u_int c_2 ; /* max_p*min_th/(max_th-min_th) (scaled) */
u_int c_3 ; /* for GRED, (1-max_p)/max_th (scaled) */
u_int c_4 ; /* for GRED, 1 - 2*max_p (scaled) */
u_int * w_q_lookup ; /* lookup table for computing (1-w_q)^t */
u_int lookup_depth ; /* depth of lookup table */
int lookup_step ; /* granularity inside the lookup table */
int lookup_weight ; /* equal to (1-w_q)^t / (1-w_q)^(t+1) */
int avg_pkt_size ; /* medium packet size */
int max_pkt_size ; /* max packet size */
#ifdef NEW_AQM
struct dn_aqm *aqmfp; /* Pointer to AQM functions */
void *aqmcfg; /* configuration parameters for AQM */
#endif
};
/*
* A queue is created as a child of a flowset unless it belongs to
* a !MULTIQUEUE scheduler. It is normally in a hash table in the
* flowset. fs always points to the parent flowset.
* si normally points to the sch_inst, unless the flowset has been
* detached from the scheduler -- in this case si == NULL and we
* should not enqueue.
*/
struct dn_queue {
struct dn_flow ni; /* oid, flow_id, stats */
struct mq mq; /* packets queue */
struct dn_sch_inst *_si; /* owner scheduler instance */
SLIST_ENTRY(dn_queue) q_next; /* hash chain list for qht */
struct dn_fsk *fs; /* parent flowset. */
/* RED parameters */
int avg; /* average queue length est. (scaled) */
int count; /* arrivals since last RED drop */
int random; /* random value (scaled) */
uint64_t q_time; /* start of queue idle time */
#ifdef NEW_AQM
void *aqm_status; /* per-queue status variables*/
#endif
};
/*
* The kernel side of a scheduler. Contains the userland config,
* a link, pointer to extra config arguments from command line,
* kernel flags, and a pointer to the scheduler methods.
* It is stored in a hash table, and holds a list of all
* flowsets and scheduler instances.
* XXX sch must be at the beginning, see schk_hash().
*/
struct dn_schk {
struct dn_sch sch;
struct dn_alg *fp; /* Pointer to scheduler functions */
struct dn_link link; /* The link, embedded */
struct dn_profile *profile; /* delay profile, if any */
struct dn_id *cfg; /* extra config arguments */
SLIST_ENTRY(dn_schk) schk_next; /* hash chain for schedhash */
struct dn_fsk_head fsk_list; /* all fsk linked to me */
struct dn_fsk *fs; /* Flowset for !MULTIQUEUE */
/* bucket index used by the drain routine to drain the scheduler
* instance for this flowset.
*/
int drain_bucket;
/* Hash table of all instances (through sch.sched_mask)
* or single instance if no mask. Always valid.
*/
struct dn_ht *siht;
};
/*
* Scheduler instance.
 * Contains variables and all queues relative to this instance.
 * This struct is created at runtime.
*/
struct dn_sch_inst {
struct dn_flow ni; /* oid, flowid and stats */
SLIST_ENTRY(dn_sch_inst) si_next; /* hash chain for siht */
struct delay_line dline;
struct dn_schk *sched; /* the template */
int kflags; /* DN_ACTIVE */
int64_t credit; /* bits I can transmit (more or less). */
uint64_t sched_time; /* time link was scheduled in ready_heap */
uint64_t idle_time; /* start of scheduler instance idle time */
/* q_count is the number of queues that this instance is using.
* The counter is incremented or decremented when
* a reference from the queue is created or deleted.
* It is used to make sure that a scheduler instance can be safely
* deleted by the drain routine. See notes below.
*/
int q_count;
};
/*
* NOTE about object drain.
* The system will automatically (XXX check when) drain queues and
* scheduler instances when they are idle.
* A queue is idle when it has no packets; an instance is idle when
 * it is not in the evheap, and the corresponding delay line is empty.
 * A queue can be safely deleted when it is idle because the scheduler
 * function xxx_free_queue() will remove any references to it.
* An instance can be only deleted when no queues reference it. To be sure
* of that, a counter (q_count) stores the number of queues that are pointing
* to the instance.
*
* XXX
* Order of scan:
 * - take all flowsets in a bucket of the flowset hash table
* - take all queues in a bucket for the flowset
* - increment the queue bucket
* - scan next flowset bucket
* Nothing is done if a bucket contains no entries.
*
 * The same scheme is used for scheduler instances.
*/
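/*
 * A minimal illustrative sketch of how the drain scan described above
 * could be driven from the periodic tick, using the expire and
 * expire_cycle fields of struct dn_parms and the drain entry points
 * declared later in this header.  The function name and the call site
 * are assumptions.
 */
void dn_drain_scheduler(void);		/* also declared further below */
void dn_drain_queue(void);

static inline void
dn_drain_sketch(struct dn_parms *cfg)
{
	if (cfg->expire == 0)			/* net.inet.ip.dummynet.expire=0 */
		return;				/* periodic draining disabled */
	if (++cfg->expire_cycle < cfg->expire)
		return;				/* not yet time to drain */
	cfg->expire_cycle = 0;
	dn_drain_queue();	/* scan flowset/queue buckets (see drain_fs) */
	dn_drain_scheduler();	/* scan scheduler buckets (see drain_sch) */
}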
/* kernel-side flags. Linux has DN_DELETE in fcntl.h
*/
enum {
/* 1 and 2 are reserved for the SCAN flags */
DN_DESTROY = 0x0004, /* destroy */
DN_DELETE_FS = 0x0008, /* destroy flowset */
DN_DETACH = 0x0010,
DN_ACTIVE = 0x0020, /* object is in evheap */
DN_F_DLINE = 0x0040, /* object is a delay line */
DN_DEL_SAFE = 0x0080, /* delete a queue only if no longer needed
* by scheduler */
DN_QHT_IS_Q = 0x0100, /* in flowset, qht is a single queue */
};
extern struct dn_parms dn_cfg;
//VNET_DECLARE(struct dn_parms, _base_dn_cfg);
//#define dn_cfg VNET(_base_dn_cfg)
int dummynet_io(struct mbuf **, int , struct ip_fw_args *);
void dummynet_task(void *context, int pending);
void dn_reschedule(void);
struct dn_queue *ipdn_q_find(struct dn_fsk *, struct dn_sch_inst *,
struct ipfw_flow_id *);
struct dn_sch_inst *ipdn_si_find(struct dn_schk *, struct ipfw_flow_id *);
/*
* copy_range is a template for requests for ranges of pipes/queues/scheds.
* The number of ranges is variable and can be derived by o.len.
* As a default, we use a small number of entries so that the struct
* fits easily on the stack and is sufficient for most common requests.
*/
#define DEFAULT_RANGES 5
struct copy_range {
struct dn_id o;
uint32_t r[ 2 * DEFAULT_RANGES ];
};
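/*
 * A minimal illustrative sketch of filling a stack-allocated
 * copy_range with two ranges.  The oid type value is left to the
 * caller, and the interpretation of each r[] pair as lower/upper
 * bounds is an assumption; set_oid() and the 2-entries-per-range
 * layout come from the definitions above.
 */
static inline void
copy_range_fill_example(struct copy_range *cr, int oid_type)
{
	memset(cr, 0, sizeof(*cr));
	set_oid(&cr->o, oid_type, sizeof(*cr));
	cr->r[0] = 1;		/* first range: objects 1 .. 10 */
	cr->r[1] = 10;
	cr->r[2] = 100;		/* second range: objects 100 .. 200 */
	cr->r[3] = 200;
	/* remaining r[] pairs stay zero (unused) */
}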
struct copy_args {
char **start;
char *end;
int flags;
int type;
struct copy_range *extra; /* extra filtering */
};
struct sockopt;
int ip_dummynet_compat(struct sockopt *sopt);
int dummynet_get(struct sockopt *sopt, void **compat);
int dn_c_copy_q (void *_ni, void *arg);
int dn_c_copy_pipe(struct dn_schk *s, struct copy_args *a, int nq);
int dn_c_copy_fs(struct dn_fsk *f, struct copy_args *a, int nq);
int dn_compat_copy_queue(struct copy_args *a, void *_o);
int dn_compat_copy_pipe(struct copy_args *a, void *_o);
int copy_data_helper_compat(void *_o, void *_arg);
int dn_compat_calc_size(void);
int do_config(void *p, int l);
/* function to drain idle object */
void dn_drain_scheduler(void);
void dn_drain_queue(void);
#ifdef NEW_AQM
int ecn_mark(struct mbuf* m);
/* moved from ip_dn_io.c to here to be available to AQM modules */
static inline void
mq_append(struct mq *q, struct mbuf *m)
{
#ifdef USERSPACE
// buffers from netmap need to be copied
// XXX note that the routine is not expected to fail
ND("append %p to %p", m, q);
if (m->m_flags & M_STACK) {
struct mbuf *m_new;
void *p;
int l, ofs;
ofs = m->m_data - m->__m_extbuf;
// XXX allocate
MGETHDR(m_new, M_NOWAIT, MT_DATA);
ND("*** WARNING, volatile buf %p ext %p %d dofs %d m_new %p",
m, m->__m_extbuf, m->__m_extlen, ofs, m_new);
p = m_new->__m_extbuf; /* new pointer */
l = m_new->__m_extlen; /* new len */
if (l <= m->__m_extlen) {
panic("extlen too large");
}
*m_new = *m; // copy
m_new->m_flags &= ~M_STACK;
m_new->__m_extbuf = p; // point to new buffer
_pkt_copy(m->__m_extbuf, p, m->__m_extlen);
m_new->m_data = p + ofs;
m = m_new;
}
#endif /* USERSPACE */
if (q->head == NULL)
q->head = m;
else
q->tail->m_nextpkt = m;
q->count++;
q->tail = m;
m->m_nextpkt = NULL;
}
#endif /* NEW_AQM */
#endif /* _IP_DN_PRIVATE_H */

File diff suppressed because it is too large


@@ -1,211 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_pflog.h>
#include <net/if_var.h>
#include <net/if_clone.h>
#include <net/if_types.h>
#include <net/vnet.h>
#include <net/bpf.h>
#include <netinet/in.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_var.h>
#include <netpfil/ipfw/ip_fw_private.h>
static VNET_DEFINE(struct ifnet *, log_if);
static VNET_DEFINE(struct ifnet *, pflog_if);
static VNET_DEFINE(struct if_clone *, ipfw_cloner);
static VNET_DEFINE(struct if_clone *, ipfwlog_cloner);
#define V_ipfw_cloner VNET(ipfw_cloner)
#define V_ipfwlog_cloner VNET(ipfwlog_cloner)
#define V_log_if VNET(log_if)
#define V_pflog_if VNET(pflog_if)
static struct rmlock log_if_lock;
#define LOGIF_LOCK_INIT(x) rm_init(&log_if_lock, "ipfw log_if lock")
#define LOGIF_LOCK_DESTROY(x) rm_destroy(&log_if_lock)
#define LOGIF_RLOCK_TRACKER struct rm_priotracker _log_tracker
#define LOGIF_RLOCK(x) rm_rlock(&log_if_lock, &_log_tracker)
#define LOGIF_RUNLOCK(x) rm_runlock(&log_if_lock, &_log_tracker)
#define LOGIF_WLOCK(x) rm_wlock(&log_if_lock)
#define LOGIF_WUNLOCK(x) rm_wunlock(&log_if_lock)
static const char ipfwname[] = "ipfw";
static const char ipfwlogname[] = "ipfwlog";
static int
ipfw_bpf_ioctl(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
return (EINVAL);
}
static int
ipfw_bpf_output(struct ifnet *ifp, struct mbuf *m,
const struct sockaddr *dst, struct route *ro)
{
if (m != NULL)
FREE_PKT(m);
return (0);
}
static void
ipfw_clone_destroy(struct ifnet *ifp)
{
LOGIF_WLOCK();
if (ifp->if_hdrlen == ETHER_HDR_LEN)
V_log_if = NULL;
else
V_pflog_if = NULL;
LOGIF_WUNLOCK();
bpfdetach(ifp);
if_detach(ifp);
if_free(ifp);
}
static int
ipfw_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
struct ifnet *ifp;
ifp = if_alloc(IFT_PFLOG);
if (ifp == NULL)
return (ENOSPC);
if_initname(ifp, ipfwname, unit);
ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_mtu = 65536;
ifp->if_ioctl = ipfw_bpf_ioctl;
ifp->if_output = ipfw_bpf_output;
ifp->if_hdrlen = ETHER_HDR_LEN;
if_attach(ifp);
bpfattach(ifp, DLT_EN10MB, ETHER_HDR_LEN);
LOGIF_WLOCK();
if (V_log_if != NULL) {
LOGIF_WUNLOCK();
bpfdetach(ifp);
if_detach(ifp);
if_free(ifp);
return (EEXIST);
}
V_log_if = ifp;
LOGIF_WUNLOCK();
return (0);
}
static int
ipfwlog_clone_create(struct if_clone *ifc, int unit, caddr_t params)
{
struct ifnet *ifp;
ifp = if_alloc(IFT_PFLOG);
if (ifp == NULL)
return (ENOSPC);
if_initname(ifp, ipfwlogname, unit);
ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
ifp->if_mtu = 65536;
ifp->if_ioctl = ipfw_bpf_ioctl;
ifp->if_output = ipfw_bpf_output;
ifp->if_hdrlen = PFLOG_HDRLEN;
if_attach(ifp);
bpfattach(ifp, DLT_PFLOG, PFLOG_HDRLEN);
LOGIF_WLOCK();
if (V_pflog_if != NULL) {
LOGIF_WUNLOCK();
bpfdetach(ifp);
if_detach(ifp);
if_free(ifp);
return (EEXIST);
}
V_pflog_if = ifp;
LOGIF_WUNLOCK();
return (0);
}
void
ipfw_bpf_mtap2(void *data, u_int dlen, struct mbuf *m)
{
LOGIF_RLOCK_TRACKER;
LOGIF_RLOCK();
if (dlen == ETHER_HDR_LEN) {
if (V_log_if == NULL) {
LOGIF_RUNLOCK();
return;
}
BPF_MTAP2(V_log_if, data, dlen, m);
} else if (dlen == PFLOG_HDRLEN) {
if (V_pflog_if == NULL) {
LOGIF_RUNLOCK();
return;
}
BPF_MTAP2(V_pflog_if, data, dlen, m);
}
LOGIF_RUNLOCK();
}
void
ipfw_bpf_init(int first)
{
if (first) {
LOGIF_LOCK_INIT();
V_log_if = NULL;
V_pflog_if = NULL;
}
V_ipfw_cloner = if_clone_simple(ipfwname, ipfw_clone_create,
ipfw_clone_destroy, 0);
V_ipfwlog_cloner = if_clone_simple(ipfwlogname, ipfwlog_clone_create,
ipfw_clone_destroy, 0);
}
void
ipfw_bpf_uninit(int last)
{
if_clone_detach(V_ipfw_cloner);
if_clone_detach(V_ipfwlog_cloner);
if (last)
LOGIF_LOCK_DESTROY();
}

File diff suppressed because it is too large


@@ -1,388 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2016-2017 Yandex LLC
* Copyright (c) 2016-2017 Andrey V. Elsukov <ae@FreeBSD.org>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/queue.h>
#include <net/pfil.h>
#include <net/if.h> /* ip_fw.h requires IFNAMSIZ */
#include <netinet/in.h>
#include <netinet/ip_var.h> /* struct ipfw_rule_ref */
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <rtems/bsd/local/opt_ipfw.h>
/*
* External actions support for ipfw.
*
 * This code provides a KPI for implementing loadable modules that
 * can provide handlers for external action opcodes in ipfw rules.
 * A module should implement an opcode handler of type ipfw_eaction_t.
 * This handler will be called by the ipfw_chk() function when the
 * O_EXTERNAL_ACTION opcode is matched. The handler must return a
 * value that is used as the return value of ipfw_chk(), i.e. IP_FW_PASS,
 * IP_FW_DENY (see ip_fw_private.h).
 * The last argument must also be set by the handler. If it is zero,
 * the search continues with the next rule. If it has a non-zero value,
 * the search terminates.
 *
 * The module that implements an external action should register its
 * handler and name with the ipfw_add_eaction() function.
 * This function returns an eaction_id that can be used by the module.
 *
 * It is possible to pass additional information to an external
 * action handler using the O_EXTERNAL_INSTANCE and O_EXTERNAL_DATA opcodes.
 * Such opcodes should come right after the O_EXTERNAL_ACTION opcode.
 * For the O_EXTERNAL_INSTANCE opcode, cmd->arg1 contains the index of a
 * named object related to an instance of the external action.
 * For the O_EXTERNAL_DATA opcode, the cmd itself contains the data that can
 * be used by the external action handler without creating a named instance.
 *
 * When an eaction module uses named instances, it should register
 * opcode rewriting routines for the O_EXTERNAL_INSTANCE opcode. The
 * classifier callback can look back into the O_EXTERNAL_ACTION opcode (it
 * must be at (ipfw_insn *)(cmd - 1)). From the arg1 of O_EXTERNAL_ACTION
 * it can determine the eaction_id and compare it with its own.
 * The macro IPFW_TLV_EACTION_NAME(eaction_id) can be used to determine
 * the type of the named_object related to an external action instance.
 *
 * On module unload, the handler should be deregistered with the
 * ipfw_del_eaction() function using the known eaction_id.
*/
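/*
 * A minimal illustrative sketch of a hypothetical eaction module
 * following the description above.  The names my_eaction,
 * my_eaction_id and the attach/detach helpers are assumptions; the
 * handler signature and the KPI calls match ipfw_eaction_t,
 * ipfw_add_eaction() and ipfw_del_eaction() as used in this file.
 */
static uint16_t my_eaction_id;

static int
my_eaction(struct ip_fw_chain *ch, struct ip_fw_args *args,
    ipfw_insn *cmd, int *done)
{

	*done = 0;		/* let the search continue with the next rule */
	return (IP_FW_PASS);
}

static void
my_eaction_attach(struct ip_fw_chain *ch)
{

	my_eaction_id = ipfw_add_eaction(ch, my_eaction, "my_eaction");
	/* my_eaction_id == 0 means the registration failed */
}

static void
my_eaction_detach(struct ip_fw_chain *ch)
{

	if (my_eaction_id != 0)
		(void)ipfw_del_eaction(ch, my_eaction_id);
}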
struct eaction_obj {
struct named_object no;
ipfw_eaction_t *handler;
char name[64];
};
#define EACTION_OBJ(ch, cmd) \
((struct eaction_obj *)SRV_OBJECT((ch), (cmd)->arg1))
#if 0
#define EACTION_DEBUG(fmt, ...) do { \
printf("%s: " fmt "\n", __func__, ## __VA_ARGS__); \
} while (0)
#else
#define EACTION_DEBUG(fmt, ...)
#endif
const char *default_eaction_typename = "drop";
static int
default_eaction(struct ip_fw_chain *ch, struct ip_fw_args *args,
ipfw_insn *cmd, int *done)
{
*done = 1; /* terminate the search */
return (IP_FW_DENY);
}
/*
* Opcode rewriting callbacks.
*/
static int
eaction_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{
EACTION_DEBUG("opcode %d, arg1 %d", cmd->opcode, cmd->arg1);
*puidx = cmd->arg1;
*ptype = 0;
return (0);
}
static void
eaction_update(ipfw_insn *cmd, uint16_t idx)
{
cmd->arg1 = idx;
EACTION_DEBUG("opcode %d, arg1 -> %d", cmd->opcode, cmd->arg1);
}
static int
eaction_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
struct named_object **pno)
{
ipfw_obj_ntlv *ntlv;
if (ti->tlvs == NULL)
return (EINVAL);
/* Search ntlv in the buffer provided by user */
ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
IPFW_TLV_EACTION);
if (ntlv == NULL)
return (EINVAL);
EACTION_DEBUG("name %s, uidx %u, type %u", ntlv->name,
ti->uidx, ti->type);
/*
* Search named object with corresponding name.
* Since eaction objects are global - ignore the set value
* and use zero instead.
*/
*pno = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch),
0, IPFW_TLV_EACTION, ntlv->name);
if (*pno == NULL)
return (ESRCH);
return (0);
}
static struct named_object *
eaction_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{
EACTION_DEBUG("kidx %u", idx);
return (ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), idx));
}
static struct opcode_obj_rewrite eaction_opcodes[] = {
{
.opcode = O_EXTERNAL_ACTION,
.etlv = IPFW_TLV_EACTION,
.classifier = eaction_classify,
.update = eaction_update,
.find_byname = eaction_findbyname,
.find_bykidx = eaction_findbykidx,
},
};
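/*
 * An illustrative sketch of an opcode rewriting classifier for a
 * hypothetical module that uses named O_EXTERNAL_INSTANCE objects,
 * following the "look back at cmd - 1" convention described at the top
 * of this file.  It reuses my_eaction_id from the registration sketch
 * above; the assumption that a non-zero return lets another module's
 * rewriter handle the instruction is not spelled out in this file.
 */
static int
my_instance_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{
	ipfw_insn *action;

	action = cmd - 1;		/* the preceding O_EXTERNAL_ACTION */
	if (action->opcode != O_EXTERNAL_ACTION ||
	    action->arg1 != my_eaction_id)
		return (1);		/* not our instance opcode */
	*puidx = cmd->arg1;		/* index of the named instance */
	*ptype = 0;
	return (0);
}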
static int
create_eaction_obj(struct ip_fw_chain *ch, ipfw_eaction_t handler,
const char *name, uint16_t *eaction_id)
{
struct namedobj_instance *ni;
struct eaction_obj *obj;
IPFW_UH_UNLOCK_ASSERT(ch);
ni = CHAIN_TO_SRV(ch);
obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
obj->no.name = obj->name;
obj->no.etlv = IPFW_TLV_EACTION;
obj->handler = handler;
strlcpy(obj->name, name, sizeof(obj->name));
IPFW_UH_WLOCK(ch);
if (ipfw_objhash_lookup_name_type(ni, 0, IPFW_TLV_EACTION,
name) != NULL) {
/*
* Object is already created.
* We don't allow eactions with the same name.
*/
IPFW_UH_WUNLOCK(ch);
free(obj, M_IPFW);
EACTION_DEBUG("External action with typename "
"'%s' already exists", name);
return (EEXIST);
}
if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
IPFW_UH_WUNLOCK(ch);
free(obj, M_IPFW);
EACTION_DEBUG("alloc_idx failed");
return (ENOSPC);
}
ipfw_objhash_add(ni, &obj->no);
IPFW_WLOCK(ch);
SRV_OBJECT(ch, obj->no.kidx) = obj;
IPFW_WUNLOCK(ch);
obj->no.refcnt++;
IPFW_UH_WUNLOCK(ch);
if (eaction_id != NULL)
*eaction_id = obj->no.kidx;
return (0);
}
static void
destroy_eaction_obj(struct ip_fw_chain *ch, struct named_object *no)
{
struct namedobj_instance *ni;
struct eaction_obj *obj;
IPFW_UH_WLOCK_ASSERT(ch);
ni = CHAIN_TO_SRV(ch);
IPFW_WLOCK(ch);
obj = SRV_OBJECT(ch, no->kidx);
SRV_OBJECT(ch, no->kidx) = NULL;
IPFW_WUNLOCK(ch);
ipfw_objhash_del(ni, no);
ipfw_objhash_free_idx(ni, no->kidx);
free(obj, M_IPFW);
}
/*
* Resets all eaction opcodes to default handlers.
*/
static void
reset_eaction_obj(struct ip_fw_chain *ch, uint16_t eaction_id)
{
struct named_object *no;
struct ip_fw *rule;
ipfw_insn *cmd;
int i;
IPFW_UH_WLOCK_ASSERT(ch);
no = ipfw_objhash_lookup_name_type(CHAIN_TO_SRV(ch), 0,
IPFW_TLV_EACTION, default_eaction_typename);
if (no == NULL)
panic("Default external action handler is not found");
if (eaction_id == no->kidx)
panic("Wrong eaction_id");
EACTION_DEBUG("replace id %u with %u", eaction_id, no->kidx);
IPFW_WLOCK(ch);
for (i = 0; i < ch->n_rules; i++) {
rule = ch->map[i];
cmd = ACTION_PTR(rule);
if (cmd->opcode != O_EXTERNAL_ACTION)
continue;
if (cmd->arg1 != eaction_id)
continue;
cmd->arg1 = no->kidx; /* Set to default id */
/*
* XXX: we only bump refcount on default_eaction.
* Refcount on the original object will be just
* ignored on destroy. But on default_eaction it
* will be decremented on rule deletion.
*/
no->refcnt++;
/*
* Since named_object related to this instance will be
* also destroyed, truncate the chain of opcodes to
* remove the rest of cmd chain just after O_EXTERNAL_ACTION
* opcode.
*/
if (rule->act_ofs < rule->cmd_len - 1) {
EACTION_DEBUG("truncate rule %d: len %u -> %u",
rule->rulenum, rule->cmd_len, rule->act_ofs + 1);
rule->cmd_len = rule->act_ofs + 1;
}
}
IPFW_WUNLOCK(ch);
}
/*
* Initialize external actions framework.
* Create object with default eaction handler "drop".
*/
int
ipfw_eaction_init(struct ip_fw_chain *ch, int first)
{
int error;
error = create_eaction_obj(ch, default_eaction,
default_eaction_typename, NULL);
if (error != 0)
return (error);
IPFW_ADD_OBJ_REWRITER(first, eaction_opcodes);
EACTION_DEBUG("External actions support initialized");
return (0);
}
void
ipfw_eaction_uninit(struct ip_fw_chain *ch, int last)
{
struct namedobj_instance *ni;
struct named_object *no;
ni = CHAIN_TO_SRV(ch);
IPFW_UH_WLOCK(ch);
no = ipfw_objhash_lookup_name_type(ni, 0, IPFW_TLV_EACTION,
default_eaction_typename);
if (no != NULL)
destroy_eaction_obj(ch, no);
IPFW_UH_WUNLOCK(ch);
IPFW_DEL_OBJ_REWRITER(last, eaction_opcodes);
EACTION_DEBUG("External actions support uninitialized");
}
/*
 * Registers an external action handler in the global array.
 * On success it returns the eaction id, otherwise zero.
*/
uint16_t
ipfw_add_eaction(struct ip_fw_chain *ch, ipfw_eaction_t handler,
const char *name)
{
uint16_t eaction_id;
eaction_id = 0;
if (ipfw_check_object_name_generic(name) == 0) {
create_eaction_obj(ch, handler, name, &eaction_id);
EACTION_DEBUG("Registered external action '%s' with id %u",
name, eaction_id);
}
return (eaction_id);
}
/*
* Deregisters external action handler with id eaction_id.
*/
int
ipfw_del_eaction(struct ip_fw_chain *ch, uint16_t eaction_id)
{
struct named_object *no;
IPFW_UH_WLOCK(ch);
no = ipfw_objhash_lookup_kidx(CHAIN_TO_SRV(ch), eaction_id);
if (no == NULL || no->etlv != IPFW_TLV_EACTION) {
IPFW_UH_WUNLOCK(ch);
return (EINVAL);
}
if (no->refcnt > 1)
reset_eaction_obj(ch, eaction_id);
EACTION_DEBUG("External action '%s' with id %u unregistered",
no->name, eaction_id);
destroy_eaction_obj(ch, no);
IPFW_UH_WUNLOCK(ch);
return (0);
}
int
ipfw_run_eaction(struct ip_fw_chain *ch, struct ip_fw_args *args,
ipfw_insn *cmd, int *done)
{
return (EACTION_OBJ(ch, cmd)->handler(ch, args, cmd, done));
}


@@ -1,542 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2014 Yandex LLC.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Kernel interface tracking API.
*
*/
#include <rtems/bsd/local/opt_ipfw.h>
#include <rtems/bsd/local/opt_inet.h>
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <rtems/bsd/local/opt_inet6.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/eventhandler.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip_var.h> /* struct ipfw_rule_ref */
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#define CHAIN_TO_II(ch) ((struct namedobj_instance *)ch->ifcfg)
#define DEFAULT_IFACES 128
static void handle_ifdetach(struct ip_fw_chain *ch, struct ipfw_iface *iif,
uint16_t ifindex);
static void handle_ifattach(struct ip_fw_chain *ch, struct ipfw_iface *iif,
uint16_t ifindex);
static int list_ifaces(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd);
static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_XIFLIST, 0, HDIR_GET, list_ifaces },
};
/*
* FreeBSD Kernel interface.
*/
static void ipfw_kifhandler(void *arg, struct ifnet *ifp);
static int ipfw_kiflookup(char *name);
static void iface_khandler_register(void);
static void iface_khandler_deregister(void);
static eventhandler_tag ipfw_ifdetach_event, ipfw_ifattach_event;
static int num_vnets = 0;
static struct mtx vnet_mtx;
/*
* Checks if kernel interface is contained in our tracked
* interface list and calls attach/detach handler.
*/
static void
ipfw_kifhandler(void *arg, struct ifnet *ifp)
{
struct ip_fw_chain *ch;
struct ipfw_iface *iif;
struct namedobj_instance *ii;
uintptr_t htype;
if (V_ipfw_vnet_ready == 0)
return;
ch = &V_layer3_chain;
htype = (uintptr_t)arg;
IPFW_UH_WLOCK(ch);
ii = CHAIN_TO_II(ch);
if (ii == NULL) {
IPFW_UH_WUNLOCK(ch);
return;
}
iif = (struct ipfw_iface*)ipfw_objhash_lookup_name(ii, 0,
if_name(ifp));
if (iif != NULL) {
if (htype == 1)
handle_ifattach(ch, iif, ifp->if_index);
else
handle_ifdetach(ch, iif, ifp->if_index);
}
IPFW_UH_WUNLOCK(ch);
}
/*
* Reference current VNET as iface tracking API user.
* Registers interface tracking handlers for first VNET.
*/
static void
iface_khandler_register()
{
int create;
create = 0;
mtx_lock(&vnet_mtx);
if (num_vnets == 0)
create = 1;
num_vnets++;
mtx_unlock(&vnet_mtx);
if (create == 0)
return;
printf("IPFW: starting up interface tracker\n");
ipfw_ifdetach_event = EVENTHANDLER_REGISTER(
ifnet_departure_event, ipfw_kifhandler, NULL,
EVENTHANDLER_PRI_ANY);
ipfw_ifattach_event = EVENTHANDLER_REGISTER(
ifnet_arrival_event, ipfw_kifhandler, (void*)((uintptr_t)1),
EVENTHANDLER_PRI_ANY);
}
/*
 * Detach interface event handlers on the last VNET instance detach.
*/
static void
iface_khandler_deregister()
{
int destroy;
destroy = 0;
mtx_lock(&vnet_mtx);
if (num_vnets == 1)
destroy = 1;
num_vnets--;
mtx_unlock(&vnet_mtx);
if (destroy == 0)
return;
EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
ipfw_ifattach_event);
EVENTHANDLER_DEREGISTER(ifnet_departure_event,
ipfw_ifdetach_event);
}
/*
* Retrieves ifindex for given @name.
*
* Returns ifindex or 0.
*/
static int
ipfw_kiflookup(char *name)
{
struct ifnet *ifp;
int ifindex;
ifindex = 0;
if ((ifp = ifunit_ref(name)) != NULL) {
ifindex = ifp->if_index;
if_rele(ifp);
}
return (ifindex);
}
/*
* Global ipfw startup hook.
* Since we perform lazy initialization, do nothing except
* mutex init.
*/
int
ipfw_iface_init()
{
mtx_init(&vnet_mtx, "IPFW ifhandler mtx", NULL, MTX_DEF);
IPFW_ADD_SOPT_HANDLER(1, scodes);
return (0);
}
/*
* Global ipfw destroy hook.
* Unregister khandlers iff init has been done.
*/
void
ipfw_iface_destroy()
{
IPFW_DEL_SOPT_HANDLER(1, scodes);
mtx_destroy(&vnet_mtx);
}
/*
* Perform actual init on internal request.
* Inits both namehash and global khandler.
*/
static void
vnet_ipfw_iface_init(struct ip_fw_chain *ch)
{
struct namedobj_instance *ii;
ii = ipfw_objhash_create(DEFAULT_IFACES);
IPFW_UH_WLOCK(ch);
if (ch->ifcfg == NULL) {
ch->ifcfg = ii;
ii = NULL;
}
IPFW_UH_WUNLOCK(ch);
if (ii != NULL) {
/* Already initialized. Free namehash. */
ipfw_objhash_destroy(ii);
} else {
/* We're the first ones. Init kernel hooks. */
iface_khandler_register();
}
}
static int
destroy_iface(struct namedobj_instance *ii, struct named_object *no,
void *arg)
{
/* Assume all consumers have been already detached */
free(no, M_IPFW);
return (0);
}
/*
* Per-VNET ipfw detach hook.
*
*/
void
vnet_ipfw_iface_destroy(struct ip_fw_chain *ch)
{
struct namedobj_instance *ii;
IPFW_UH_WLOCK(ch);
ii = CHAIN_TO_II(ch);
ch->ifcfg = NULL;
IPFW_UH_WUNLOCK(ch);
if (ii != NULL) {
ipfw_objhash_foreach(ii, destroy_iface, ch);
ipfw_objhash_destroy(ii);
iface_khandler_deregister();
}
}
/*
* Notify the subsystem that we are interested in tracking
* interface @name. This function has to be called without
* holding any locks to permit allocating the necessary states
* for proper interface tracking.
*
* Returns 0 on success.
*/
int
ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
struct ipfw_ifc *ic)
{
struct namedobj_instance *ii;
struct ipfw_iface *iif, *tmp;
if (strlen(name) >= sizeof(iif->ifname))
return (EINVAL);
IPFW_UH_WLOCK(ch);
ii = CHAIN_TO_II(ch);
if (ii == NULL) {
/*
* First request to subsystem.
* Let's perform init.
*/
IPFW_UH_WUNLOCK(ch);
vnet_ipfw_iface_init(ch);
IPFW_UH_WLOCK(ch);
ii = CHAIN_TO_II(ch);
}
iif = (struct ipfw_iface *)ipfw_objhash_lookup_name(ii, 0, name);
if (iif != NULL) {
iif->no.refcnt++;
ic->iface = iif;
IPFW_UH_WUNLOCK(ch);
return (0);
}
IPFW_UH_WUNLOCK(ch);
/* Not found. Let's create one */
iif = malloc(sizeof(struct ipfw_iface), M_IPFW, M_WAITOK | M_ZERO);
TAILQ_INIT(&iif->consumers);
iif->no.name = iif->ifname;
strlcpy(iif->ifname, name, sizeof(iif->ifname));
/*
* Ref & link to the list.
*
* We assume ifnet_arrival_event / ifnet_departure_event
* are not holding any locks.
*/
iif->no.refcnt = 1;
IPFW_UH_WLOCK(ch);
tmp = (struct ipfw_iface *)ipfw_objhash_lookup_name(ii, 0, name);
if (tmp != NULL) {
/* Interface has been created since unlock. Ref and return */
tmp->no.refcnt++;
ic->iface = tmp;
IPFW_UH_WUNLOCK(ch);
free(iif, M_IPFW);
return (0);
}
iif->ifindex = ipfw_kiflookup(name);
if (iif->ifindex != 0)
iif->resolved = 1;
ipfw_objhash_add(ii, &iif->no);
ic->iface = iif;
IPFW_UH_WUNLOCK(ch);
return (0);
}
/*
* Adds @ic to the list of iif interface consumers.
 * Must be called while holding both the UH and regular write locks.
* Callback may be immediately called (if interface exists).
*/
void
ipfw_iface_add_notify(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{
struct ipfw_iface *iif;
IPFW_UH_WLOCK_ASSERT(ch);
IPFW_WLOCK_ASSERT(ch);
iif = ic->iface;
TAILQ_INSERT_TAIL(&iif->consumers, ic, next);
if (iif->resolved != 0)
ic->cb(ch, ic->cbdata, iif->ifindex);
}
/*
* Unlinks interface tracker object @ic from interface.
* Must be called while holding UH lock.
*/
void
ipfw_iface_del_notify(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{
struct ipfw_iface *iif;
IPFW_UH_WLOCK_ASSERT(ch);
iif = ic->iface;
TAILQ_REMOVE(&iif->consumers, ic, next);
}
/*
* Unreference interface specified by @ic.
* Must be called while holding UH lock.
*/
void
ipfw_iface_unref(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{
struct ipfw_iface *iif;
IPFW_UH_WLOCK_ASSERT(ch);
iif = ic->iface;
ic->iface = NULL;
iif->no.refcnt--;
/* TODO: check for references & delete */
}
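/*
 * A minimal illustrative sketch of a hypothetical consumer of the
 * tracking API above.  The my_iftracker type, the callback and the
 * interface name "em0" are assumptions; the calling and locking rules
 * follow the comments of ipfw_iface_ref() and ipfw_iface_add_notify().
 */
struct my_iftracker {
	struct ipfw_ifc	ic;		/* consumer handle used by the API */
	uint16_t	ifindex;	/* last reported ifindex, 0 if detached */
};

static void
my_iftracker_cb(struct ip_fw_chain *ch, void *cbdata, uint16_t ifindex)
{
	struct my_iftracker *t = cbdata;

	t->ifindex = ifindex;		/* 0 on departure, real index on arrival */
}

static int
my_iftracker_start(struct ip_fw_chain *ch, struct my_iftracker *t)
{
	char name[] = "em0";		/* hypothetical interface name */
	int error;

	t->ic.cb = my_iftracker_cb;
	t->ic.cbdata = t;
	error = ipfw_iface_ref(ch, name, &t->ic);	/* no locks held */
	if (error != 0)
		return (error);
	IPFW_UH_WLOCK(ch);
	IPFW_WLOCK(ch);
	ipfw_iface_add_notify(ch, &t->ic);	/* may call the callback now */
	IPFW_WUNLOCK(ch);
	IPFW_UH_WUNLOCK(ch);
	/* teardown: ipfw_iface_del_notify() + ipfw_iface_unref() under UH lock */
	return (0);
}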
/*
* Interface arrival handler.
*/
static void
handle_ifattach(struct ip_fw_chain *ch, struct ipfw_iface *iif,
uint16_t ifindex)
{
struct ipfw_ifc *ic;
IPFW_UH_WLOCK_ASSERT(ch);
iif->gencnt++;
iif->resolved = 1;
iif->ifindex = ifindex;
IPFW_WLOCK(ch);
TAILQ_FOREACH(ic, &iif->consumers, next)
ic->cb(ch, ic->cbdata, iif->ifindex);
IPFW_WUNLOCK(ch);
}
/*
* Interface departure handler.
*/
static void
handle_ifdetach(struct ip_fw_chain *ch, struct ipfw_iface *iif,
uint16_t ifindex)
{
struct ipfw_ifc *ic;
IPFW_UH_WLOCK_ASSERT(ch);
IPFW_WLOCK(ch);
TAILQ_FOREACH(ic, &iif->consumers, next)
ic->cb(ch, ic->cbdata, 0);
IPFW_WUNLOCK(ch);
iif->gencnt++;
iif->resolved = 0;
iif->ifindex = 0;
}
struct dump_iface_args {
struct ip_fw_chain *ch;
struct sockopt_data *sd;
};
static int
export_iface_internal(struct namedobj_instance *ii, struct named_object *no,
void *arg)
{
ipfw_iface_info *i;
struct dump_iface_args *da;
struct ipfw_iface *iif;
da = (struct dump_iface_args *)arg;
i = (ipfw_iface_info *)ipfw_get_sopt_space(da->sd, sizeof(*i));
KASSERT(i != NULL, ("previously checked buffer is not enough"));
iif = (struct ipfw_iface *)no;
strlcpy(i->ifname, iif->ifname, sizeof(i->ifname));
if (iif->resolved)
i->flags |= IPFW_IFFLAG_RESOLVED;
i->ifindex = iif->ifindex;
i->refcnt = iif->no.refcnt;
i->gencnt = iif->gencnt;
return (0);
}
/*
 * Lists all interfaces currently tracked by ipfw.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
* Reply: [ ipfw_obj_lheader ipfw_iface_info x N ]
*
* Returns 0 on success
*/
static int
list_ifaces(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
struct namedobj_instance *ii;
struct _ipfw_obj_lheader *olh;
struct dump_iface_args da;
uint32_t count, size;
olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
if (olh == NULL)
return (EINVAL);
if (sd->valsize < olh->size)
return (EINVAL);
IPFW_UH_RLOCK(ch);
ii = CHAIN_TO_II(ch);
if (ii != NULL)
count = ipfw_objhash_count(ii);
else
count = 0;
size = count * sizeof(ipfw_iface_info) + sizeof(ipfw_obj_lheader);
/* Fill in header regardless of buffer size */
olh->count = count;
olh->objsize = sizeof(ipfw_iface_info);
if (size > olh->size) {
olh->size = size;
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
olh->size = size;
da.ch = ch;
da.sd = sd;
if (ii != NULL)
ipfw_objhash_foreach(ii, export_iface_internal, &da);
IPFW_UH_RUNLOCK(ch);
return (0);
}


@@ -1,419 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Logging support for ipfw
*/
#include <rtems/bsd/local/opt_ipfw.h>
#include <rtems/bsd/local/opt_inet.h>
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <rtems/bsd/local/opt_inet6.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <net/ethernet.h> /* for ETHERTYPE_IP */
#include <net/if.h>
#include <net/if_var.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#ifdef INET6
#include <netinet6/in6_var.h> /* ip6_sprintf() */
#endif
#include <netpfil/ipfw/ip_fw_private.h>
#ifdef MAC
#include <security/mac/mac_framework.h>
#endif
/*
* L3HDR maps an ipv4 pointer into a layer3 header pointer of type T
* Other macros just cast void * into the appropriate type
*/
#define L3HDR(T, ip) ((T *)((u_int32_t *)(ip) + (ip)->ip_hl))
#define TCP(p) ((struct tcphdr *)(p))
#define SCTP(p) ((struct sctphdr *)(p))
#define UDP(p) ((struct udphdr *)(p))
#define ICMP(p) ((struct icmphdr *)(p))
#define ICMP6(p) ((struct icmp6_hdr *)(p))
#ifdef __APPLE__
#undef snprintf
#define snprintf sprintf
#define SNPARGS(buf, len) buf + len
#define SNP(buf) buf
#else /* !__APPLE__ */
#define SNPARGS(buf, len) buf + len, sizeof(buf) > len ? sizeof(buf) - len : 0
#define SNP(buf) buf, sizeof(buf)
#endif /* !__APPLE__ */
#define TARG(k, f) IP_FW_ARG_TABLEARG(chain, k, f)
/*
* We enter here when we have a rule with O_LOG.
* XXX this function alone takes about 2Kbytes of code!
*/
void
ipfw_log(struct ip_fw_chain *chain, struct ip_fw *f, u_int hlen,
struct ip_fw_args *args, struct mbuf *m, struct ifnet *oif,
u_short offset, uint32_t tablearg, struct ip *ip)
{
char *action;
int limit_reached = 0;
char action2[92], proto[128], fragment[32];
if (V_fw_verbose == 0) {
if (args->eh) /* layer2, use orig hdr */
ipfw_bpf_mtap2(args->eh, ETHER_HDR_LEN, m);
else {
/* Add fake header. Later we will store
* more info in the header.
*/
if (ip->ip_v == 4)
ipfw_bpf_mtap2("DDDDDDSSSSSS\x08\x00",
ETHER_HDR_LEN, m);
else if (ip->ip_v == 6)
ipfw_bpf_mtap2("DDDDDDSSSSSS\x86\xdd",
ETHER_HDR_LEN, m);
else
/* Obviously bogus EtherType. */
ipfw_bpf_mtap2("DDDDDDSSSSSS\xff\xff",
ETHER_HDR_LEN, m);
}
return;
}
/* the old 'log' function */
fragment[0] = '\0';
proto[0] = '\0';
if (f == NULL) { /* bogus pkt */
if (V_verbose_limit != 0 && V_norule_counter >= V_verbose_limit)
return;
V_norule_counter++;
if (V_norule_counter == V_verbose_limit)
limit_reached = V_verbose_limit;
action = "Refuse";
} else { /* O_LOG is the first action, find the real one */
ipfw_insn *cmd = ACTION_PTR(f);
ipfw_insn_log *l = (ipfw_insn_log *)cmd;
if (l->max_log != 0 && l->log_left == 0)
return;
l->log_left--;
if (l->log_left == 0)
limit_reached = l->max_log;
cmd += F_LEN(cmd); /* point to first action */
if (cmd->opcode == O_ALTQ) {
ipfw_insn_altq *altq = (ipfw_insn_altq *)cmd;
snprintf(SNPARGS(action2, 0), "Altq %d",
altq->qid);
cmd += F_LEN(cmd);
}
if (cmd->opcode == O_PROB || cmd->opcode == O_TAG ||
cmd->opcode == O_SETDSCP)
cmd += F_LEN(cmd);
action = action2;
switch (cmd->opcode) {
case O_DENY:
action = "Deny";
break;
case O_REJECT:
if (cmd->arg1==ICMP_REJECT_RST)
action = "Reset";
else if (cmd->arg1==ICMP_UNREACH_HOST)
action = "Reject";
else
snprintf(SNPARGS(action2, 0), "Unreach %d",
cmd->arg1);
break;
case O_UNREACH6:
if (cmd->arg1==ICMP6_UNREACH_RST)
action = "Reset";
else
snprintf(SNPARGS(action2, 0), "Unreach %d",
cmd->arg1);
break;
case O_ACCEPT:
action = "Accept";
break;
case O_COUNT:
action = "Count";
break;
case O_DIVERT:
snprintf(SNPARGS(action2, 0), "Divert %d",
TARG(cmd->arg1, divert));
break;
case O_TEE:
snprintf(SNPARGS(action2, 0), "Tee %d",
TARG(cmd->arg1, divert));
break;
case O_SETFIB:
snprintf(SNPARGS(action2, 0), "SetFib %d",
TARG(cmd->arg1, fib) & 0x7FFF);
break;
case O_SKIPTO:
snprintf(SNPARGS(action2, 0), "SkipTo %d",
TARG(cmd->arg1, skipto));
break;
case O_PIPE:
snprintf(SNPARGS(action2, 0), "Pipe %d",
TARG(cmd->arg1, pipe));
break;
case O_QUEUE:
snprintf(SNPARGS(action2, 0), "Queue %d",
TARG(cmd->arg1, pipe));
break;
case O_FORWARD_IP: {
char buf[INET_ADDRSTRLEN];
ipfw_insn_sa *sa = (ipfw_insn_sa *)cmd;
int len;
struct in_addr dummyaddr;
if (sa->sa.sin_addr.s_addr == INADDR_ANY)
dummyaddr.s_addr = htonl(tablearg);
else
dummyaddr.s_addr = sa->sa.sin_addr.s_addr;
len = snprintf(SNPARGS(action2, 0), "Forward to %s",
inet_ntoa_r(dummyaddr, buf));
if (sa->sa.sin_port)
snprintf(SNPARGS(action2, len), ":%d",
sa->sa.sin_port);
}
break;
#ifdef INET6
case O_FORWARD_IP6: {
char buf[INET6_ADDRSTRLEN];
ipfw_insn_sa6 *sa = (ipfw_insn_sa6 *)cmd;
int len;
len = snprintf(SNPARGS(action2, 0), "Forward to [%s]",
ip6_sprintf(buf, &sa->sa.sin6_addr));
if (sa->sa.sin6_port)
snprintf(SNPARGS(action2, len), ":%u",
sa->sa.sin6_port);
}
break;
#endif
case O_NETGRAPH:
snprintf(SNPARGS(action2, 0), "Netgraph %d",
cmd->arg1);
break;
case O_NGTEE:
snprintf(SNPARGS(action2, 0), "Ngtee %d",
cmd->arg1);
break;
case O_NAT:
action = "Nat";
break;
case O_REASS:
action = "Reass";
break;
case O_CALLRETURN:
if (cmd->len & F_NOT)
action = "Return";
else
snprintf(SNPARGS(action2, 0), "Call %d",
cmd->arg1);
break;
case O_EXTERNAL_ACTION:
snprintf(SNPARGS(action2, 0), "Eaction %s",
((struct named_object *)SRV_OBJECT(chain,
cmd->arg1))->name);
break;
default:
action = "UNKNOWN";
break;
}
}
if (hlen == 0) { /* non-ip */
snprintf(SNPARGS(proto, 0), "MAC");
} else {
int len;
#ifdef INET6
char src[INET6_ADDRSTRLEN + 2], dst[INET6_ADDRSTRLEN + 2];
#else
char src[INET_ADDRSTRLEN], dst[INET_ADDRSTRLEN];
#endif
struct icmphdr *icmp;
struct tcphdr *tcp;
struct udphdr *udp;
#ifdef INET6
struct ip6_hdr *ip6 = NULL;
struct icmp6_hdr *icmp6;
u_short ip6f_mf;
#endif
src[0] = '\0';
dst[0] = '\0';
#ifdef INET6
ip6f_mf = offset & IP6F_MORE_FRAG;
offset &= IP6F_OFF_MASK;
if (IS_IP6_FLOW_ID(&(args->f_id))) {
char ip6buf[INET6_ADDRSTRLEN];
snprintf(src, sizeof(src), "[%s]",
ip6_sprintf(ip6buf, &args->f_id.src_ip6));
snprintf(dst, sizeof(dst), "[%s]",
ip6_sprintf(ip6buf, &args->f_id.dst_ip6));
ip6 = (struct ip6_hdr *)ip;
tcp = (struct tcphdr *)(((char *)ip) + hlen);
udp = (struct udphdr *)(((char *)ip) + hlen);
} else
#endif
{
tcp = L3HDR(struct tcphdr, ip);
udp = L3HDR(struct udphdr, ip);
inet_ntop(AF_INET, &ip->ip_src, src, sizeof(src));
inet_ntop(AF_INET, &ip->ip_dst, dst, sizeof(dst));
}
switch (args->f_id.proto) {
case IPPROTO_TCP:
len = snprintf(SNPARGS(proto, 0), "TCP %s", src);
if (offset == 0)
snprintf(SNPARGS(proto, len), ":%d %s:%d",
ntohs(tcp->th_sport),
dst,
ntohs(tcp->th_dport));
else
snprintf(SNPARGS(proto, len), " %s", dst);
break;
case IPPROTO_UDP:
len = snprintf(SNPARGS(proto, 0), "UDP %s", src);
if (offset == 0)
snprintf(SNPARGS(proto, len), ":%d %s:%d",
ntohs(udp->uh_sport),
dst,
ntohs(udp->uh_dport));
else
snprintf(SNPARGS(proto, len), " %s", dst);
break;
case IPPROTO_ICMP:
icmp = L3HDR(struct icmphdr, ip);
if (offset == 0)
len = snprintf(SNPARGS(proto, 0),
"ICMP:%u.%u ",
icmp->icmp_type, icmp->icmp_code);
else
len = snprintf(SNPARGS(proto, 0), "ICMP ");
len += snprintf(SNPARGS(proto, len), "%s", src);
snprintf(SNPARGS(proto, len), " %s", dst);
break;
#ifdef INET6
case IPPROTO_ICMPV6:
icmp6 = (struct icmp6_hdr *)(((char *)ip) + hlen);
if (offset == 0)
len = snprintf(SNPARGS(proto, 0),
"ICMPv6:%u.%u ",
icmp6->icmp6_type, icmp6->icmp6_code);
else
len = snprintf(SNPARGS(proto, 0), "ICMPv6 ");
len += snprintf(SNPARGS(proto, len), "%s", src);
snprintf(SNPARGS(proto, len), " %s", dst);
break;
#endif
default:
len = snprintf(SNPARGS(proto, 0), "P:%d %s",
args->f_id.proto, src);
snprintf(SNPARGS(proto, len), " %s", dst);
break;
}
#ifdef INET6
if (IS_IP6_FLOW_ID(&(args->f_id))) {
if (offset || ip6f_mf)
snprintf(SNPARGS(fragment, 0),
" (frag %08x:%d@%d%s)",
args->f_id.extra,
ntohs(ip6->ip6_plen) - hlen,
ntohs(offset) << 3, ip6f_mf ? "+" : "");
} else
#endif
{
int ipoff, iplen;
ipoff = ntohs(ip->ip_off);
iplen = ntohs(ip->ip_len);
if (ipoff & (IP_MF | IP_OFFMASK))
snprintf(SNPARGS(fragment, 0),
" (frag %d:%d@%d%s)",
ntohs(ip->ip_id), iplen - (ip->ip_hl << 2),
offset << 3,
(ipoff & IP_MF) ? "+" : "");
}
}
#ifdef __FreeBSD__
if (oif || m->m_pkthdr.rcvif)
log(LOG_SECURITY | LOG_INFO,
"ipfw: %d %s %s %s via %s%s\n",
f ? f->rulenum : -1,
action, proto, oif ? "out" : "in",
oif ? oif->if_xname : m->m_pkthdr.rcvif->if_xname,
fragment);
else
#endif
log(LOG_SECURITY | LOG_INFO,
"ipfw: %d %s %s [no if info]%s\n",
f ? f->rulenum : -1,
action, proto, fragment);
if (limit_reached)
log(LOG_SECURITY | LOG_NOTICE,
"ipfw: limit %d reached on entry %d\n",
limit_reached, f ? f->rulenum : -1);
}
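/*
 * Example (illustrative): for a hypothetical "deny log tcp" rule
 * number 100 matching an inbound packet on em0, the format strings
 * above produce a syslog line such as
 *
 *   ipfw: 100 Deny TCP 10.0.0.1:1234 10.0.0.2:80 in via em0
 *
 * with an optional " (frag ...)" suffix for fragmented packets.
 */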
/* end of file */

File diff suppressed because it is too large


@@ -1,582 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2004 Andre Oppermann, Internet Business Solutions AG
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <rtems/bsd/local/opt_ipfw.h>
#include <rtems/bsd/local/opt_inet.h>
#include <rtems/bsd/local/opt_inet6.h>
#ifndef INET
#error IPFIREWALL requires INET.
#endif /* INET */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/route.h>
#include <net/ethernet.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet6/scope6_var.h>
#endif
#include <netgraph/ng_ipfw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <machine/in_cksum.h>
static VNET_DEFINE(int, fw_enable) = 1;
#define V_fw_enable VNET(fw_enable)
#ifdef INET6
static VNET_DEFINE(int, fw6_enable) = 1;
#define V_fw6_enable VNET(fw6_enable)
#endif
static VNET_DEFINE(int, fwlink_enable) = 0;
#define V_fwlink_enable VNET(fwlink_enable)
int ipfw_chg_hook(SYSCTL_HANDLER_ARGS);
/* Forward declarations. */
static int ipfw_divert(struct mbuf **, int, struct ipfw_rule_ref *, int);
int ipfw_check_packet(void *, struct mbuf **, struct ifnet *, int,
struct inpcb *);
int ipfw_check_frame(void *, struct mbuf **, struct ifnet *, int,
struct inpcb *);
#ifdef SYSCTL_NODE
SYSBEGIN(f1)
SYSCTL_DECL(_net_inet_ip_fw);
SYSCTL_PROC(_net_inet_ip_fw, OID_AUTO, enable,
CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3,
&VNET_NAME(fw_enable), 0, ipfw_chg_hook, "I", "Enable ipfw");
#ifdef INET6
SYSCTL_DECL(_net_inet6_ip6_fw);
SYSCTL_PROC(_net_inet6_ip6_fw, OID_AUTO, enable,
CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3,
&VNET_NAME(fw6_enable), 0, ipfw_chg_hook, "I", "Enable ipfw+6");
#endif /* INET6 */
SYSCTL_DECL(_net_link_ether);
SYSCTL_PROC(_net_link_ether, OID_AUTO, ipfw,
CTLFLAG_VNET | CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_SECURE3,
&VNET_NAME(fwlink_enable), 0, ipfw_chg_hook, "I",
"Pass ether pkts through firewall");
SYSEND
#endif /* SYSCTL_NODE */
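/*
 * Example (illustrative): the knobs declared above map to the sysctls
 *
 *   net.inet.ip.fw.enable     (IPv4 hook)
 *   net.inet6.ip6.fw.enable   (IPv6 hook)
 *   net.link.ether.ipfw       (layer-2 hook)
 *
 * and toggling them runs ipfw_chg_hook() below to attach or detach the
 * corresponding pfil hook.
 */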
/*
* The pfilter hook to pass packets to ipfw_chk and then to
* dummynet, divert, netgraph or other modules.
* The packet may be consumed.
*/
int
ipfw_check_packet(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
struct inpcb *inp)
{
struct ip_fw_args args;
struct m_tag *tag;
int ipfw;
int ret;
/* convert dir to IPFW values */
dir = (dir == PFIL_IN) ? DIR_IN : DIR_OUT;
bzero(&args, sizeof(args));
again:
/*
* extract and remove the tag if present. If we are left
* with onepass, optimize the outgoing path.
*/
tag = m_tag_locate(*m0, MTAG_IPFW_RULE, 0, NULL);
if (tag != NULL) {
args.rule = *((struct ipfw_rule_ref *)(tag+1));
m_tag_delete(*m0, tag);
if (args.rule.info & IPFW_ONEPASS)
return (0);
}
args.m = *m0;
args.oif = dir == DIR_OUT ? ifp : NULL;
args.inp = inp;
ipfw = ipfw_chk(&args);
*m0 = args.m;
KASSERT(*m0 != NULL || ipfw == IP_FW_DENY, ("%s: m0 is NULL",
__func__));
/* breaking out of the switch means drop */
ret = 0; /* default return value for pass */
switch (ipfw) {
case IP_FW_PASS:
/* next_hop may be set by ipfw_chk */
if (args.next_hop == NULL && args.next_hop6 == NULL)
break; /* pass */
#if (!defined(INET6) && !defined(INET))
ret = EACCES;
#else
{
struct m_tag *fwd_tag;
size_t len;
KASSERT(args.next_hop == NULL || args.next_hop6 == NULL,
("%s: both next_hop=%p and next_hop6=%p not NULL", __func__,
args.next_hop, args.next_hop6));
#ifdef INET6
if (args.next_hop6 != NULL)
len = sizeof(struct sockaddr_in6);
#endif
#ifdef INET
if (args.next_hop != NULL)
len = sizeof(struct sockaddr_in);
#endif
/* Incoming packets should not be tagged so we do not
* m_tag_find. Outgoing packets may be tagged, so we
* reuse the tag if present.
*/
fwd_tag = (dir == DIR_IN) ? NULL :
m_tag_find(*m0, PACKET_TAG_IPFORWARD, NULL);
if (fwd_tag != NULL) {
m_tag_unlink(*m0, fwd_tag);
} else {
fwd_tag = m_tag_get(PACKET_TAG_IPFORWARD, len,
M_NOWAIT);
if (fwd_tag == NULL) {
ret = EACCES;
break; /* i.e. drop */
}
}
#ifdef INET6
if (args.next_hop6 != NULL) {
struct sockaddr_in6 *sa6;
sa6 = (struct sockaddr_in6 *)(fwd_tag + 1);
bcopy(args.next_hop6, sa6, len);
/*
* If nh6 address is link-local we should convert
* it to kernel internal form before doing any
* comparisons.
*/
if (sa6_embedscope(sa6, V_ip6_use_defzone) != 0) {
ret = EACCES;
break;
}
if (in6_localip(&sa6->sin6_addr))
(*m0)->m_flags |= M_FASTFWD_OURS;
(*m0)->m_flags |= M_IP6_NEXTHOP;
}
#endif
#ifdef INET
if (args.next_hop != NULL) {
bcopy(args.next_hop, (fwd_tag+1), len);
if (in_localip(args.next_hop->sin_addr))
(*m0)->m_flags |= M_FASTFWD_OURS;
(*m0)->m_flags |= M_IP_NEXTHOP;
}
#endif
m_tag_prepend(*m0, fwd_tag);
}
#endif /* INET || INET6 */
break;
case IP_FW_DENY:
ret = EACCES;
break; /* i.e. drop */
case IP_FW_DUMMYNET:
ret = EACCES;
if (ip_dn_io_ptr == NULL)
break; /* i.e. drop */
if (mtod(*m0, struct ip *)->ip_v == 4)
ret = ip_dn_io_ptr(m0, dir, &args);
else if (mtod(*m0, struct ip *)->ip_v == 6)
ret = ip_dn_io_ptr(m0, dir | PROTO_IPV6, &args);
else
break; /* drop it */
/*
* XXX should read the return value.
* dummynet normally eats the packet and sets *m0=NULL
* unless the packet can be sent immediately. In this
* case args is updated and we should re-run the
* check without clearing args.
*/
if (*m0 != NULL)
goto again;
break;
case IP_FW_TEE:
case IP_FW_DIVERT:
if (ip_divert_ptr == NULL) {
ret = EACCES;
break; /* i.e. drop */
}
ret = ipfw_divert(m0, dir, &args.rule,
(ipfw == IP_FW_TEE) ? 1 : 0);
/* continue processing for the original packet (tee). */
if (*m0)
goto again;
break;
case IP_FW_NGTEE:
case IP_FW_NETGRAPH:
if (ng_ipfw_input_p == NULL) {
ret = EACCES;
break; /* i.e. drop */
}
ret = ng_ipfw_input_p(m0, dir, &args,
(ipfw == IP_FW_NGTEE) ? 1 : 0);
if (ipfw == IP_FW_NGTEE) /* ignore errors for NGTEE */
goto again; /* continue with packet */
break;
case IP_FW_NAT:
/* honor one-pass in case of successful nat */
if (V_fw_one_pass)
break; /* ret is already 0 */
goto again;
case IP_FW_REASS:
goto again; /* continue with packet */
default:
KASSERT(0, ("%s: unknown retval", __func__));
}
if (ret != 0) {
if (*m0)
FREE_PKT(*m0);
*m0 = NULL;
}
return ret;
}
/*
* ipfw processing for ethernet packets (in and out).
*/
int
ipfw_check_frame(void *arg, struct mbuf **m0, struct ifnet *ifp, int dir,
struct inpcb *inp)
{
struct ether_header *eh;
struct ether_header save_eh;
struct mbuf *m;
int i, ret;
struct ip_fw_args args;
struct m_tag *mtag;
/* fetch start point from rule, if any. remove the tag if present. */
mtag = m_tag_locate(*m0, MTAG_IPFW_RULE, 0, NULL);
if (mtag == NULL) {
args.rule.slot = 0;
} else {
args.rule = *((struct ipfw_rule_ref *)(mtag+1));
m_tag_delete(*m0, mtag);
if (args.rule.info & IPFW_ONEPASS)
return (0);
}
/* I need some amt of data to be contiguous */
m = *m0;
i = min(m->m_pkthdr.len, max_protohdr);
if (m->m_len < i) {
m = m_pullup(m, i);
if (m == NULL) {
*m0 = m;
return (0);
}
}
eh = mtod(m, struct ether_header *);
save_eh = *eh; /* save copy for restore below */
m_adj(m, ETHER_HDR_LEN); /* strip ethernet header */
args.m = m; /* the packet we are looking at */
args.oif = dir == PFIL_OUT ? ifp: NULL; /* destination, if any */
args.next_hop = NULL; /* we do not support forward yet */
args.next_hop6 = NULL; /* we do not support forward yet */
args.eh = &save_eh; /* MAC header for bridged/MAC packets */
args.inp = NULL; /* used by ipfw uid/gid/jail rules */
i = ipfw_chk(&args);
m = args.m;
if (m != NULL) {
/*
* Restore Ethernet header, as needed, in case the
* mbuf chain was replaced by ipfw.
*/
M_PREPEND(m, ETHER_HDR_LEN, M_NOWAIT);
if (m == NULL) {
*m0 = NULL;
return (0);
}
if (eh != mtod(m, struct ether_header *))
bcopy(&save_eh, mtod(m, struct ether_header *),
ETHER_HDR_LEN);
}
*m0 = m;
ret = 0;
/* Check result of ipfw_chk() */
switch (i) {
case IP_FW_PASS:
break;
case IP_FW_DENY:
ret = EACCES;
break; /* i.e. drop */
case IP_FW_DUMMYNET:
ret = EACCES;
if (ip_dn_io_ptr == NULL)
break; /* i.e. drop */
*m0 = NULL;
dir = (dir == PFIL_IN) ? DIR_IN : DIR_OUT;
ip_dn_io_ptr(&m, dir | PROTO_LAYER2, &args);
return 0;
default:
KASSERT(0, ("%s: unknown retval", __func__));
}
if (ret != 0) {
if (*m0)
FREE_PKT(*m0);
*m0 = NULL;
}
return ret;
}
/* do the divert, return 1 on error 0 on success */
static int
ipfw_divert(struct mbuf **m0, int incoming, struct ipfw_rule_ref *rule,
int tee)
{
/*
* ipfw_chk() has already tagged the packet with the divert tag.
* If tee is set, copy packet and return original.
* If not tee, consume packet and send it to divert socket.
*/
struct mbuf *clone;
struct ip *ip = mtod(*m0, struct ip *);
struct m_tag *tag;
/* Cloning needed for tee? */
if (tee == 0) {
clone = *m0; /* use the original mbuf */
*m0 = NULL;
} else {
clone = m_dup(*m0, M_NOWAIT);
/* If we cannot duplicate the mbuf, we sacrifice the divert
* chain and continue with the tee-ed packet.
*/
if (clone == NULL)
return 1;
}
/*
* Divert listeners can normally handle non-fragmented packets,
* but we can only reass in the non-tee case.
* This means that listeners on a tee rule may get fragments,
* and have to live with that.
* Note that we now have the 'reass' ipfw option so if we care
* we can do it before a 'tee'.
*/
if (!tee) switch (ip->ip_v) {
case IPVERSION:
if (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) {
int hlen;
struct mbuf *reass;
reass = ip_reass(clone); /* Reassemble packet. */
if (reass == NULL)
return 0; /* not an error */
/* if reass = NULL then it was consumed by ip_reass */
/*
* IP header checksum fixup after reassembly and leave header
* in network byte order.
*/
ip = mtod(reass, struct ip *);
hlen = ip->ip_hl << 2;
ip->ip_sum = 0;
if (hlen == sizeof(struct ip))
ip->ip_sum = in_cksum_hdr(ip);
else
ip->ip_sum = in_cksum(reass, hlen);
clone = reass;
}
break;
#ifdef INET6
case IPV6_VERSION >> 4:
{
struct ip6_hdr *const ip6 = mtod(clone, struct ip6_hdr *);
if (ip6->ip6_nxt == IPPROTO_FRAGMENT) {
int nxt, off;
off = sizeof(struct ip6_hdr);
nxt = frag6_input(&clone, &off, 0);
if (nxt == IPPROTO_DONE)
return (0);
}
break;
}
#endif
}
/* attach a tag to the packet with the reinject info */
tag = m_tag_alloc(MTAG_IPFW_RULE, 0,
sizeof(struct ipfw_rule_ref), M_NOWAIT);
if (tag == NULL) {
FREE_PKT(clone);
return 1;
}
*((struct ipfw_rule_ref *)(tag+1)) = *rule;
m_tag_prepend(clone, tag);
/* Do the dirty job... */
ip_divert_ptr(clone, incoming);
return 0;
}
/*
* attach or detach hooks for a given protocol family
*/
static int
ipfw_hook(int onoff, int pf)
{
struct pfil_head *pfh;
pfil_func_t hook_func;
pfh = pfil_head_get(PFIL_TYPE_AF, pf);
if (pfh == NULL)
return ENOENT;
hook_func = (pf == AF_LINK) ? ipfw_check_frame : ipfw_check_packet;
(void) (onoff ? pfil_add_hook : pfil_remove_hook)
(hook_func, NULL, PFIL_IN | PFIL_OUT | PFIL_WAITOK, pfh);
return 0;
}
int
ipfw_attach_hooks(int arg)
{
int error = 0;
if (arg == 0) /* detach */
ipfw_hook(0, AF_INET);
else if (V_fw_enable && ipfw_hook(1, AF_INET) != 0) {
error = ENOENT; /* see ip_fw_pfil.c::ipfw_hook() */
printf("ipfw_hook() error\n");
}
#ifdef INET6
if (arg == 0) /* detach */
ipfw_hook(0, AF_INET6);
else if (V_fw6_enable && ipfw_hook(1, AF_INET6) != 0) {
error = ENOENT;
printf("ipfw6_hook() error\n");
}
#endif
if (arg == 0) /* detach */
ipfw_hook(0, AF_LINK);
else if (V_fwlink_enable && ipfw_hook(1, AF_LINK) != 0) {
error = ENOENT;
printf("ipfw_link_hook() error\n");
}
return error;
}
int
ipfw_chg_hook(SYSCTL_HANDLER_ARGS)
{
int newval;
int error;
int af;
if (arg1 == &V_fw_enable)
af = AF_INET;
#ifdef INET6
else if (arg1 == &V_fw6_enable)
af = AF_INET6;
#endif
else if (arg1 == &V_fwlink_enable)
af = AF_LINK;
else
return (EINVAL);
newval = *(int *)arg1;
/* Handle sysctl change */
error = sysctl_handle_int(oidp, &newval, 0, req);
if (error)
return (error);
/* Formalize new value */
newval = (newval) ? 1 : 0;
if (*(int *)arg1 == newval)
return (0);
error = ipfw_hook(newval, af);
if (error)
return (error);
*(int *)arg1 = newval;
return (0);
}
/* end of file */

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -1,234 +0,0 @@
/*-
* Copyright (c) 2002-2009 Luigi Rizzo, Universita` di Pisa
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IPFW2_TABLE_H
#define _IPFW2_TABLE_H
/*
* Internal constants and data structures used by ipfw tables
* not meant to be exported outside the kernel.
*/
#ifdef _KERNEL
struct table_algo;
struct tables_config {
struct namedobj_instance *namehash;
struct namedobj_instance *valhash;
uint32_t val_size;
uint32_t algo_count;
struct table_algo *algo[256];
struct table_algo *def_algo[IPFW_TABLE_MAXTYPE + 1];
TAILQ_HEAD(op_state_l,op_state) state_list;
};
#define CHAIN_TO_TCFG(chain) ((struct tables_config *)(chain)->tblcfg)
struct table_info {
table_lookup_t *lookup; /* Lookup function */
void *state; /* Lookup radix/other structure */
void *xstate; /* eXtended state */
u_long data; /* Hints for given func */
};
struct table_value;
struct tentry_info {
void *paddr;
struct table_value *pvalue;
void *ptv; /* Temporary field to hold obj */
uint8_t masklen; /* mask length */
uint8_t subtype;
uint16_t flags; /* record flags */
uint32_t value; /* value index */
};
#define TEI_FLAGS_UPDATE 0x0001 /* Add or update rec if exists */
#define TEI_FLAGS_UPDATED 0x0002 /* Entry has been updated */
#define TEI_FLAGS_COMPAT 0x0004 /* Called from old ABI */
#define TEI_FLAGS_DONTADD 0x0008 /* Do not create new rec */
#define TEI_FLAGS_ADDED 0x0010 /* Entry was added */
#define TEI_FLAGS_DELETED 0x0020 /* Entry was deleted */
#define TEI_FLAGS_LIMIT 0x0040 /* Limit was hit */
#define TEI_FLAGS_ERROR 0x0080 /* Unknown request error */
#define TEI_FLAGS_NOTFOUND 0x0100 /* Entry was not found */
#define TEI_FLAGS_EXISTS 0x0200 /* Entry already exists */
typedef int (ta_init)(struct ip_fw_chain *ch, void **ta_state,
struct table_info *ti, char *data, uint8_t tflags);
typedef void (ta_destroy)(void *ta_state, struct table_info *ti);
typedef int (ta_prepare_add)(struct ip_fw_chain *ch, struct tentry_info *tei,
void *ta_buf);
typedef int (ta_prepare_del)(struct ip_fw_chain *ch, struct tentry_info *tei,
void *ta_buf);
typedef int (ta_add)(void *ta_state, struct table_info *ti,
struct tentry_info *tei, void *ta_buf, uint32_t *pnum);
typedef int (ta_del)(void *ta_state, struct table_info *ti,
struct tentry_info *tei, void *ta_buf, uint32_t *pnum);
typedef void (ta_flush_entry)(struct ip_fw_chain *ch, struct tentry_info *tei,
void *ta_buf);
typedef int (ta_need_modify)(void *ta_state, struct table_info *ti,
uint32_t count, uint64_t *pflags);
typedef int (ta_prepare_mod)(void *ta_buf, uint64_t *pflags);
typedef int (ta_fill_mod)(void *ta_state, struct table_info *ti,
void *ta_buf, uint64_t *pflags);
typedef void (ta_modify)(void *ta_state, struct table_info *ti,
void *ta_buf, uint64_t pflags);
typedef void (ta_flush_mod)(void *ta_buf);
typedef void (ta_change_ti)(void *ta_state, struct table_info *ti);
typedef void (ta_print_config)(void *ta_state, struct table_info *ti, char *buf,
size_t bufsize);
typedef int ta_foreach_f(void *node, void *arg);
typedef void ta_foreach(void *ta_state, struct table_info *ti, ta_foreach_f *f,
void *arg);
typedef int ta_dump_tentry(void *ta_state, struct table_info *ti, void *e,
ipfw_obj_tentry *tent);
typedef int ta_find_tentry(void *ta_state, struct table_info *ti,
ipfw_obj_tentry *tent);
typedef void ta_dump_tinfo(void *ta_state, struct table_info *ti,
ipfw_ta_tinfo *tinfo);
typedef uint32_t ta_get_count(void *ta_state, struct table_info *ti);
struct table_algo {
char name[16];
uint32_t idx;
uint32_t type;
uint32_t refcnt;
uint32_t flags;
uint32_t vlimit;
size_t ta_buf_size;
ta_init *init;
ta_destroy *destroy;
ta_prepare_add *prepare_add;
ta_prepare_del *prepare_del;
ta_add *add;
ta_del *del;
ta_flush_entry *flush_entry;
ta_find_tentry *find_tentry;
ta_need_modify *need_modify;
ta_prepare_mod *prepare_mod;
ta_fill_mod *fill_mod;
ta_modify *modify;
ta_flush_mod *flush_mod;
ta_change_ti *change_ti;
ta_foreach *foreach;
ta_dump_tentry *dump_tentry;
ta_print_config *print_config;
ta_dump_tinfo *dump_tinfo;
ta_get_count *get_count;
};
#define TA_FLAG_DEFAULT 0x01 /* Algo is default for given type */
#define TA_FLAG_READONLY 0x02 /* Algo does not support modifications*/
#define TA_FLAG_EXTCOUNTER 0x04 /* Algo has external counter available*/
int ipfw_add_table_algo(struct ip_fw_chain *ch, struct table_algo *ta,
size_t size, int *idx);
void ipfw_del_table_algo(struct ip_fw_chain *ch, int idx);
void ipfw_table_algo_init(struct ip_fw_chain *chain);
void ipfw_table_algo_destroy(struct ip_fw_chain *chain);
MALLOC_DECLARE(M_IPFW_TBL);
/* Exported to support legacy opcodes */
int add_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
struct tentry_info *tei, uint8_t flags, uint32_t count);
int del_table_entry(struct ip_fw_chain *ch, struct tid_info *ti,
struct tentry_info *tei, uint8_t flags, uint32_t count);
int flush_table(struct ip_fw_chain *ch, struct tid_info *ti);
void ipfw_import_table_value_legacy(uint32_t value, struct table_value *v);
uint32_t ipfw_export_table_value_legacy(struct table_value *v);
int ipfw_get_table_size(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd);
/* ipfw_table_value.c functions */
struct table_config;
struct tableop_state;
void ipfw_table_value_init(struct ip_fw_chain *ch, int first);
void ipfw_table_value_destroy(struct ip_fw_chain *ch, int last);
int ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts);
void ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct tentry_info *tei, uint32_t count, int rollback);
void ipfw_import_table_value_v1(ipfw_table_value *iv);
void ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *iv);
void ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct table_algo *ta, void *astate, struct table_info *ti);
void rollback_table_values(struct tableop_state *ts);
int ipfw_rewrite_table_uidx(struct ip_fw_chain *chain,
struct rule_check_info *ci);
int ipfw_mark_table_kidx(struct ip_fw_chain *chain, struct ip_fw *rule,
uint32_t *bmask);
int ipfw_export_table_ntlv(struct ip_fw_chain *ch, uint16_t kidx,
struct sockopt_data *sd);
void ipfw_unref_rule_tables(struct ip_fw_chain *chain, struct ip_fw *rule);
struct namedobj_instance *ipfw_get_table_objhash(struct ip_fw_chain *ch);
/* utility functions */
int ipfw_move_tables_sets(struct ip_fw_chain *ch, ipfw_range_tlv *rt,
uint32_t new_set);
void ipfw_swap_tables_sets(struct ip_fw_chain *ch, uint32_t old_set,
uint32_t new_set, int mv);
int ipfw_foreach_table_tentry(struct ip_fw_chain *ch, uint16_t kidx,
ta_foreach_f f, void *arg);
/* internal functions */
void tc_ref(struct table_config *tc);
void tc_unref(struct table_config *tc);
struct op_state;
typedef void (op_rollback_f)(void *object, struct op_state *state);
struct op_state {
TAILQ_ENTRY(op_state) next; /* chain link */
op_rollback_f *func;
};
struct tableop_state {
struct op_state opstate;
struct ip_fw_chain *ch;
struct table_config *tc;
struct table_algo *ta;
struct tentry_info *tei;
uint32_t count;
uint32_t vmask;
int vshared;
int modified;
};
void add_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts);
void del_toperation_state(struct ip_fw_chain *ch, struct tableop_state *ts);
void rollback_toperation_state(struct ip_fw_chain *ch, void *object);
/* Legacy interfaces */
int ipfw_count_table(struct ip_fw_chain *ch, struct tid_info *ti,
uint32_t *cnt);
int ipfw_count_xtable(struct ip_fw_chain *ch, struct tid_info *ti,
uint32_t *cnt);
int ipfw_dump_table_legacy(struct ip_fw_chain *ch, struct tid_info *ti,
ipfw_table *tbl);
#endif /* _KERNEL */
#endif /* _IPFW2_TABLE_H */

File diff suppressed because it is too large

View File

@@ -1,811 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2014 Yandex LLC
* Copyright (c) 2014 Alexander V. Chernikov
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
/*
* Multi-field value support for ipfw tables.
*
* This file contains necessary functions to convert
* large multi-field values into u32 indices suitable to be fed
* to various table algorithms. Other machinery like proper refcounting,
* internal structures resizing are also kept here.
*/
#include <rtems/bsd/local/opt_ipfw.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/hash.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/rmlock.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/queue.h>
#include <net/if.h> /* ip_fw.h requires IFNAMSIZ */
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/ip_var.h> /* struct ipfw_rule_ref */
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/ip_fw_table.h>
static uint32_t hash_table_value(struct namedobj_instance *ni, const void *key,
uint32_t kopt);
static int cmp_table_value(struct named_object *no, const void *key,
uint32_t kopt);
static int list_table_values(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd);
static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_TABLE_VLIST, 0, HDIR_GET, list_table_values },
};
#define CHAIN_TO_VI(chain) (CHAIN_TO_TCFG(chain)->valhash)
struct table_val_link
{
struct named_object no;
struct table_value *pval; /* Pointer to real table value */
};
#define VALDATA_START_SIZE 64 /* Allocate 64-items array by default */
struct vdump_args {
struct ip_fw_chain *ch;
struct sockopt_data *sd;
struct table_value *pval;
int error;
};
static uint32_t
hash_table_value(struct namedobj_instance *ni, const void *key, uint32_t kopt)
{
return (hash32_buf(key, 56, 0));
}
static int
cmp_table_value(struct named_object *no, const void *key, uint32_t kopt)
{
return (memcmp(((struct table_val_link *)no)->pval, key, 56));
}
static void
mask_table_value(struct table_value *src, struct table_value *dst,
uint32_t mask)
{
#define _MCPY(f, b) if ((mask & (b)) != 0) { dst->f = src->f; }
memset(dst, 0, sizeof(*dst));
_MCPY(tag, IPFW_VTYPE_TAG);
_MCPY(pipe, IPFW_VTYPE_PIPE);
_MCPY(divert, IPFW_VTYPE_DIVERT);
_MCPY(skipto, IPFW_VTYPE_SKIPTO);
_MCPY(netgraph, IPFW_VTYPE_NETGRAPH);
_MCPY(fib, IPFW_VTYPE_FIB);
_MCPY(nat, IPFW_VTYPE_NAT);
_MCPY(dscp, IPFW_VTYPE_DSCP);
_MCPY(nh4, IPFW_VTYPE_NH4);
_MCPY(nh6, IPFW_VTYPE_NH6);
_MCPY(zoneid, IPFW_VTYPE_NH6);
#undef _MCPY
}
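/*
 * Editor's illustrative sketch (not part of the original sources):
 * mask_table_value() copies only the fields selected by the @mask bits,
 * so a lookup key built this way matches entries that agree on the
 * masked fields and ignores everything else.  Hypothetical usage:
 */
#if 0	/* example only, not compiled */
static void
example_fib_dscp_key(struct table_value *src, struct table_value *key)
{

	/* The key considers only the fib and dscp fields of @src. */
	mask_table_value(src, key, IPFW_VTYPE_FIB | IPFW_VTYPE_DSCP);
}
#endif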
static void
get_value_ptrs(struct ip_fw_chain *ch, struct table_config *tc, int vshared,
struct table_value **ptv, struct namedobj_instance **pvi)
{
struct table_value *pval;
struct namedobj_instance *vi;
if (vshared != 0) {
pval = (struct table_value *)ch->valuestate;
vi = CHAIN_TO_VI(ch);
} else {
pval = NULL;
vi = NULL;
//pval = (struct table_value *)&tc->ti.data;
}
if (ptv != NULL)
*ptv = pval;
if (pvi != NULL)
*pvi = vi;
}
/*
 * Update pointers to real values after @pval change.
*/
static int
update_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
{
struct vdump_args *da;
struct table_val_link *ptv;
struct table_value *pval;
da = (struct vdump_args *)arg;
ptv = (struct table_val_link *)no;
pval = da->pval;
ptv->pval = &pval[ptv->no.kidx];
ptv->no.name = (char *)&pval[ptv->no.kidx];
return (0);
}
/*
* Grows value storage shared among all tables.
* Drops/reacquires UH locks.
* Notifies other running adds on @ch shared storage resize.
 * Note that the function does not guarantee that free space will be
 * available after invocation, so the caller needs to retry the
 * allocation cycle itself.
 *
 * Returns 0 in case of no errors.
*/
static int
resize_shared_value_storage(struct ip_fw_chain *ch)
{
struct tables_config *tcfg;
struct namedobj_instance *vi;
struct table_value *pval, *valuestate, *old_valuestate;
void *new_idx;
struct vdump_args da;
int new_blocks;
int val_size, val_size_old;
IPFW_UH_WLOCK_ASSERT(ch);
valuestate = NULL;
new_idx = NULL;
pval = (struct table_value *)ch->valuestate;
vi = CHAIN_TO_VI(ch);
tcfg = CHAIN_TO_TCFG(ch);
val_size = tcfg->val_size * 2;
if (val_size == (1 << 30))
return (ENOSPC);
IPFW_UH_WUNLOCK(ch);
valuestate = malloc(sizeof(struct table_value) * val_size, M_IPFW,
M_WAITOK | M_ZERO);
ipfw_objhash_bitmap_alloc(val_size, (void *)&new_idx,
&new_blocks);
IPFW_UH_WLOCK(ch);
/*
* Check if we still need to resize
*/
if (tcfg->val_size >= val_size)
goto done;
/* Update pointers and notify everyone we're changing @ch */
pval = (struct table_value *)ch->valuestate;
rollback_toperation_state(ch, ch);
/* Good. Let's merge */
memcpy(valuestate, pval, sizeof(struct table_value) * tcfg->val_size);
ipfw_objhash_bitmap_merge(CHAIN_TO_VI(ch), &new_idx, &new_blocks);
IPFW_WLOCK(ch);
/* Change pointers */
old_valuestate = ch->valuestate;
ch->valuestate = valuestate;
valuestate = old_valuestate;
ipfw_objhash_bitmap_swap(CHAIN_TO_VI(ch), &new_idx, &new_blocks);
val_size_old = tcfg->val_size;
tcfg->val_size = val_size;
val_size = val_size_old;
IPFW_WUNLOCK(ch);
/* Update pointers to reflect resize */
memset(&da, 0, sizeof(da));
da.pval = (struct table_value *)ch->valuestate;
ipfw_objhash_foreach(vi, update_tvalue, &da);
done:
free(valuestate, M_IPFW);
ipfw_objhash_bitmap_free(new_idx, new_blocks);
return (0);
}
/*
* Drops reference for table value with index @kidx, stored in @pval and
* @vi. Frees value if it has no references.
*/
static void
unref_table_value(struct namedobj_instance *vi, struct table_value *pval,
uint32_t kidx)
{
struct table_val_link *ptvl;
KASSERT(pval[kidx].refcnt > 0, ("Refcount is 0 on kidx %d", kidx));
if (--pval[kidx].refcnt > 0)
return;
/* Last reference, delete item */
ptvl = (struct table_val_link *)ipfw_objhash_lookup_kidx(vi, kidx);
KASSERT(ptvl != NULL, ("lookup on value kidx %d failed", kidx));
ipfw_objhash_del(vi, &ptvl->no);
ipfw_objhash_free_idx(vi, kidx);
free(ptvl, M_IPFW);
}
struct flush_args {
struct ip_fw_chain *ch;
struct table_algo *ta;
struct table_info *ti;
void *astate;
ipfw_obj_tentry tent;
};
static int
unref_table_value_cb(void *e, void *arg)
{
struct flush_args *fa;
struct ip_fw_chain *ch;
struct table_algo *ta;
ipfw_obj_tentry *tent;
int error;
fa = (struct flush_args *)arg;
ta = fa->ta;
memset(&fa->tent, 0, sizeof(fa->tent));
tent = &fa->tent;
error = ta->dump_tentry(fa->astate, fa->ti, e, tent);
if (error != 0)
return (error);
ch = fa->ch;
unref_table_value(CHAIN_TO_VI(ch),
(struct table_value *)ch->valuestate, tent->v.kidx);
return (0);
}
/*
* Drop references for each value used in @tc.
*/
void
ipfw_unref_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct table_algo *ta, void *astate, struct table_info *ti)
{
struct flush_args fa;
IPFW_UH_WLOCK_ASSERT(ch);
memset(&fa, 0, sizeof(fa));
fa.ch = ch;
fa.ta = ta;
fa.astate = astate;
fa.ti = ti;
ta->foreach(astate, ti, unref_table_value_cb, &fa);
}
/*
* Table operation state handler.
* Called when we are going to change something in @tc which
* may lead to inconsistencies in on-going table data addition.
*
* Here we rollback all already committed state (table values, currently)
* and set "modified" field to non-zero value to indicate
* that we need to restart original operation.
*/
void
rollback_table_values(struct tableop_state *ts)
{
struct ip_fw_chain *ch;
struct table_value *pval;
struct tentry_info *ptei;
struct namedobj_instance *vi;
int i;
ch = ts->ch;
IPFW_UH_WLOCK_ASSERT(ch);
/* Get current table value pointer */
get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);
for (i = 0; i < ts->count; i++) {
ptei = &ts->tei[i];
if (ptei->value == 0)
continue;
unref_table_value(vi, pval, ptei->value);
}
}
/*
* Allocate new value index in either shared or per-table array.
* Function may drop/reacquire UH lock.
*
* Returns 0 on success.
*/
static int
alloc_table_vidx(struct ip_fw_chain *ch, struct tableop_state *ts,
struct namedobj_instance *vi, uint16_t *pvidx)
{
int error, vlimit;
uint16_t vidx;
IPFW_UH_WLOCK_ASSERT(ch);
error = ipfw_objhash_alloc_idx(vi, &vidx);
if (error != 0) {
/*
* We need to resize array. This involves
* lock/unlock, so we need to check "modified"
* state.
*/
ts->opstate.func(ts->tc, &ts->opstate);
error = resize_shared_value_storage(ch);
return (error); /* ts->modified should be set, we will restart */
}
vlimit = ts->ta->vlimit;
if (vlimit != 0 && vidx >= vlimit) {
/*
* Algorithm is not able to store given index.
* We have to rollback state, start using
* per-table value array or return error
* if we're already using it.
*
* TODO: do not rollback state if
* atomicity is not required.
*/
if (ts->vshared != 0) {
/* shared -> per-table */
return (ENOSPC); /* TODO: proper error */
}
/* per-table. Fail for now. */
return (ENOSPC); /* TODO: proper error */
}
*pvidx = vidx;
return (0);
}
/*
* Drops value reference for unused values (updates, deletes, partially
* successful adds or rollbacks).
*/
void
ipfw_garbage_table_values(struct ip_fw_chain *ch, struct table_config *tc,
struct tentry_info *tei, uint32_t count, int rollback)
{
int i;
struct tentry_info *ptei;
struct table_value *pval;
struct namedobj_instance *vi;
/*
* We have two slightly different ADD cases here:
* either (1) we are successful / partially successful,
* in that case we need
	 *  * to ignore the values of ADDED entries
	 *  * to roll back every other value (either UPDATED, since the
	 *    old value has been stored there, or a failure like
	 *    EXISTS or LIMIT, or simply the "ignored" case).
*
* (2): atomic rollback of partially successful operation
* in that case we simply need to unref all entries.
*
* DELETE case is simpler: no atomic support there, so
* we simply unref all non-zero values.
*/
/*
* Get current table value pointers.
* XXX: Properly read vshared
*/
get_value_ptrs(ch, tc, 1, &pval, &vi);
for (i = 0; i < count; i++) {
ptei = &tei[i];
if (ptei->value == 0) {
/*
* We may be deleting non-existing record.
* Skip.
*/
continue;
}
if ((ptei->flags & TEI_FLAGS_ADDED) != 0 && rollback == 0) {
ptei->value = 0;
continue;
}
unref_table_value(vi, pval, ptei->value);
ptei->value = 0;
}
}
/*
* Main function used to link values of entries going to be added,
 * to the index. Since we may drop and reacquire the UH lock many times,
 * handle concurrent changes by checking the tableop state "modified" field.
*
* Success: return 0.
*/
int
ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
{
int error, i, found;
struct namedobj_instance *vi;
struct table_config *tc;
struct tentry_info *tei, *ptei;
uint32_t count, vlimit;
uint16_t vidx;
struct table_val_link *ptv;
struct table_value tval, *pval;
/*
* Stage 1: reference all existing values and
* save their indices.
*/
IPFW_UH_WLOCK_ASSERT(ch);
get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);
error = 0;
found = 0;
vlimit = ts->ta->vlimit;
vidx = 0;
tc = ts->tc;
tei = ts->tei;
count = ts->count;
for (i = 0; i < count; i++) {
ptei = &tei[i];
ptei->value = 0; /* Ensure value is always 0 in the beginning */
mask_table_value(ptei->pvalue, &tval, ts->vmask);
ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
(char *)&tval);
if (ptv == NULL)
continue;
/* Deal with vlimit later */
if (vlimit > 0 && vlimit <= ptv->no.kidx)
continue;
/* Value found. Bump refcount */
ptv->pval->refcnt++;
ptei->value = ptv->no.kidx;
found++;
}
if (ts->count == found) {
		/* We've found all values, no need to create new ones */
return (0);
}
/*
* we have added some state here, let's attach operation
	 * state to the list to be able to roll back if necessary.
*/
add_toperation_state(ch, ts);
/* Ensure table won't disappear */
tc_ref(tc);
IPFW_UH_WUNLOCK(ch);
/*
* Stage 2: allocate objects for non-existing values.
*/
for (i = 0; i < count; i++) {
ptei = &tei[i];
if (ptei->value != 0)
continue;
if (ptei->ptv != NULL)
continue;
ptei->ptv = malloc(sizeof(struct table_val_link), M_IPFW,
M_WAITOK | M_ZERO);
}
/*
* Stage 3: allocate index numbers for new values
* and link them to index.
*/
IPFW_UH_WLOCK(ch);
tc_unref(tc);
del_toperation_state(ch, ts);
if (ts->modified != 0) {
/*
* In general, we should free all state/indexes here
* and return. However, we keep allocated state instead
* to ensure we achieve some progress on each restart.
*/
return (0);
}
KASSERT(pval == ch->valuestate, ("resize_storage() notify failure"));
/* Let's try to link values */
for (i = 0; i < count; i++) {
ptei = &tei[i];
/* Check if record has appeared */
mask_table_value(ptei->pvalue, &tval, ts->vmask);
ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
(char *)&tval);
if (ptv != NULL) {
ptv->pval->refcnt++;
ptei->value = ptv->no.kidx;
continue;
}
/* May perform UH unlock/lock */
error = alloc_table_vidx(ch, ts, vi, &vidx);
if (error != 0) {
ts->opstate.func(ts->tc, &ts->opstate);
return (error);
}
/* value storage resize has happened, return */
if (ts->modified != 0)
return (0);
/* Finally, we have allocated valid index, let's add entry */
ptei->value = vidx;
ptv = (struct table_val_link *)ptei->ptv;
ptei->ptv = NULL;
ptv->no.kidx = vidx;
ptv->no.name = (char *)&pval[vidx];
ptv->pval = &pval[vidx];
memcpy(ptv->pval, &tval, sizeof(struct table_value));
pval[vidx].refcnt = 1;
ipfw_objhash_add(vi, &ptv->no);
}
return (0);
}
/*
* Compatibility function used to import data from old
* IP_FW_TABLE_ADD / IP_FW_TABLE_XADD opcodes.
*/
void
ipfw_import_table_value_legacy(uint32_t value, struct table_value *v)
{
memset(v, 0, sizeof(*v));
v->tag = value;
v->pipe = value;
v->divert = value;
v->skipto = value;
v->netgraph = value;
v->fib = value;
v->nat = value;
v->nh4 = value; /* host format */
v->dscp = value;
v->limit = value;
}
/*
* Export data to legacy table dumps opcodes.
*/
uint32_t
ipfw_export_table_value_legacy(struct table_value *v)
{
/*
* TODO: provide more compatibility depending on
* vmask value.
*/
return (v->tag);
}
/*
* Imports table value from current userland format.
* Saves value in kernel format to the same place.
*/
void
ipfw_import_table_value_v1(ipfw_table_value *iv)
{
struct table_value v;
memset(&v, 0, sizeof(v));
v.tag = iv->tag;
v.pipe = iv->pipe;
v.divert = iv->divert;
v.skipto = iv->skipto;
v.netgraph = iv->netgraph;
v.fib = iv->fib;
v.nat = iv->nat;
v.dscp = iv->dscp;
v.nh4 = iv->nh4;
v.nh6 = iv->nh6;
v.limit = iv->limit;
v.zoneid = iv->zoneid;
memcpy(iv, &v, sizeof(ipfw_table_value));
}
/*
* Export real table value @v to current userland format.
* Note that @v and @piv may point to the same memory.
*/
void
ipfw_export_table_value_v1(struct table_value *v, ipfw_table_value *piv)
{
ipfw_table_value iv;
memset(&iv, 0, sizeof(iv));
iv.tag = v->tag;
iv.pipe = v->pipe;
iv.divert = v->divert;
iv.skipto = v->skipto;
iv.netgraph = v->netgraph;
iv.fib = v->fib;
iv.nat = v->nat;
iv.dscp = v->dscp;
iv.limit = v->limit;
iv.nh4 = v->nh4;
iv.nh6 = v->nh6;
iv.zoneid = v->zoneid;
memcpy(piv, &iv, sizeof(iv));
}
/*
* Exports real value data into ipfw_table_value structure.
* Utilizes "spare1" field to store kernel index.
*/
static int
dump_tvalue(struct namedobj_instance *ni, struct named_object *no, void *arg)
{
struct vdump_args *da;
struct table_val_link *ptv;
struct table_value *v;
da = (struct vdump_args *)arg;
ptv = (struct table_val_link *)no;
v = (struct table_value *)ipfw_get_sopt_space(da->sd, sizeof(*v));
/* Out of memory, returning */
if (v == NULL) {
da->error = ENOMEM;
return (ENOMEM);
}
memcpy(v, ptv->pval, sizeof(*v));
v->spare1 = ptv->no.kidx;
return (0);
}
/*
* Dumps all shared/table value data
* Data layout (v1)(current):
* Request: [ ipfw_obj_lheader ], size = ipfw_obj_lheader.size
* Reply: [ ipfw_obj_lheader ipfw_table_value x N ]
*
* Returns 0 on success
*/
static int
list_table_values(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
struct _ipfw_obj_lheader *olh;
struct namedobj_instance *vi;
struct vdump_args da;
uint32_t count, size;
olh = (struct _ipfw_obj_lheader *)ipfw_get_sopt_header(sd,sizeof(*olh));
if (olh == NULL)
return (EINVAL);
if (sd->valsize < olh->size)
return (EINVAL);
IPFW_UH_RLOCK(ch);
vi = CHAIN_TO_VI(ch);
count = ipfw_objhash_count(vi);
size = count * sizeof(ipfw_table_value) + sizeof(ipfw_obj_lheader);
	/* Fill in header regardless of buffer size */
olh->count = count;
olh->objsize = sizeof(ipfw_table_value);
if (size > olh->size) {
olh->size = size;
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
olh->size = size;
/*
* Do the actual value dump
*/
memset(&da, 0, sizeof(da));
da.ch = ch;
da.sd = sd;
ipfw_objhash_foreach(vi, dump_tvalue, &da);
IPFW_UH_RUNLOCK(ch);
return (0);
}
void
ipfw_table_value_init(struct ip_fw_chain *ch, int first)
{
struct tables_config *tcfg;
ch->valuestate = malloc(VALDATA_START_SIZE * sizeof(struct table_value),
M_IPFW, M_WAITOK | M_ZERO);
tcfg = ch->tblcfg;
tcfg->val_size = VALDATA_START_SIZE;
tcfg->valhash = ipfw_objhash_create(tcfg->val_size);
ipfw_objhash_set_funcs(tcfg->valhash, hash_table_value,
cmp_table_value);
IPFW_ADD_SOPT_HANDLER(first, scodes);
}
static int
destroy_value(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
free(no, M_IPFW);
return (0);
}
void
ipfw_table_value_destroy(struct ip_fw_chain *ch, int last)
{
IPFW_DEL_SOPT_HANDLER(last, scodes);
free(ch->valuestate, M_IPFW);
ipfw_objhash_foreach(CHAIN_TO_VI(ch), destroy_value, ch);
ipfw_objhash_destroy(CHAIN_TO_VI(ch));
}

View File

@@ -1,131 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nat64/ip_fw_nat64.h>
#include <netpfil/ipfw/nat64/nat64_translate.h>
int nat64_debug = 0;
SYSCTL_DECL(_net_inet_ip_fw);
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, nat64_debug, CTLFLAG_RW,
&nat64_debug, 0, "Debug level for NAT64 module");
int nat64_allow_private = 0;
SYSCTL_INT(_net_inet_ip_fw, OID_AUTO, nat64_allow_private, CTLFLAG_RW,
&nat64_allow_private, 0,
"Allow use of non-global IPv4 addresses with NAT64");
static int
vnet_ipfw_nat64_init(const void *arg __unused)
{
struct ip_fw_chain *ch;
int first, error;
ch = &V_layer3_chain;
first = IS_DEFAULT_VNET(curvnet) ? 1: 0;
error = nat64stl_init(ch, first);
if (error != 0)
return (error);
error = nat64lsn_init(ch, first);
if (error != 0) {
nat64stl_uninit(ch, first);
return (error);
}
return (0);
}
static int
vnet_ipfw_nat64_uninit(const void *arg __unused)
{
struct ip_fw_chain *ch;
int last;
ch = &V_layer3_chain;
last = IS_DEFAULT_VNET(curvnet) ? 1: 0;
nat64stl_uninit(ch, last);
nat64lsn_uninit(ch, last);
return (0);
}
static int
ipfw_nat64_modevent(module_t mod, int type, void *unused)
{
switch (type) {
case MOD_LOAD:
case MOD_UNLOAD:
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
static moduledata_t ipfw_nat64_mod = {
"ipfw_nat64",
ipfw_nat64_modevent,
0
};
/* Define startup order. */
#define IPFW_NAT64_SI_SUB_FIREWALL SI_SUB_PROTO_IFATTACHDOMAIN
#define IPFW_NAT64_MODEVENT_ORDER (SI_ORDER_ANY - 128) /* after ipfw */
#define IPFW_NAT64_MODULE_ORDER (IPFW_NAT64_MODEVENT_ORDER + 1)
#define IPFW_NAT64_VNET_ORDER (IPFW_NAT64_MODEVENT_ORDER + 2)
DECLARE_MODULE(ipfw_nat64, ipfw_nat64_mod, IPFW_NAT64_SI_SUB_FIREWALL,
SI_ORDER_ANY);
MODULE_DEPEND(ipfw_nat64, ipfw, 3, 3, 3);
MODULE_VERSION(ipfw_nat64, 1);
VNET_SYSINIT(vnet_ipfw_nat64_init, IPFW_NAT64_SI_SUB_FIREWALL,
IPFW_NAT64_VNET_ORDER, vnet_ipfw_nat64_init, NULL);
VNET_SYSUNINIT(vnet_ipfw_nat64_uninit, IPFW_NAT64_SI_SUB_FIREWALL,
IPFW_NAT64_VNET_ORDER, vnet_ipfw_nat64_uninit, NULL);

View File

@@ -1,117 +0,0 @@
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IP_FW_NAT64_H_
#define _IP_FW_NAT64_H_
#define DPRINTF(mask, fmt, ...) \
if (nat64_debug & (mask)) \
printf("NAT64: %s: " fmt "\n", __func__, ## __VA_ARGS__)
#define DP_GENERIC 0x0001
#define DP_OBJ 0x0002
#define DP_JQUEUE 0x0004
#define DP_STATE 0x0008
#define DP_DROPS 0x0010
#define DP_ALL 0xFFFF
extern int nat64_debug;
#if 0
#define NAT64NOINLINE __noinline
#else
#define NAT64NOINLINE
#endif
int nat64stl_init(struct ip_fw_chain *ch, int first);
void nat64stl_uninit(struct ip_fw_chain *ch, int last);
int nat64lsn_init(struct ip_fw_chain *ch, int first);
void nat64lsn_uninit(struct ip_fw_chain *ch, int last);
struct ip_fw_nat64_stats {
counter_u64_t opcnt64; /* 6to4 of packets translated */
counter_u64_t opcnt46; /* 4to6 of packets translated */
counter_u64_t ofrags; /* number of fragments generated */
counter_u64_t ifrags; /* number of fragments received */
counter_u64_t oerrors; /* number of output errors */
counter_u64_t noroute4;
counter_u64_t noroute6;
counter_u64_t nomatch4; /* No addr/port match */
counter_u64_t noproto; /* Protocol not supported */
counter_u64_t nomem; /* mbufs allocation failed */
counter_u64_t dropped; /* number of packets silently
					 * dropped due to errors,
					 * unsupported protocols, etc.
*/
counter_u64_t jrequests; /* number of jobs requests queued */
counter_u64_t jcalls; /* number of jobs handler calls */
counter_u64_t jhostsreq; /* number of hosts requests */
counter_u64_t jportreq;
counter_u64_t jhostfails;
counter_u64_t jportfails;
counter_u64_t jmaxlen;
counter_u64_t jnomem;
counter_u64_t jreinjected;
counter_u64_t screated;
counter_u64_t sdeleted;
counter_u64_t spgcreated;
counter_u64_t spgdeleted;
};
#define IPFW_NAT64_VERSION 1
#define NAT64STATS (sizeof(struct ip_fw_nat64_stats) / sizeof(uint64_t))
typedef struct _nat64_stats_block {
counter_u64_t stats[NAT64STATS];
} nat64_stats_block;
#define NAT64STAT_ADD(s, f, v) \
counter_u64_add((s)->stats[ \
offsetof(struct ip_fw_nat64_stats, f) / sizeof(uint64_t)], (v))
#define NAT64STAT_INC(s, f) NAT64STAT_ADD(s, f, 1)
#define NAT64STAT_FETCH(s, f) \
counter_u64_fetch((s)->stats[ \
offsetof(struct ip_fw_nat64_stats, f) / sizeof(uint64_t)])
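/*
 * Editor's illustrative note (not part of the original sources): the
 * macros above index the per-instance counter array by the field's byte
 * offset inside struct ip_fw_nat64_stats, so NAT64STAT_INC(s, oerrors)
 * expands to roughly:
 */
#if 0	/* example only, not compiled */
	counter_u64_add((s)->stats[
	    offsetof(struct ip_fw_nat64_stats, oerrors) / sizeof(uint64_t)], 1);
#endif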
#define L3HDR(_ip, _t) ((_t)((u_int32_t *)(_ip) + (_ip)->ip_hl))
#define TCP(p) ((struct tcphdr *)(p))
#define UDP(p) ((struct udphdr *)(p))
#define ICMP(p) ((struct icmphdr *)(p))
#define ICMP6(p) ((struct icmp6_hdr *)(p))
#define NAT64SKIP 0
#define NAT64RETURN 1
#define NAT64MFREE -1
/* Well-known prefix 64:ff9b::/96 */
#define IPV6_ADDR_INT32_WKPFX htonl(0x64ff9b)
#define IN6_IS_ADDR_WKPFX(a) \
((a)->s6_addr32[0] == IPV6_ADDR_INT32_WKPFX && \
(a)->s6_addr32[1] == 0 && (a)->s6_addr32[2] == 0)
#endif

File diff suppressed because it is too large

View File

@@ -1,116 +0,0 @@
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IP_FW_NAT64_TRANSLATE_H_
#define _IP_FW_NAT64_TRANSLATE_H_
#ifdef RTALLOC_NOLOCK
#define IN_LOOKUP_ROUTE(ro, fib) rtalloc_fib_nolock((ro), 0, (fib))
#define IN6_LOOKUP_ROUTE(ro, fib) in6_rtalloc_nolock((ro), (fib))
#define FREE_ROUTE(ro)
#else
#define IN_LOOKUP_ROUTE(ro, fib) rtalloc_ign_fib((ro), 0, (fib))
#define IN6_LOOKUP_ROUTE(ro, fib) in6_rtalloc((ro), (fib))
#define FREE_ROUTE(ro) RO_RTFREE((ro))
#endif
static inline int
nat64_check_ip6(struct in6_addr *addr)
{
/* XXX: We should really check /8 */
if (addr->s6_addr16[0] == 0 || /* 0000::/8 Reserved by IETF */
IN6_IS_ADDR_MULTICAST(addr) || IN6_IS_ADDR_LINKLOCAL(addr))
return (1);
return (0);
}
extern int nat64_allow_private;
static inline int
nat64_check_private_ip4(in_addr_t ia)
{
if (nat64_allow_private)
return (0);
/* WKPFX must not be used to represent non-global IPv4 addresses */
// if (cfg->flags & NAT64_WKPFX) {
/* IN_PRIVATE */
if ((ia & htonl(0xff000000)) == htonl(0x0a000000) ||
(ia & htonl(0xfff00000)) == htonl(0xac100000) ||
(ia & htonl(0xffff0000)) == htonl(0xc0a80000))
return (1);
/*
* RFC 5735:
* 192.0.0.0/24 - reserved for IETF protocol assignments
* 192.88.99.0/24 - for use as 6to4 relay anycast addresses
* 198.18.0.0/15 - for use in benchmark tests
* 192.0.2.0/24, 198.51.100.0/24, 203.0.113.0/24 - for use
* in documentation and example code
*/
if ((ia & htonl(0xffffff00)) == htonl(0xc0000000) ||
(ia & htonl(0xffffff00)) == htonl(0xc0586300) ||
	    (ia & htonl(0xfffe0000)) == htonl(0xc6120000) ||
(ia & htonl(0xffffff00)) == htonl(0xc0000200) ||
	    (ia & htonl(0xffffff00)) == htonl(0xc6336400) ||
(ia & htonl(0xffffff00)) == htonl(0xcb007100))
return (1);
// }
return (0);
}
static inline int
nat64_check_ip4(in_addr_t ia)
{
/* IN_LOOPBACK */
if ((ia & htonl(0xff000000)) == htonl(0x7f000000))
return (1);
/* IN_LINKLOCAL */
if ((ia & htonl(0xffff0000)) == htonl(0xa9fe0000))
return (1);
/* IN_MULTICAST & IN_EXPERIMENTAL */
if ((ia & htonl(0xe0000000)) == htonl(0xe0000000))
return (1);
return (0);
}
#define nat64_get_ip4(_ip6) ((_ip6)->s6_addr32[3])
#define nat64_set_ip4(_ip6, _ip4) (_ip6)->s6_addr32[3] = (_ip4)
int nat64_getlasthdr(struct mbuf *m, int *offset);
int nat64_do_handle_ip4(struct mbuf *m, struct in6_addr *saddr,
struct in6_addr *daddr, uint16_t lport, nat64_stats_block *stats,
void *logdata);
int nat64_do_handle_ip6(struct mbuf *m, uint32_t aaddr, uint16_t aport,
nat64_stats_block *stats, void *logdata);
int nat64_handle_icmp6(struct mbuf *m, int hlen, uint32_t aaddr, uint16_t aport,
nat64_stats_block *stats, void *logdata);
#endif

File diff suppressed because it is too large

View File

@@ -1,351 +0,0 @@
/*-
* Copyright (c) 2015 Yandex LLC
* Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IP_FW_NAT64LSN_H_
#define _IP_FW_NAT64LSN_H_
#define NAT64_CHUNK_SIZE_BITS 6 /* 64 ports */
#define NAT64_CHUNK_SIZE (1 << NAT64_CHUNK_SIZE_BITS)
#define NAT64_MIN_PORT 1024
#define NAT64_MIN_CHUNK (NAT64_MIN_PORT >> NAT64_CHUNK_SIZE_BITS)
struct st_ptr {
uint8_t idx; /* index in nh->pg_ptr array.
* NOTE: it starts from 1.
*/
uint8_t off;
};
#define NAT64LSN_MAXPGPTR ((1 << (sizeof(uint8_t) * NBBY)) - 1)
#define NAT64LSN_PGPTRMASKBITS (sizeof(uint64_t) * NBBY)
#define NAT64LSN_PGPTRNMASK (roundup(NAT64LSN_MAXPGPTR, \
NAT64LSN_PGPTRMASKBITS) / NAT64LSN_PGPTRMASKBITS)
struct nat64lsn_portgroup;
/* sizeof(struct nat64lsn_host) = 64 + 64x2 + 8x8 = 256 bytes */
struct nat64lsn_host {
struct rwlock h_lock; /* Host states lock */
struct in6_addr addr;
struct nat64lsn_host *next;
uint16_t timestamp; /* Last altered */
uint16_t hsize; /* ports hash size */
uint16_t pg_used; /* Number of portgroups used */
#define NAT64LSN_REMAININGPG 8 /* Number of remaining PG before
					 * requesting a new chunk of indexes.
*/
uint16_t pg_allocated; /* Number of portgroups indexes
* allocated.
*/
#define NAT64LSN_HSIZE 64
struct st_ptr phash[NAT64LSN_HSIZE]; /* XXX: hardcoded size */
/*
* PG indexes are stored in chunks with 32 elements.
 * The maximum count is limited to 255 because st_ptr->idx is a uint8_t.
*/
#define NAT64LSN_PGIDX_CHUNK 32
#define NAT64LSN_PGNIDX (roundup(NAT64LSN_MAXPGPTR, \
NAT64LSN_PGIDX_CHUNK) / NAT64LSN_PGIDX_CHUNK)
struct nat64lsn_portgroup **pg_ptr[NAT64LSN_PGNIDX]; /* PG indexes */
};
#define NAT64_RLOCK_ASSERT(h) rw_assert(&(h)->h_lock, RA_RLOCKED)
#define NAT64_WLOCK_ASSERT(h) rw_assert(&(h)->h_lock, RA_WLOCKED)
#define NAT64_RLOCK(h) rw_rlock(&(h)->h_lock)
#define NAT64_RUNLOCK(h) rw_runlock(&(h)->h_lock)
#define NAT64_WLOCK(h) rw_wlock(&(h)->h_lock)
#define NAT64_WUNLOCK(h) rw_wunlock(&(h)->h_lock)
#define NAT64_LOCK(h) NAT64_WLOCK(h)
#define NAT64_UNLOCK(h) NAT64_WUNLOCK(h)
#define NAT64_LOCK_INIT(h) do { \
rw_init(&(h)->h_lock, "NAT64 host lock"); \
} while (0)
#define NAT64_LOCK_DESTROY(h) do { \
rw_destroy(&(h)->h_lock); \
} while (0)
/* Internal proto index */
#define NAT_PROTO_TCP 1
#define NAT_PROTO_UDP 2
#define NAT_PROTO_ICMP 3
#define NAT_MAX_PROTO 4
extern uint8_t nat64lsn_rproto_map[NAT_MAX_PROTO];
VNET_DECLARE(uint16_t, nat64lsn_eid);
#define V_nat64lsn_eid VNET(nat64lsn_eid)
#define IPFW_TLV_NAT64LSN_NAME IPFW_TLV_EACTION_NAME(V_nat64lsn_eid)
/* Timestamp macro */
#define _CT ((int)time_uptime % 65536)
#define SET_AGE(x) (x) = _CT
#define GET_AGE(x) ((_CT >= (x)) ? _CT - (x) : \
(int)65536 + _CT - (x))
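/*
 * Editor's illustrative note (not part of the original sources): the age
 * macros work on a 16-bit wrapping timestamp.  If _CT is currently 10 and
 * a state was stamped at 65530 just before the wrap, GET_AGE(65530) yields
 * 65536 + 10 - 65530 = 16, so ages stay correct across the rollover.
 */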
#ifdef __LP64__
/* ffsl() is capable of checking 64-bit ints */
#define _FFS64
#endif
/* 16 bytes */
struct nat64lsn_state {
union {
struct {
in_addr_t faddr; /* Remote IPv4 address */
uint16_t fport; /* Remote IPv4 port */
uint16_t lport; /* Local IPv6 port */
}s;
uint64_t hkey;
} u;
uint8_t nat_proto;
uint8_t flags;
uint16_t timestamp;
struct st_ptr cur; /* Index of portgroup in nat64lsn_host */
struct st_ptr next; /* Next entry index */
};
/*
* 1024+32 bytes per 64 states, used to store state
* AND for outside-in state lookup
*/
struct nat64lsn_portgroup {
struct nat64lsn_host *host; /* IPv6 source host info */
in_addr_t aaddr; /* Alias addr, network format */
uint16_t aport; /* Base port */
uint16_t timestamp;
uint8_t nat_proto;
uint8_t spare[3];
uint32_t idx;
#ifdef _FFS64
uint64_t freemask; /* Mask of free entries */
#else
uint32_t freemask[2]; /* Mask of free entries */
#endif
struct nat64lsn_state states[NAT64_CHUNK_SIZE]; /* State storage */
};
#ifdef _FFS64
#define PG_MARK_BUSY_IDX(_pg, _idx) (_pg)->freemask &= ~((uint64_t)1<<(_idx))
#define PG_MARK_FREE_IDX(_pg, _idx) (_pg)->freemask |= ((uint64_t)1<<(_idx))
#define PG_IS_FREE_IDX(_pg, _idx) ((_pg)->freemask & ((uint64_t)1<<(_idx)))
#define PG_IS_BUSY_IDX(_pg, _idx) (PG_IS_FREE_IDX(_pg, _idx) == 0)
#define PG_GET_FREE_IDX(_pg) (ffsll((_pg)->freemask))
#define PG_IS_EMPTY(_pg) (((_pg)->freemask + 1) == 0)
#else
#define PG_MARK_BUSY_IDX(_pg, _idx) \
(_pg)->freemask[(_idx) / 32] &= ~((u_long)1<<((_idx) % 32))
#define PG_MARK_FREE_IDX(_pg, _idx) \
(_pg)->freemask[(_idx) / 32] |= ((u_long)1<<((_idx) % 32))
#define PG_IS_FREE_IDX(_pg, _idx) \
((_pg)->freemask[(_idx) / 32] & ((u_long)1<<((_idx) % 32)))
#define PG_IS_BUSY_IDX(_pg, _idx) (PG_IS_FREE_IDX(_pg, _idx) == 0)
#define PG_GET_FREE_IDX(_pg) _pg_get_free_idx(_pg)
#define PG_IS_EMPTY(_pg) \
((((_pg)->freemask[0] + 1) == 0 && ((_pg)->freemask[1] + 1) == 0))
static inline int
_pg_get_free_idx(const struct nat64lsn_portgroup *pg)
{
int i;
if ((i = ffsl(pg->freemask[0])) != 0)
return (i);
if ((i = ffsl(pg->freemask[1])) != 0)
return (i + 32);
return (0);
}
#endif
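/*
 * Editor's illustrative note (not part of the original sources): the
 * PG_GET_FREE_IDX() result is 1-based, with 0 meaning "no free slot".
 * For example, with freemask[0] == 0 and freemask[1] == 0x8 the fallback
 * returns ffsl(0x8) + 32 = 36, matching ffsll() on the equivalent 64-bit
 * mask where only bit 35 is set.
 */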
TAILQ_HEAD(nat64lsn_job_head, nat64lsn_job_item);
#define NAT64LSN_FLAGSMASK (NAT64_LOG)
struct nat64lsn_cfg {
struct named_object no;
//struct nat64_exthost *ex; /* Pointer to external addr array */
struct nat64lsn_portgroup **pg; /* XXX: array of pointers */
struct nat64lsn_host **ih; /* Host hash */
uint32_t prefix4; /* IPv4 prefix */
uint32_t pmask4; /* IPv4 prefix mask */
uint32_t ihsize; /* IPv6 host hash size */
uint8_t plen4;
uint8_t plen6;
uint8_t nomatch_verdict;/* What to return to ipfw on no-match */
uint8_t nomatch_final; /* Exit outer loop? */
struct in6_addr prefix6; /* IPv6 prefix to embed IPv4 hosts */
uint32_t ihcount; /* Number of items in host hash */
int max_chunks; /* Max chunks per client */
int agg_prefix_len; /* Prefix length to count */
int agg_prefix_max; /* Max hosts per agg prefix */
uint32_t jmaxlen; /* Max jobqueue length */
uint32_t flags;
uint16_t min_chunk; /* Min port group # to use */
uint16_t max_chunk; /* Max port group # to use */
uint16_t nh_delete_delay; /* Stale host delete delay */
uint16_t pg_delete_delay; /* Stale portgroup del delay */
uint16_t st_syn_ttl; /* TCP syn expire */
uint16_t st_close_ttl; /* TCP fin expire */
uint16_t st_estab_ttl; /* TCP established expire */
uint16_t st_udp_ttl; /* UDP expire */
uint16_t st_icmp_ttl; /* ICMP expire */
uint32_t protochunks[NAT_MAX_PROTO];/* Number of chunks used */
struct callout periodic;
struct callout jcallout;
struct ip_fw_chain *ch;
struct vnet *vp;
struct nat64lsn_job_head jhead;
int jlen;
char name[64]; /* Nat instance name */
nat64_stats_block stats;
};
struct nat64lsn_cfg *nat64lsn_init_instance(struct ip_fw_chain *ch,
size_t numaddr);
void nat64lsn_destroy_instance(struct nat64lsn_cfg *cfg);
void nat64lsn_start_instance(struct nat64lsn_cfg *cfg);
void nat64lsn_init_internal(void);
void nat64lsn_uninit_internal(void);
int ipfw_nat64lsn(struct ip_fw_chain *ch, struct ip_fw_args *args,
ipfw_insn *cmd, int *done);
void
nat64lsn_dump_state(const struct nat64lsn_cfg *cfg,
const struct nat64lsn_portgroup *pg, const struct nat64lsn_state *st,
const char *px, int off);
/*
* Portgroup layout
* addr x nat_proto x port_off
*
*/
#define _ADDR_PG_PROTO_COUNT (65536 >> NAT64_CHUNK_SIZE_BITS)
#define _ADDR_PG_COUNT (_ADDR_PG_PROTO_COUNT * NAT_MAX_PROTO)
#define GET_ADDR_IDX(_cfg, _addr) ((_addr) - ((_cfg)->prefix4))
#define __GET_PORTGROUP_IDX(_proto, _port) \
((_proto - 1) * _ADDR_PG_PROTO_COUNT + \
((_port) >> NAT64_CHUNK_SIZE_BITS))
#define _GET_PORTGROUP_IDX(_cfg, _addr, _proto, _port) \
GET_ADDR_IDX(_cfg, _addr) * _ADDR_PG_COUNT + \
__GET_PORTGROUP_IDX(_proto, _port)
#define GET_PORTGROUP(_cfg, _addr, _proto, _port) \
((_cfg)->pg[_GET_PORTGROUP_IDX(_cfg, _addr, _proto, _port)])
#define PORTGROUP_CHUNK(_nh, _idx) \
((_nh)->pg_ptr[(_idx)])
#define PORTGROUP_BYSIDX(_cfg, _nh, _idx) \
(PORTGROUP_CHUNK(_nh, (_idx - 1) / NAT64LSN_PGIDX_CHUNK) \
[((_idx) - 1) % NAT64LSN_PGIDX_CHUNK])
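/*
 * Editor's illustrative note (not part of the original sources): with
 * NAT64_CHUNK_SIZE_BITS = 6 every port group covers 64 ports, so
 * _ADDR_PG_PROTO_COUNT is 65536 / 64 = 1024 groups per protocol and
 * _ADDR_PG_COUNT is 4 * 1024 = 4096 groups per alias address.  For
 * example, TCP port 1050 lands in
 * __GET_PORTGROUP_IDX(NAT_PROTO_TCP, 1050) = (1 - 1) * 1024 + (1050 >> 6) = 16.
 */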
/* Chained hash table */
#define CHT_FIND(_ph, _hsize, _PX, _x, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_x = _PX##first(_ph, _buck); \
for ( ; _x != NULL; _x = _PX##next(_x)) { \
if (_PX##cmp(_key, _PX##val(_x))) \
break; \
} \
if (_x == NULL) \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_UNLOCK_BUCK(_ph, _PX, _buck) \
_PX##unlock(_ph, _buck);
#define CHT_UNLOCK_KEY(_ph, _hsize, _PX, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_INSERT_HEAD(_ph, _hsize, _PX, _i) do { \
unsigned int _buck = _PX##hash(_PX##val(_i)) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_PX##next(_i) = _PX##first(_ph, _buck); \
_PX##first(_ph, _buck) = _i; \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_REMOVE(_ph, _hsize, _PX, _x, _tmp, _key) do { \
unsigned int _buck = _PX##hash(_key) & (_hsize - 1); \
_PX##lock(_ph, _buck); \
_x = _PX##first(_ph, _buck); \
_tmp = NULL; \
for ( ; _x != NULL; _tmp = _x, _x = _PX##next(_x)) { \
if (_PX##cmp(_key, _PX##val(_x))) \
break; \
} \
if (_x != NULL) { \
if (_tmp == NULL) \
_PX##first(_ph, _buck) = _PX##next(_x); \
else \
_PX##next(_tmp) = _PX##next(_x); \
} \
_PX##unlock(_ph, _buck); \
} while(0)
#define CHT_FOREACH_SAFE(_ph, _hsize, _PX, _x, _tmp, _cb, _arg) do { \
for (unsigned int _i = 0; _i < _hsize; _i++) { \
_PX##lock(_ph, _i); \
_x = _PX##first(_ph, _i); \
_tmp = NULL; \
for (; _x != NULL; _tmp = _x, _x = _PX##next(_x)) { \
if (_cb(_x, _arg) == 0) \
continue; \
if (_tmp == NULL) \
_PX##first(_ph, _i) = _PX##next(_x); \
else \
_tmp = _PX##next(_x); \
} \
_PX##unlock(_ph, _i); \
} \
} while(0)
#define CHT_RESIZE(_ph, _hsize, _nph, _nhsize, _PX, _x, _y) do { \
unsigned int _buck; \
for (unsigned int _i = 0; _i < _hsize; _i++) { \
_x = _PX##first(_ph, _i); \
_y = _x; \
while (_y != NULL) { \
_buck = _PX##hash(_PX##val(_x)) & (_nhsize - 1);\
_y = _PX##next(_x); \
_PX##next(_x) = _PX##first(_nph, _buck); \
_PX##first(_nph, _buck) = _x; \
} \
} \
} while(0)
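/*
 * Editor's illustrative sketch (not part of the original sources; every
 * name below is hypothetical): the CHT_* macros expect the caller to
 * supply a _PX prefix providing hash/cmp/val/first/next/lock/unlock
 * helpers, along these lines:
 */
#if 0	/* example only, not compiled */
#define	ex_hash(key)		((key)->hval)
#define	ex_cmp(key, val)	((key)->id == (val)->id)
#define	ex_val(x)		(&(x)->key)
#define	ex_first(ph, buck)	((ph)[(buck)])
#define	ex_next(x)		((x)->next)
#define	ex_lock(ph, buck)	mtx_lock(&ex_bucket_locks[(buck)])
#define	ex_unlock(ph, buck)	mtx_unlock(&ex_bucket_locks[(buck)])

	struct ex_item *it;

	CHT_FIND(ex_hashtbl, EX_HSIZE, ex_, it, &lookup_key);
	if (it != NULL) {
		/* ... use the matching entry, bucket is still locked ... */
		CHT_UNLOCK_KEY(ex_hashtbl, EX_HSIZE, ex_, &lookup_key);
	}
#endif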
#endif /* _IP_FW_NAT64LSN_H_ */

View File

@@ -1,919 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2015 Yandex LLC
* Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockopt.h>
#include <sys/queue.h>
#include <net/if.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nat64/ip_fw_nat64.h>
#include <netpfil/ipfw/nat64/nat64lsn.h>
#include <netinet6/ip_fw_nat64.h>
VNET_DEFINE(uint16_t, nat64lsn_eid) = 0;
static struct nat64lsn_cfg *
nat64lsn_find(struct namedobj_instance *ni, const char *name, uint8_t set)
{
struct nat64lsn_cfg *cfg;
cfg = (struct nat64lsn_cfg *)ipfw_objhash_lookup_name_type(ni, set,
IPFW_TLV_NAT64LSN_NAME, name);
return (cfg);
}
static void
nat64lsn_default_config(ipfw_nat64lsn_cfg *uc)
{
if (uc->max_ports == 0)
uc->max_ports = NAT64LSN_MAX_PORTS;
else
uc->max_ports = roundup(uc->max_ports, NAT64_CHUNK_SIZE);
if (uc->max_ports > NAT64_CHUNK_SIZE * NAT64LSN_MAXPGPTR)
uc->max_ports = NAT64_CHUNK_SIZE * NAT64LSN_MAXPGPTR;
if (uc->jmaxlen == 0)
uc->jmaxlen = NAT64LSN_JMAXLEN;
if (uc->jmaxlen > 65536)
uc->jmaxlen = 65536;
if (uc->nh_delete_delay == 0)
uc->nh_delete_delay = NAT64LSN_HOST_AGE;
if (uc->pg_delete_delay == 0)
uc->pg_delete_delay = NAT64LSN_PG_AGE;
if (uc->st_syn_ttl == 0)
uc->st_syn_ttl = NAT64LSN_TCP_SYN_AGE;
if (uc->st_close_ttl == 0)
uc->st_close_ttl = NAT64LSN_TCP_FIN_AGE;
if (uc->st_estab_ttl == 0)
uc->st_estab_ttl = NAT64LSN_TCP_EST_AGE;
if (uc->st_udp_ttl == 0)
uc->st_udp_ttl = NAT64LSN_UDP_AGE;
if (uc->st_icmp_ttl == 0)
uc->st_icmp_ttl = NAT64LSN_ICMP_AGE;
}
/*
* Creates new nat64lsn instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ipfw_nat64lsn_cfg ]
*
* Returns 0 on success
*/
static int
nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_lheader *olh;
ipfw_nat64lsn_cfg *uc;
struct nat64lsn_cfg *cfg;
struct namedobj_instance *ni;
uint32_t addr4, mask4;
if (sd->valsize != sizeof(*olh) + sizeof(*uc))
return (EINVAL);
olh = (ipfw_obj_lheader *)sd->kbuf;
uc = (ipfw_nat64lsn_cfg *)(olh + 1);
if (ipfw_check_object_name_generic(uc->name) != 0)
return (EINVAL);
if (uc->agg_prefix_len > 127 || uc->set >= IPFW_MAX_SETS)
return (EINVAL);
if (uc->plen4 > 32)
return (EINVAL);
if (uc->plen6 > 128 || ((uc->plen6 % 8) != 0))
return (EINVAL);
/* XXX: Check prefix4 to be global */
addr4 = ntohl(uc->prefix4.s_addr);
mask4 = ~((1 << (32 - uc->plen4)) - 1);
if ((addr4 & mask4) != addr4)
return (EINVAL);
/* XXX: Check prefix6 */
if (uc->min_port == 0)
uc->min_port = NAT64_MIN_PORT;
if (uc->max_port == 0)
uc->max_port = 65535;
if (uc->min_port > uc->max_port)
return (EINVAL);
uc->min_port = roundup(uc->min_port, NAT64_CHUNK_SIZE);
uc->max_port = roundup(uc->max_port, NAT64_CHUNK_SIZE);
nat64lsn_default_config(uc);
ni = CHAIN_TO_SRV(ch);
IPFW_UH_RLOCK(ch);
if (nat64lsn_find(ni, uc->name, uc->set) != NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
}
IPFW_UH_RUNLOCK(ch);
cfg = nat64lsn_init_instance(ch, 1 << (32 - uc->plen4));
strlcpy(cfg->name, uc->name, sizeof(cfg->name));
cfg->no.name = cfg->name;
cfg->no.etlv = IPFW_TLV_NAT64LSN_NAME;
cfg->no.set = uc->set;
cfg->prefix4 = addr4;
cfg->pmask4 = addr4 | ~mask4;
/* XXX: Copy 96 bits */
cfg->plen6 = 96;
memcpy(&cfg->prefix6, &uc->prefix6, cfg->plen6 / 8);
cfg->plen4 = uc->plen4;
cfg->flags = uc->flags & NAT64LSN_FLAGSMASK;
cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
cfg->agg_prefix_len = uc->agg_prefix_len;
cfg->agg_prefix_max = uc->agg_prefix_max;
cfg->min_chunk = uc->min_port / NAT64_CHUNK_SIZE;
cfg->max_chunk = uc->max_port / NAT64_CHUNK_SIZE;
cfg->jmaxlen = uc->jmaxlen;
cfg->nh_delete_delay = uc->nh_delete_delay;
cfg->pg_delete_delay = uc->pg_delete_delay;
cfg->st_syn_ttl = uc->st_syn_ttl;
cfg->st_close_ttl = uc->st_close_ttl;
cfg->st_estab_ttl = uc->st_estab_ttl;
cfg->st_udp_ttl = uc->st_udp_ttl;
cfg->st_icmp_ttl = uc->st_icmp_ttl;
cfg->nomatch_verdict = IP_FW_DENY;
cfg->nomatch_final = 1; /* Exit outer loop by default */
IPFW_UH_WLOCK(ch);
if (nat64lsn_find(ni, uc->name, uc->set) != NULL) {
IPFW_UH_WUNLOCK(ch);
nat64lsn_destroy_instance(cfg);
return (EEXIST);
}
if (ipfw_objhash_alloc_idx(CHAIN_TO_SRV(ch), &cfg->no.kidx) != 0) {
IPFW_UH_WUNLOCK(ch);
nat64lsn_destroy_instance(cfg);
return (ENOSPC);
}
ipfw_objhash_add(CHAIN_TO_SRV(ch), &cfg->no);
/* Okay, let's link data */
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = cfg;
IPFW_WUNLOCK(ch);
nat64lsn_start_instance(cfg);
IPFW_UH_WUNLOCK(ch);
return (0);
}
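/*
 * Note (illustrative, not part of the original file): the instance name
 * is looked up twice, first under the UH read lock before the costly
 * instance allocation and again under the UH write lock right before
 * linking.  The expensive setup therefore runs without the write lock,
 * while two racing creates still cannot both insert the same name.
 */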
static void
nat64lsn_detach_config(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg)
{
IPFW_UH_WLOCK_ASSERT(ch);
ipfw_objhash_del(CHAIN_TO_SRV(ch), &cfg->no);
ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), cfg->no.kidx);
}
/*
* Destroys nat64 instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nat64lsn_destroy(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
struct nat64lsn_cfg *cfg;
ipfw_obj_header *oh;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)op3;
IPFW_UH_WLOCK(ch);
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
if (cfg->no.refcnt > 0) {
IPFW_UH_WUNLOCK(ch);
return (EBUSY);
}
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = NULL;
IPFW_WUNLOCK(ch);
nat64lsn_detach_config(ch, cfg);
IPFW_UH_WUNLOCK(ch);
nat64lsn_destroy_instance(cfg);
return (0);
}
#define __COPY_STAT_FIELD(_cfg, _stats, _field) \
(_stats)->_field = NAT64STAT_FETCH(&(_cfg)->stats, _field)
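/*
 * For reference (illustrative, not part of the original file): the macro
 * above expands, for example,
 *	__COPY_STAT_FIELD(cfg, stats, opcnt64);
 * into
 *	(stats)->opcnt64 = NAT64STAT_FETCH(&(cfg)->stats, opcnt64);
 * i.e. each invocation snapshots one counter from the per-instance
 * statistics block into the structure exported to userland.
 */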
static void
export_stats(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg,
struct ipfw_nat64lsn_stats *stats)
{
__COPY_STAT_FIELD(cfg, stats, opcnt64);
__COPY_STAT_FIELD(cfg, stats, opcnt46);
__COPY_STAT_FIELD(cfg, stats, ofrags);
__COPY_STAT_FIELD(cfg, stats, ifrags);
__COPY_STAT_FIELD(cfg, stats, oerrors);
__COPY_STAT_FIELD(cfg, stats, noroute4);
__COPY_STAT_FIELD(cfg, stats, noroute6);
__COPY_STAT_FIELD(cfg, stats, nomatch4);
__COPY_STAT_FIELD(cfg, stats, noproto);
__COPY_STAT_FIELD(cfg, stats, nomem);
__COPY_STAT_FIELD(cfg, stats, dropped);
__COPY_STAT_FIELD(cfg, stats, jcalls);
__COPY_STAT_FIELD(cfg, stats, jrequests);
__COPY_STAT_FIELD(cfg, stats, jhostsreq);
__COPY_STAT_FIELD(cfg, stats, jportreq);
__COPY_STAT_FIELD(cfg, stats, jhostfails);
__COPY_STAT_FIELD(cfg, stats, jportfails);
__COPY_STAT_FIELD(cfg, stats, jmaxlen);
__COPY_STAT_FIELD(cfg, stats, jnomem);
__COPY_STAT_FIELD(cfg, stats, jreinjected);
__COPY_STAT_FIELD(cfg, stats, screated);
__COPY_STAT_FIELD(cfg, stats, sdeleted);
__COPY_STAT_FIELD(cfg, stats, spgcreated);
__COPY_STAT_FIELD(cfg, stats, spgdeleted);
stats->hostcount = cfg->ihcount;
stats->tcpchunks = cfg->protochunks[NAT_PROTO_TCP];
stats->udpchunks = cfg->protochunks[NAT_PROTO_UDP];
stats->icmpchunks = cfg->protochunks[NAT_PROTO_ICMP];
}
#undef __COPY_STAT_FIELD
static void
nat64lsn_export_config(struct ip_fw_chain *ch, struct nat64lsn_cfg *cfg,
ipfw_nat64lsn_cfg *uc)
{
uc->flags = cfg->flags & NAT64LSN_FLAGSMASK;
uc->max_ports = cfg->max_chunks * NAT64_CHUNK_SIZE;
uc->agg_prefix_len = cfg->agg_prefix_len;
uc->agg_prefix_max = cfg->agg_prefix_max;
uc->jmaxlen = cfg->jmaxlen;
uc->nh_delete_delay = cfg->nh_delete_delay;
uc->pg_delete_delay = cfg->pg_delete_delay;
uc->st_syn_ttl = cfg->st_syn_ttl;
uc->st_close_ttl = cfg->st_close_ttl;
uc->st_estab_ttl = cfg->st_estab_ttl;
uc->st_udp_ttl = cfg->st_udp_ttl;
uc->st_icmp_ttl = cfg->st_icmp_ttl;
uc->prefix4.s_addr = htonl(cfg->prefix4);
uc->prefix6 = cfg->prefix6;
uc->plen4 = cfg->plen4;
uc->plen6 = cfg->plen6;
uc->set = cfg->no.set;
strlcpy(uc->name, cfg->no.name, sizeof(uc->name));
}
struct nat64_dump_arg {
struct ip_fw_chain *ch;
struct sockopt_data *sd;
};
static int
export_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nat64_dump_arg *da = (struct nat64_dump_arg *)arg;
ipfw_nat64lsn_cfg *uc;
uc = (struct _ipfw_nat64lsn_cfg *)ipfw_get_sopt_space(da->sd,
sizeof(*uc));
nat64lsn_export_config(da->ch, (struct nat64lsn_cfg *)no, uc);
return (0);
}
/*
* Lists all nat64 lsn instances currently available in kernel.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ]
* Reply: [ ipfw_obj_lheader ipfw_nat64lsn_cfg x N ]
*
* Returns 0 on success
*/
static int
nat64lsn_list(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_lheader *olh;
struct nat64_dump_arg da;
/* Check minimum header size */
if (sd->valsize < sizeof(ipfw_obj_lheader))
return (EINVAL);
olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
IPFW_UH_RLOCK(ch);
olh->count = ipfw_objhash_count_type(CHAIN_TO_SRV(ch),
IPFW_TLV_NAT64LSN_NAME);
olh->objsize = sizeof(ipfw_nat64lsn_cfg);
olh->size = sizeof(*olh) + olh->count * olh->objsize;
if (sd->valsize < olh->size) {
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
memset(&da, 0, sizeof(da));
da.ch = ch;
da.sd = sd;
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), export_config_cb, &da,
IPFW_TLV_NAT64LSN_NAME);
IPFW_UH_RUNLOCK(ch);
return (0);
}
/*
* Change existing nat64lsn instance configuration.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ipfw_nat64lsn_cfg ]
* Reply: [ ipfw_obj_header ipfw_nat64lsn_cfg ]
*
* Returns 0 on success
*/
static int
nat64lsn_config(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
ipfw_nat64lsn_cfg *uc;
struct nat64lsn_cfg *cfg;
struct namedobj_instance *ni;
if (sd->valsize != sizeof(*oh) + sizeof(*uc))
return (EINVAL);
oh = (ipfw_obj_header *)ipfw_get_sopt_space(sd,
sizeof(*oh) + sizeof(*uc));
uc = (ipfw_nat64lsn_cfg *)(oh + 1);
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
ni = CHAIN_TO_SRV(ch);
if (sd->sopt->sopt_dir == SOPT_GET) {
IPFW_UH_RLOCK(ch);
cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
}
nat64lsn_export_config(ch, cfg, uc);
IPFW_UH_RUNLOCK(ch);
return (0);
}
nat64lsn_default_config(uc);
IPFW_UH_WLOCK(ch);
cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (EEXIST);
}
/*
* For now, only the following values may be changed:
* jmaxlen, nh_del_age, pg_del_age, tcp_syn_age, tcp_close_age,
* tcp_est_age, udp_age, icmp_age, flags, max_ports.
*/
cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
cfg->jmaxlen = uc->jmaxlen;
cfg->nh_delete_delay = uc->nh_delete_delay;
cfg->pg_delete_delay = uc->pg_delete_delay;
cfg->st_syn_ttl = uc->st_syn_ttl;
cfg->st_close_ttl = uc->st_close_ttl;
cfg->st_estab_ttl = uc->st_estab_ttl;
cfg->st_udp_ttl = uc->st_udp_ttl;
cfg->st_icmp_ttl = uc->st_icmp_ttl;
cfg->flags = uc->flags & NAT64LSN_FLAGSMASK;
IPFW_UH_WUNLOCK(ch);
return (0);
}
/*
* Get nat64lsn statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
* Reply: [ ipfw_obj_header ipfw_counter_tlv ]
*
* Returns 0 on success
*/
static int
nat64lsn_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct ipfw_nat64lsn_stats stats;
struct nat64lsn_cfg *cfg;
ipfw_obj_header *oh;
ipfw_obj_ctlv *ctlv;
size_t sz;
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_ctlv) + sizeof(stats);
if (sd->valsize % sizeof(uint64_t))
return (EINVAL);
if (sd->valsize < sz)
return (ENOMEM);
oh = (ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
if (oh == NULL)
return (EINVAL);
memset(&stats, 0, sizeof(stats));
IPFW_UH_RLOCK(ch);
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
}
export_stats(ch, cfg, &stats);
IPFW_UH_RUNLOCK(ch);
ctlv = (ipfw_obj_ctlv *)(oh + 1);
memset(ctlv, 0, sizeof(*ctlv));
ctlv->head.type = IPFW_TLV_COUNTERS;
ctlv->head.length = sz - sizeof(ipfw_obj_header);
ctlv->count = sizeof(stats) / sizeof(uint64_t);
ctlv->objsize = sizeof(uint64_t);
ctlv->version = IPFW_NAT64_VERSION;
memcpy(ctlv + 1, &stats, sizeof(stats));
return (0);
}
/*
* Reset nat64lsn statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nat64lsn_reset_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct nat64lsn_cfg *cfg;
ipfw_obj_header *oh;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
IPFW_UH_WLOCK(ch);
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
COUNTER_ARRAY_ZERO(cfg->stats.stats, NAT64STATS);
IPFW_UH_WUNLOCK(ch);
return (0);
}
/*
* Reply: [ ipfw_obj_header ipfw_obj_data [ ipfw_nat64lsn_stg
* ipfw_nat64lsn_state x count, ... ] ]
*/
static int
export_pg_states(struct nat64lsn_cfg *cfg, struct nat64lsn_portgroup *pg,
ipfw_nat64lsn_stg *stg, struct sockopt_data *sd)
{
ipfw_nat64lsn_state *ste;
struct nat64lsn_state *st;
int i, count;
NAT64_LOCK(pg->host);
count = 0;
for (i = 0; i < 64; i++) {
if (PG_IS_BUSY_IDX(pg, i))
count++;
}
DPRINTF(DP_STATE, "EXPORT PG %d, count %d", pg->idx, count);
if (count == 0) {
stg->count = 0;
NAT64_UNLOCK(pg->host);
return (0);
}
ste = (ipfw_nat64lsn_state *)ipfw_get_sopt_space(sd,
count * sizeof(ipfw_nat64lsn_state));
if (ste == NULL) {
NAT64_UNLOCK(pg->host);
return (1);
}
stg->alias4.s_addr = pg->aaddr;
stg->proto = nat64lsn_rproto_map[pg->nat_proto];
stg->flags = 0;
stg->host6 = pg->host->addr;
stg->count = count;
for (i = 0; i < 64; i++) {
if (PG_IS_FREE_IDX(pg, i))
continue;
st = &pg->states[i];
ste->daddr.s_addr = st->u.s.faddr;
ste->dport = st->u.s.fport;
ste->aport = pg->aport + i;
ste->sport = st->u.s.lport;
ste->flags = st->flags; /* XXX filter flags */
ste->idle = GET_AGE(st->timestamp);
ste++;
}
NAT64_UNLOCK(pg->host);
return (0);
}
static int
get_next_idx(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
{
if (*port < 65536 - NAT64_CHUNK_SIZE) {
*port += NAT64_CHUNK_SIZE;
return (0);
}
*port = 0;
if (*nat_proto < NAT_MAX_PROTO - 1) {
*nat_proto += 1;
return (0);
}
*nat_proto = 1;
if (*addr < cfg->pmask4) {
*addr += 1;
return (0);
}
/* End of space. */
return (1);
}
#define PACK_IDX(addr, proto, port) \
((uint64_t)addr << 32) | ((uint32_t)port << 16) | (proto << 8)
#define UNPACK_IDX(idx, addr, proto, port) \
(addr) = (uint32_t)((idx) >> 32); \
(port) = (uint16_t)(((idx) >> 16) & 0xFFFF); \
(proto) = (uint8_t)(((idx) >> 8) & 0xFF)
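/*
 * Illustrative example, not part of the original file: the 64-bit
 * iterator index built by PACK_IDX() carries the alias address in bits
 * 63-32, the port in bits 31-16 and the NAT protocol in bits 15-8.
 */
static void __unused
nat64lsn_idx_example(void)
{
	uint64_t idx;
	uint32_t addr;
	uint16_t port;
	uint8_t proto;

	idx = PACK_IDX(0x01020304, 1, 0x0400);
	/* idx == 0x0102030404000100 */
	UNPACK_IDX(idx, addr, proto, port);
	/* addr == 0x01020304, proto == 1, port == 0x0400 */
	(void)addr; (void)proto; (void)port;
}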
static struct nat64lsn_portgroup *
get_next_pg(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
{
struct nat64lsn_portgroup *pg;
uint64_t pre_pack, post_pack;
pg = NULL;
pre_pack = PACK_IDX(*addr, *nat_proto, *port);
for (;;) {
if (get_next_idx(cfg, addr, nat_proto, port) != 0) {
/* End of states */
return (pg);
}
pg = GET_PORTGROUP(cfg, *addr, *nat_proto, *port);
if (pg != NULL)
break;
}
post_pack = PACK_IDX(*addr, *nat_proto, *port);
if (pre_pack == post_pack)
DPRINTF(DP_STATE, "XXX: PACK_IDX %u %d %d",
*addr, *nat_proto, *port);
return (pg);
}
static NAT64NOINLINE struct nat64lsn_portgroup *
get_first_pg(struct nat64lsn_cfg *cfg, uint32_t *addr, uint8_t *nat_proto,
uint16_t *port)
{
struct nat64lsn_portgroup *pg;
pg = GET_PORTGROUP(cfg, *addr, *nat_proto, *port);
if (pg == NULL)
pg = get_next_pg(cfg, addr, nat_proto, port);
return (pg);
}
/*
* Lists nat64lsn states.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ipfw_obj_data [ uint64_t ]]
* Reply: [ ipfw_obj_header ipfw_obj_data [
* ipfw_nat64lsn_stg ipfw_nat64lsn_state x N] ]
*
* Returns 0 on success
*/
static int
nat64lsn_states(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
ipfw_obj_data *od;
ipfw_nat64lsn_stg *stg;
struct nat64lsn_cfg *cfg;
struct nat64lsn_portgroup *pg, *pg_next;
uint64_t next_idx;
size_t sz;
uint32_t addr, states;
uint16_t port;
uint8_t nat_proto;
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_data) +
sizeof(uint64_t);
/* Check minimum header size */
if (sd->valsize < sz)
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
od = (ipfw_obj_data *)(oh + 1);
if (od->head.type != IPFW_TLV_OBJDATA ||
od->head.length != sz - sizeof(ipfw_obj_header))
return (EINVAL);
next_idx = *(uint64_t *)(od + 1);
/* Translate the index from the request into the position to start from */
UNPACK_IDX(next_idx, addr, nat_proto, port);
if (nat_proto >= NAT_MAX_PROTO)
return (EINVAL);
if (nat_proto == 0 && addr != 0)
return (EINVAL);
IPFW_UH_RLOCK(ch);
cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
}
/* Fill in starting point */
if (addr == 0) {
addr = cfg->prefix4;
nat_proto = 1;
port = 0;
}
if (addr < cfg->prefix4 || addr > cfg->pmask4) {
IPFW_UH_RUNLOCK(ch);
DPRINTF(DP_GENERIC | DP_STATE, "XXX: %ju %u %u",
(uintmax_t)next_idx, addr, cfg->pmask4);
return (EINVAL);
}
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_data) +
sizeof(ipfw_nat64lsn_stg);
if (sd->valsize < sz)
return (ENOMEM);
oh = (ipfw_obj_header *)ipfw_get_sopt_space(sd, sz);
od = (ipfw_obj_data *)(oh + 1);
od->head.type = IPFW_TLV_OBJDATA;
od->head.length = sz - sizeof(ipfw_obj_header);
stg = (ipfw_nat64lsn_stg *)(od + 1);
pg = get_first_pg(cfg, &addr, &nat_proto, &port);
if (pg == NULL) {
/* No states */
stg->next_idx = 0xFF;
stg->count = 0;
IPFW_UH_RUNLOCK(ch);
return (0);
}
states = 0;
pg_next = NULL;
while (pg != NULL) {
pg_next = get_next_pg(cfg, &addr, &nat_proto, &port);
if (pg_next == NULL)
stg->next_idx = 0xFF;
else
stg->next_idx = PACK_IDX(addr, nat_proto, port);
if (export_pg_states(cfg, pg, stg, sd) != 0) {
IPFW_UH_RUNLOCK(ch);
return (states == 0 ? ENOMEM: 0);
}
states += stg->count;
od->head.length += stg->count * sizeof(ipfw_nat64lsn_state);
sz += stg->count * sizeof(ipfw_nat64lsn_state);
if (pg_next != NULL) {
sz += sizeof(ipfw_nat64lsn_stg);
if (sd->valsize < sz)
break;
stg = (ipfw_nat64lsn_stg *)ipfw_get_sopt_space(sd,
sizeof(ipfw_nat64lsn_stg));
}
pg = pg_next;
}
IPFW_UH_RUNLOCK(ch);
return (0);
}
static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_NAT64LSN_CREATE, 0, HDIR_BOTH, nat64lsn_create },
{ IP_FW_NAT64LSN_DESTROY,0, HDIR_SET, nat64lsn_destroy },
{ IP_FW_NAT64LSN_CONFIG, 0, HDIR_BOTH, nat64lsn_config },
{ IP_FW_NAT64LSN_LIST, 0, HDIR_GET, nat64lsn_list },
{ IP_FW_NAT64LSN_STATS, 0, HDIR_GET, nat64lsn_stats },
{ IP_FW_NAT64LSN_RESET_STATS,0, HDIR_SET, nat64lsn_reset_stats },
{ IP_FW_NAT64LSN_LIST_STATES,0, HDIR_GET, nat64lsn_states },
};
static int
nat64lsn_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{
ipfw_insn *icmd;
icmd = cmd - 1;
if (icmd->opcode != O_EXTERNAL_ACTION ||
icmd->arg1 != V_nat64lsn_eid)
return (1);
*puidx = cmd->arg1;
*ptype = 0;
return (0);
}
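/*
 * Note (illustrative, not part of the original file): a rule such as
 * "nat64lsn <name>" is stored as an O_EXTERNAL_ACTION opcode carrying the
 * eaction id, immediately followed by an O_EXTERNAL_INSTANCE opcode whose
 * arg1 is the instance kidx; the classifier above therefore looks one
 * opcode back to make sure the instance really belongs to nat64lsn.
 */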
static void
nat64lsn_update_arg1(ipfw_insn *cmd, uint16_t idx)
{
cmd->arg1 = idx;
}
static int
nat64lsn_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
struct named_object **pno)
{
int err;
err = ipfw_objhash_find_type(CHAIN_TO_SRV(ch), ti,
IPFW_TLV_NAT64LSN_NAME, pno);
return (err);
}
static struct named_object *
nat64lsn_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{
struct namedobj_instance *ni;
struct named_object *no;
IPFW_UH_WLOCK_ASSERT(ch);
ni = CHAIN_TO_SRV(ch);
no = ipfw_objhash_lookup_kidx(ni, idx);
KASSERT(no != NULL, ("NAT64LSN with index %d not found", idx));
return (no);
}
static int
nat64lsn_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set,
enum ipfw_sets_cmd cmd)
{
return (ipfw_obj_manage_sets(CHAIN_TO_SRV(ch), IPFW_TLV_NAT64LSN_NAME,
set, new_set, cmd));
}
static struct opcode_obj_rewrite opcodes[] = {
{
.opcode = O_EXTERNAL_INSTANCE,
.etlv = IPFW_TLV_EACTION /* just to show it isn't a table */,
.classifier = nat64lsn_classify,
.update = nat64lsn_update_arg1,
.find_byname = nat64lsn_findbyname,
.find_bykidx = nat64lsn_findbykidx,
.manage_sets = nat64lsn_manage_sets,
},
};
static int
destroy_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nat64lsn_cfg *cfg;
struct ip_fw_chain *ch;
ch = (struct ip_fw_chain *)arg;
cfg = (struct nat64lsn_cfg *)SRV_OBJECT(ch, no->kidx);
SRV_OBJECT(ch, no->kidx) = NULL;
nat64lsn_detach_config(ch, cfg);
nat64lsn_destroy_instance(cfg);
return (0);
}
int
nat64lsn_init(struct ip_fw_chain *ch, int first)
{
if (first != 0)
nat64lsn_init_internal();
V_nat64lsn_eid = ipfw_add_eaction(ch, ipfw_nat64lsn, "nat64lsn");
if (V_nat64lsn_eid == 0)
return (ENXIO);
IPFW_ADD_SOPT_HANDLER(first, scodes);
IPFW_ADD_OBJ_REWRITER(first, opcodes);
return (0);
}
void
nat64lsn_uninit(struct ip_fw_chain *ch, int last)
{
IPFW_DEL_OBJ_REWRITER(last, opcodes);
IPFW_DEL_SOPT_HANDLER(last, scodes);
ipfw_del_eaction(ch, V_nat64lsn_eid);
/*
* Since we have already deregistered the external action,
* our named objects became inaccessible via rules, because
* all such rules were truncated by ipfw_del_eaction().
* So, we can unlink and destroy our named objects without holding
* IPFW_WLOCK().
*/
IPFW_UH_WLOCK(ch);
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), destroy_config_cb, ch,
IPFW_TLV_NAT64LSN_NAME);
V_nat64lsn_eid = 0;
IPFW_UH_WUNLOCK(ch);
if (last != 0)
nat64lsn_uninit_internal();
}


@@ -1,264 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_pflog.h>
#include <net/pfil.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/ip_fw_nat64.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nat64/ip_fw_nat64.h>
#include <netpfil/ipfw/nat64/nat64_translate.h>
#include <netpfil/ipfw/nat64/nat64stl.h>
#include <netpfil/pf/pf.h>
#define NAT64_LOOKUP(chain, cmd) \
(struct nat64stl_cfg *)SRV_OBJECT((chain), (cmd)->arg1)
static void
nat64stl_log(struct pfloghdr *plog, struct mbuf *m, sa_family_t family,
uint32_t kidx)
{
static uint32_t pktid = 0;
memset(plog, 0, sizeof(*plog));
plog->length = PFLOG_REAL_HDRLEN;
plog->af = family;
plog->action = PF_NAT;
plog->dir = PF_IN;
plog->rulenr = htonl(kidx);
plog->subrulenr = htonl(++pktid);
plog->ruleset[0] = '\0';
strlcpy(plog->ifname, "NAT64STL", sizeof(plog->ifname));
ipfw_bpf_mtap2(plog, PFLOG_HDRLEN, m);
}
static int
nat64stl_handle_ip4(struct ip_fw_chain *chain, struct nat64stl_cfg *cfg,
struct mbuf *m, uint32_t tablearg)
{
struct pfloghdr loghdr, *logdata;
struct in6_addr saddr, daddr;
struct ip *ip;
ip = mtod(m, struct ip*);
if (nat64_check_ip4(ip->ip_src.s_addr) != 0 ||
nat64_check_ip4(ip->ip_dst.s_addr) != 0 ||
nat64_check_private_ip4(ip->ip_src.s_addr) != 0 ||
nat64_check_private_ip4(ip->ip_dst.s_addr) != 0)
return (NAT64SKIP);
daddr = TARG_VAL(chain, tablearg, nh6);
if (nat64_check_ip6(&daddr) != 0)
return (NAT64MFREE);
saddr = cfg->prefix6;
nat64_set_ip4(&saddr, ip->ip_src.s_addr);
if (cfg->flags & NAT64_LOG) {
logdata = &loghdr;
nat64stl_log(logdata, m, AF_INET, cfg->no.kidx);
} else
logdata = NULL;
return (nat64_do_handle_ip4(m, &saddr, &daddr, 0, &cfg->stats,
logdata));
}
static int
nat64stl_handle_ip6(struct ip_fw_chain *chain, struct nat64stl_cfg *cfg,
struct mbuf *m, uint32_t tablearg)
{
struct pfloghdr loghdr, *logdata;
struct ip6_hdr *ip6;
uint32_t aaddr;
aaddr = htonl(TARG_VAL(chain, tablearg, nh4));
/*
* NOTE: we expect that ipfw_chk() has done m_pullup() up to the upper
* level protocol's headers.  We also skip some checks that ip6_input(),
* ip6_forward(), ip6_fastfwd() and ipfw_chk() have already done.
*/
ip6 = mtod(m, struct ip6_hdr *);
/* Check that ip6_dst matches the configured prefix */
if (bcmp(&ip6->ip6_dst, &cfg->prefix6, cfg->plen6 / 8) != 0)
return (NAT64SKIP);
if (cfg->flags & NAT64_LOG) {
logdata = &loghdr;
nat64stl_log(logdata, m, AF_INET6, cfg->no.kidx);
} else
logdata = NULL;
return (nat64_do_handle_ip6(m, aaddr, 0, &cfg->stats, logdata));
}
static int
nat64stl_handle_icmp6(struct ip_fw_chain *chain, struct nat64stl_cfg *cfg,
struct mbuf *m)
{
struct pfloghdr loghdr, *logdata;
nat64_stats_block *stats;
struct ip6_hdr *ip6i;
struct icmp6_hdr *icmp6;
uint32_t tablearg;
int hlen, proto;
hlen = 0;
stats = &cfg->stats;
proto = nat64_getlasthdr(m, &hlen);
if (proto != IPPROTO_ICMPV6) {
NAT64STAT_INC(stats, dropped);
return (NAT64MFREE);
}
icmp6 = mtodo(m, hlen);
switch (icmp6->icmp6_type) {
case ICMP6_DST_UNREACH:
case ICMP6_PACKET_TOO_BIG:
case ICMP6_TIME_EXCEED_TRANSIT:
case ICMP6_PARAM_PROB:
break;
default:
NAT64STAT_INC(stats, dropped);
return (NAT64MFREE);
}
hlen += sizeof(struct icmp6_hdr);
if (m->m_pkthdr.len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN) {
NAT64STAT_INC(stats, dropped);
return (NAT64MFREE);
}
if (m->m_len < hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN)
m = m_pullup(m, hlen + sizeof(struct ip6_hdr) + ICMP_MINLEN);
if (m == NULL) {
NAT64STAT_INC(stats, nomem);
return (NAT64RETURN);
}
/*
* Use the destination address from the inner IPv6 header to
* determine the IPv4-mapped address.
*/
ip6i = mtodo(m, hlen);
if (ipfw_lookup_table(chain, cfg->map64,
sizeof(struct in6_addr), &ip6i->ip6_dst, &tablearg) == 0) {
m_freem(m);
return (NAT64RETURN);
}
if (cfg->flags & NAT64_LOG) {
logdata = &loghdr;
nat64stl_log(logdata, m, AF_INET6, cfg->no.kidx);
} else
logdata = NULL;
return (nat64_handle_icmp6(m, 0,
htonl(TARG_VAL(chain, tablearg, nh4)), 0, stats, logdata));
}
int
ipfw_nat64stl(struct ip_fw_chain *chain, struct ip_fw_args *args,
ipfw_insn *cmd, int *done)
{
ipfw_insn *icmd;
struct nat64stl_cfg *cfg;
in_addr_t dst4;
uint32_t tablearg;
int ret;
IPFW_RLOCK_ASSERT(chain);
*done = 0; /* try next rule if not matched */
icmd = cmd + 1;
if (cmd->opcode != O_EXTERNAL_ACTION ||
cmd->arg1 != V_nat64stl_eid ||
icmd->opcode != O_EXTERNAL_INSTANCE ||
(cfg = NAT64_LOOKUP(chain, icmd)) == NULL)
return (0);
switch (args->f_id.addr_type) {
case 4:
dst4 = htonl(args->f_id.dst_ip);
ret = ipfw_lookup_table(chain, cfg->map46, sizeof(in_addr_t),
&dst4, &tablearg);
break;
case 6:
ret = ipfw_lookup_table(chain, cfg->map64,
sizeof(struct in6_addr), &args->f_id.src_ip6, &tablearg);
break;
default:
return (0);
}
if (ret == 0) {
/*
* When the packet is an ICMPv6 message from an intermediate
* router, its source address will not match the addresses
* in our map64 table.
*/
if (args->f_id.proto != IPPROTO_ICMPV6)
return (0);
ret = nat64stl_handle_icmp6(chain, cfg, args->m);
} else {
if (args->f_id.addr_type == 4)
ret = nat64stl_handle_ip4(chain, cfg, args->m,
tablearg);
else
ret = nat64stl_handle_ip6(chain, cfg, args->m,
tablearg);
}
if (ret == NAT64SKIP)
return (0);
*done = 1; /* terminate the search */
if (ret == NAT64MFREE)
m_freem(args->m);
args->m = NULL;
return (IP_FW_DENY);
}


@@ -1,58 +0,0 @@
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IP_FW_NAT64STL_H_
#define _IP_FW_NAT64STL_H_
struct nat64stl_cfg {
struct named_object no;
uint16_t map64; /* table with 6to4 mapping */
uint16_t map46; /* table with 4to6 mapping */
struct in6_addr prefix6;/* IPv6 prefix */
uint8_t plen6; /* prefix length */
uint8_t flags; /* flags for internal use */
#define NAT64STL_KIDX 0x0100
#define NAT64STL_46T 0x0200
#define NAT64STL_64T 0x0400
#define NAT64STL_FLAGSMASK (NAT64_LOG) /* flags to pass to userland */
char name[64];
nat64_stats_block stats;
};
VNET_DECLARE(uint16_t, nat64stl_eid);
#define V_nat64stl_eid VNET(nat64stl_eid)
#define IPFW_TLV_NAT64STL_NAME IPFW_TLV_EACTION_NAME(V_nat64stl_eid)
int ipfw_nat64stl(struct ip_fw_chain *chain, struct ip_fw_args *args,
ipfw_insn *cmd, int *done);
#endif


@@ -1,623 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2015-2016 Yandex LLC
* Copyright (c) 2015-2016 Andrey V. Elsukov <ae@FreeBSD.org>
* Copyright (c) 2015 Alexander V. Chernikov <melifaro@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sockopt.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/pfil.h>
#include <net/route.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nat64/ip_fw_nat64.h>
#include <netpfil/ipfw/nat64/nat64stl.h>
#include <netinet6/ip_fw_nat64.h>
VNET_DEFINE(uint16_t, nat64stl_eid) = 0;
static struct nat64stl_cfg *nat64stl_alloc_config(const char *name, uint8_t set);
static void nat64stl_free_config(struct nat64stl_cfg *cfg);
static struct nat64stl_cfg *nat64stl_find(struct namedobj_instance *ni,
const char *name, uint8_t set);
static struct nat64stl_cfg *
nat64stl_alloc_config(const char *name, uint8_t set)
{
struct nat64stl_cfg *cfg;
cfg = malloc(sizeof(struct nat64stl_cfg), M_IPFW, M_WAITOK | M_ZERO);
COUNTER_ARRAY_ALLOC(cfg->stats.stats, NAT64STATS, M_WAITOK);
cfg->no.name = cfg->name;
cfg->no.etlv = IPFW_TLV_NAT64STL_NAME;
cfg->no.set = set;
strlcpy(cfg->name, name, sizeof(cfg->name));
return (cfg);
}
static void
nat64stl_free_config(struct nat64stl_cfg *cfg)
{
COUNTER_ARRAY_FREE(cfg->stats.stats, NAT64STATS);
free(cfg, M_IPFW);
}
static void
nat64stl_export_config(struct ip_fw_chain *ch, struct nat64stl_cfg *cfg,
ipfw_nat64stl_cfg *uc)
{
struct named_object *no;
uc->prefix6 = cfg->prefix6;
uc->plen6 = cfg->plen6;
uc->flags = cfg->flags & NAT64STL_FLAGSMASK;
uc->set = cfg->no.set;
strlcpy(uc->name, cfg->no.name, sizeof(uc->name));
no = ipfw_objhash_lookup_table_kidx(ch, cfg->map64);
ipfw_export_obj_ntlv(no, &uc->ntlv6);
no = ipfw_objhash_lookup_table_kidx(ch, cfg->map46);
ipfw_export_obj_ntlv(no, &uc->ntlv4);
}
struct nat64stl_dump_arg {
struct ip_fw_chain *ch;
struct sockopt_data *sd;
};
static int
export_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nat64stl_dump_arg *da = (struct nat64stl_dump_arg *)arg;
ipfw_nat64stl_cfg *uc;
uc = (ipfw_nat64stl_cfg *)ipfw_get_sopt_space(da->sd, sizeof(*uc));
nat64stl_export_config(da->ch, (struct nat64stl_cfg *)no, uc);
return (0);
}
static struct nat64stl_cfg *
nat64stl_find(struct namedobj_instance *ni, const char *name, uint8_t set)
{
struct nat64stl_cfg *cfg;
cfg = (struct nat64stl_cfg *)ipfw_objhash_lookup_name_type(ni, set,
IPFW_TLV_NAT64STL_NAME, name);
return (cfg);
}
static int
nat64stl_create_internal(struct ip_fw_chain *ch, struct nat64stl_cfg *cfg,
ipfw_nat64stl_cfg *i)
{
IPFW_UH_WLOCK_ASSERT(ch);
if (ipfw_objhash_alloc_idx(CHAIN_TO_SRV(ch), &cfg->no.kidx) != 0)
return (ENOSPC);
cfg->flags |= NAT64STL_KIDX;
if (ipfw_ref_table(ch, &i->ntlv4, &cfg->map46) != 0)
return (EINVAL);
cfg->flags |= NAT64STL_46T;
if (ipfw_ref_table(ch, &i->ntlv6, &cfg->map64) != 0)
return (EINVAL);
cfg->flags |= NAT64STL_64T;
ipfw_objhash_add(CHAIN_TO_SRV(ch), &cfg->no);
return (0);
}
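/*
 * Note (illustrative, not part of the original file): the NAT64STL_KIDX,
 * NAT64STL_46T and NAT64STL_64T flags record which resources (object
 * index, 4->6 table reference, 6->4 table reference) have already been
 * acquired, so that nat64stl_create() below can roll back exactly those
 * on a partial failure.
 */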
/*
* Creates new nat64 instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ipfw_nat64stl_cfg ]
*
* Returns 0 on success
*/
static int
nat64stl_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_lheader *olh;
ipfw_nat64stl_cfg *uc;
struct namedobj_instance *ni;
struct nat64stl_cfg *cfg;
int error;
if (sd->valsize != sizeof(*olh) + sizeof(*uc))
return (EINVAL);
olh = (ipfw_obj_lheader *)sd->kbuf;
uc = (ipfw_nat64stl_cfg *)(olh + 1);
if (ipfw_check_object_name_generic(uc->name) != 0)
return (EINVAL);
if (!IN6_IS_ADDR_WKPFX(&uc->prefix6))
return (EINVAL);
if (uc->plen6 != 96 || uc->set >= IPFW_MAX_SETS)
return (EINVAL);
/* XXX: check types of tables */
ni = CHAIN_TO_SRV(ch);
error = 0;
IPFW_UH_RLOCK(ch);
if (nat64stl_find(ni, uc->name, uc->set) != NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
}
IPFW_UH_RUNLOCK(ch);
cfg = nat64stl_alloc_config(uc->name, uc->set);
cfg->prefix6 = uc->prefix6;
cfg->plen6 = uc->plen6;
cfg->flags = uc->flags & NAT64STL_FLAGSMASK;
IPFW_UH_WLOCK(ch);
if (nat64stl_find(ni, uc->name, uc->set) != NULL) {
IPFW_UH_WUNLOCK(ch);
nat64stl_free_config(cfg);
return (EEXIST);
}
error = nat64stl_create_internal(ch, cfg, uc);
if (error == 0) {
/* Okay, let's link data */
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = cfg;
IPFW_WUNLOCK(ch);
IPFW_UH_WUNLOCK(ch);
return (0);
}
if (cfg->flags & NAT64STL_KIDX)
ipfw_objhash_free_idx(ni, cfg->no.kidx);
if (cfg->flags & NAT64STL_46T)
ipfw_unref_table(ch, cfg->map46);
if (cfg->flags & NAT64STL_64T)
ipfw_unref_table(ch, cfg->map64);
IPFW_UH_WUNLOCK(ch);
nat64stl_free_config(cfg);
return (error);
}
/*
* Change existing nat64stl instance configuration.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ipfw_nat64stl_cfg ]
* Reply: [ ipfw_obj_header ipfw_nat64stl_cfg ]
*
* Returns 0 on success
*/
static int
nat64stl_config(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
ipfw_nat64stl_cfg *uc;
struct nat64stl_cfg *cfg;
struct namedobj_instance *ni;
if (sd->valsize != sizeof(*oh) + sizeof(*uc))
return (EINVAL);
oh = (ipfw_obj_header *)ipfw_get_sopt_space(sd,
sizeof(*oh) + sizeof(*uc));
uc = (ipfw_nat64stl_cfg *)(oh + 1);
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
ni = CHAIN_TO_SRV(ch);
if (sd->sopt->sopt_dir == SOPT_GET) {
IPFW_UH_RLOCK(ch);
cfg = nat64stl_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
}
nat64stl_export_config(ch, cfg, uc);
IPFW_UH_RUNLOCK(ch);
return (0);
}
IPFW_UH_WLOCK(ch);
cfg = nat64stl_find(ni, oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (EEXIST);
}
/*
* For now, only the following values may be changed:
* flags.
*/
cfg->flags = uc->flags & NAT64STL_FLAGSMASK;
IPFW_UH_WUNLOCK(ch);
return (0);
}
static void
nat64stl_detach_config(struct ip_fw_chain *ch, struct nat64stl_cfg *cfg)
{
IPFW_UH_WLOCK_ASSERT(ch);
ipfw_objhash_del(CHAIN_TO_SRV(ch), &cfg->no);
ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), cfg->no.kidx);
ipfw_unref_table(ch, cfg->map46);
ipfw_unref_table(ch, cfg->map64);
}
/*
* Destroys nat64 instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nat64stl_destroy(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
struct nat64stl_cfg *cfg;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0)
return (EINVAL);
IPFW_UH_WLOCK(ch);
cfg = nat64stl_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
if (cfg->no.refcnt > 0) {
IPFW_UH_WUNLOCK(ch);
return (EBUSY);
}
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = NULL;
IPFW_WUNLOCK(ch);
nat64stl_detach_config(ch, cfg);
IPFW_UH_WUNLOCK(ch);
nat64stl_free_config(cfg);
return (0);
}
/*
* Lists all nat64stl instances currently available in kernel.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ]
* Reply: [ ipfw_obj_lheader ipfw_nat64stl_cfg x N ]
*
* Returns 0 on success
*/
static int
nat64stl_list(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_lheader *olh;
struct nat64stl_dump_arg da;
/* Check minimum header size */
if (sd->valsize < sizeof(ipfw_obj_lheader))
return (EINVAL);
olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
IPFW_UH_RLOCK(ch);
olh->count = ipfw_objhash_count_type(CHAIN_TO_SRV(ch),
IPFW_TLV_NAT64STL_NAME);
olh->objsize = sizeof(ipfw_nat64stl_cfg);
olh->size = sizeof(*olh) + olh->count * olh->objsize;
if (sd->valsize < olh->size) {
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
memset(&da, 0, sizeof(da));
da.ch = ch;
da.sd = sd;
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), export_config_cb,
&da, IPFW_TLV_NAT64STL_NAME);
IPFW_UH_RUNLOCK(ch);
return (0);
}
#define __COPY_STAT_FIELD(_cfg, _stats, _field) \
(_stats)->_field = NAT64STAT_FETCH(&(_cfg)->stats, _field)
static void
export_stats(struct ip_fw_chain *ch, struct nat64stl_cfg *cfg,
struct ipfw_nat64stl_stats *stats)
{
__COPY_STAT_FIELD(cfg, stats, opcnt64);
__COPY_STAT_FIELD(cfg, stats, opcnt46);
__COPY_STAT_FIELD(cfg, stats, ofrags);
__COPY_STAT_FIELD(cfg, stats, ifrags);
__COPY_STAT_FIELD(cfg, stats, oerrors);
__COPY_STAT_FIELD(cfg, stats, noroute4);
__COPY_STAT_FIELD(cfg, stats, noroute6);
__COPY_STAT_FIELD(cfg, stats, noproto);
__COPY_STAT_FIELD(cfg, stats, nomem);
__COPY_STAT_FIELD(cfg, stats, dropped);
}
/*
* Get nat64stl statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
* Reply: [ ipfw_obj_header ipfw_obj_ctlv [ uint64_t x N ]]
*
* Returns 0 on success
*/
static int
nat64stl_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct ipfw_nat64stl_stats stats;
struct nat64stl_cfg *cfg;
ipfw_obj_header *oh;
ipfw_obj_ctlv *ctlv;
size_t sz;
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_ctlv) + sizeof(stats);
if (sd->valsize % sizeof(uint64_t))
return (EINVAL);
if (sd->valsize < sz)
return (ENOMEM);
oh = (ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
if (oh == NULL)
return (EINVAL);
memset(&stats, 0, sizeof(stats));
IPFW_UH_RLOCK(ch);
cfg = nat64stl_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
}
export_stats(ch, cfg, &stats);
IPFW_UH_RUNLOCK(ch);
ctlv = (ipfw_obj_ctlv *)(oh + 1);
memset(ctlv, 0, sizeof(*ctlv));
ctlv->head.type = IPFW_TLV_COUNTERS;
ctlv->head.length = sz - sizeof(ipfw_obj_header);
ctlv->count = sizeof(stats) / sizeof(uint64_t);
ctlv->objsize = sizeof(uint64_t);
ctlv->version = IPFW_NAT64_VERSION;
memcpy(ctlv + 1, &stats, sizeof(stats));
return (0);
}
/*
* Reset nat64stl statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nat64stl_reset_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct nat64stl_cfg *cfg;
ipfw_obj_header *oh;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
IPFW_UH_WLOCK(ch);
cfg = nat64stl_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
COUNTER_ARRAY_ZERO(cfg->stats.stats, NAT64STATS);
IPFW_UH_WUNLOCK(ch);
return (0);
}
static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_NAT64STL_CREATE, 0, HDIR_SET, nat64stl_create },
{ IP_FW_NAT64STL_DESTROY,0, HDIR_SET, nat64stl_destroy },
{ IP_FW_NAT64STL_CONFIG, 0, HDIR_BOTH, nat64stl_config },
{ IP_FW_NAT64STL_LIST, 0, HDIR_GET, nat64stl_list },
{ IP_FW_NAT64STL_STATS, 0, HDIR_GET, nat64stl_stats },
{ IP_FW_NAT64STL_RESET_STATS,0, HDIR_SET, nat64stl_reset_stats },
};
static int
nat64stl_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{
ipfw_insn *icmd;
icmd = cmd - 1;
if (icmd->opcode != O_EXTERNAL_ACTION ||
icmd->arg1 != V_nat64stl_eid)
return (1);
*puidx = cmd->arg1;
*ptype = 0;
return (0);
}
static void
nat64stl_update_arg1(ipfw_insn *cmd, uint16_t idx)
{
cmd->arg1 = idx;
}
static int
nat64stl_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
struct named_object **pno)
{
int err;
err = ipfw_objhash_find_type(CHAIN_TO_SRV(ch), ti,
IPFW_TLV_NAT64STL_NAME, pno);
return (err);
}
static struct named_object *
nat64stl_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{
struct namedobj_instance *ni;
struct named_object *no;
IPFW_UH_WLOCK_ASSERT(ch);
ni = CHAIN_TO_SRV(ch);
no = ipfw_objhash_lookup_kidx(ni, idx);
KASSERT(no != NULL, ("NAT with index %d not found", idx));
return (no);
}
static int
nat64stl_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set,
enum ipfw_sets_cmd cmd)
{
return (ipfw_obj_manage_sets(CHAIN_TO_SRV(ch), IPFW_TLV_NAT64STL_NAME,
set, new_set, cmd));
}
static struct opcode_obj_rewrite opcodes[] = {
{
.opcode = O_EXTERNAL_INSTANCE,
.etlv = IPFW_TLV_EACTION /* just to show it isn't a table */,
.classifier = nat64stl_classify,
.update = nat64stl_update_arg1,
.find_byname = nat64stl_findbyname,
.find_bykidx = nat64stl_findbykidx,
.manage_sets = nat64stl_manage_sets,
},
};
static int
destroy_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nat64stl_cfg *cfg;
struct ip_fw_chain *ch;
ch = (struct ip_fw_chain *)arg;
cfg = (struct nat64stl_cfg *)SRV_OBJECT(ch, no->kidx);
SRV_OBJECT(ch, no->kidx) = NULL;
nat64stl_detach_config(ch, cfg);
nat64stl_free_config(cfg);
return (0);
}
int
nat64stl_init(struct ip_fw_chain *ch, int first)
{
V_nat64stl_eid = ipfw_add_eaction(ch, ipfw_nat64stl, "nat64stl");
if (V_nat64stl_eid == 0)
return (ENXIO);
IPFW_ADD_SOPT_HANDLER(first, scodes);
IPFW_ADD_OBJ_REWRITER(first, opcodes);
return (0);
}
void
nat64stl_uninit(struct ip_fw_chain *ch, int last)
{
IPFW_DEL_OBJ_REWRITER(last, opcodes);
IPFW_DEL_SOPT_HANDLER(last, scodes);
ipfw_del_eaction(ch, V_nat64stl_eid);
/*
* Since we have already deregistered the external action,
* our named objects became inaccessible via rules, because
* all such rules were truncated by ipfw_del_eaction().
* So, we can unlink and destroy our named objects without holding
* IPFW_WLOCK().
*/
IPFW_UH_WLOCK(ch);
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), destroy_config_cb, ch,
IPFW_TLV_NAT64STL_NAME);
V_nat64stl_eid = 0;
IPFW_UH_WUNLOCK(ch);
}


@@ -1,101 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nptv6/nptv6.h>
static int
vnet_ipfw_nptv6_init(const void *arg __unused)
{
return (nptv6_init(&V_layer3_chain, IS_DEFAULT_VNET(curvnet)));
}
static int
vnet_ipfw_nptv6_uninit(const void *arg __unused)
{
nptv6_uninit(&V_layer3_chain, IS_DEFAULT_VNET(curvnet));
return (0);
}
static int
ipfw_nptv6_modevent(module_t mod, int type, void *unused)
{
switch (type) {
case MOD_LOAD:
case MOD_UNLOAD:
break;
default:
return (EOPNOTSUPP);
}
return (0);
}
static moduledata_t ipfw_nptv6_mod = {
"ipfw_nptv6",
ipfw_nptv6_modevent,
0
};
/* Define startup order. */
#define IPFW_NPTV6_SI_SUB_FIREWALL SI_SUB_PROTO_IFATTACHDOMAIN
#define IPFW_NPTV6_MODEVENT_ORDER (SI_ORDER_ANY - 128) /* after ipfw */
#define IPFW_NPTV6_MODULE_ORDER (IPFW_NPTV6_MODEVENT_ORDER + 1)
#define IPFW_NPTV6_VNET_ORDER (IPFW_NPTV6_MODEVENT_ORDER + 2)
DECLARE_MODULE(ipfw_nptv6, ipfw_nptv6_mod, IPFW_NPTV6_SI_SUB_FIREWALL,
IPFW_NPTV6_MODULE_ORDER);
MODULE_DEPEND(ipfw_nptv6, ipfw, 3, 3, 3);
MODULE_VERSION(ipfw_nptv6, 1);
VNET_SYSINIT(vnet_ipfw_nptv6_init, IPFW_NPTV6_SI_SUB_FIREWALL,
IPFW_NPTV6_VNET_ORDER, vnet_ipfw_nptv6_init, NULL);
VNET_SYSUNINIT(vnet_ipfw_nptv6_uninit, IPFW_NPTV6_SI_SUB_FIREWALL,
IPFW_NPTV6_VNET_ORDER, vnet_ipfw_nptv6_uninit, NULL);


@@ -1,896 +0,0 @@
#include <machine/rtems-bsd-kernel-space.h>
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/counter.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/rmlock.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/netisr.h>
#include <net/pfil.h>
#include <net/vnet.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>
#include <netinet/ip_fw.h>
#include <netinet/ip6.h>
#include <netinet/icmp6.h>
#include <netinet6/in6_var.h>
#include <netinet6/ip6_var.h>
#include <netpfil/ipfw/ip_fw_private.h>
#include <netpfil/ipfw/nptv6/nptv6.h>
static VNET_DEFINE(uint16_t, nptv6_eid) = 0;
#define V_nptv6_eid VNET(nptv6_eid)
#define IPFW_TLV_NPTV6_NAME IPFW_TLV_EACTION_NAME(V_nptv6_eid)
static struct nptv6_cfg *nptv6_alloc_config(const char *name, uint8_t set);
static void nptv6_free_config(struct nptv6_cfg *cfg);
static struct nptv6_cfg *nptv6_find(struct namedobj_instance *ni,
const char *name, uint8_t set);
static int nptv6_rewrite_internal(struct nptv6_cfg *cfg, struct mbuf **mp,
int offset);
static int nptv6_rewrite_external(struct nptv6_cfg *cfg, struct mbuf **mp,
int offset);
#define NPTV6_LOOKUP(chain, cmd) \
(struct nptv6_cfg *)SRV_OBJECT((chain), (cmd)->arg1)
#ifndef IN6_MASK_ADDR
#define IN6_MASK_ADDR(a, m) do { \
(a)->s6_addr32[0] &= (m)->s6_addr32[0]; \
(a)->s6_addr32[1] &= (m)->s6_addr32[1]; \
(a)->s6_addr32[2] &= (m)->s6_addr32[2]; \
(a)->s6_addr32[3] &= (m)->s6_addr32[3]; \
} while (0)
#endif
#ifndef IN6_ARE_MASKED_ADDR_EQUAL
#define IN6_ARE_MASKED_ADDR_EQUAL(d, a, m) ( \
(((d)->s6_addr32[0] ^ (a)->s6_addr32[0]) & (m)->s6_addr32[0]) == 0 && \
(((d)->s6_addr32[1] ^ (a)->s6_addr32[1]) & (m)->s6_addr32[1]) == 0 && \
(((d)->s6_addr32[2] ^ (a)->s6_addr32[2]) & (m)->s6_addr32[2]) == 0 && \
(((d)->s6_addr32[3] ^ (a)->s6_addr32[3]) & (m)->s6_addr32[3]) == 0 )
#endif
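/*
 * Illustrative example, not part of the original file: with a /48 prefix
 * mask only the upper 48 bits take part in the comparison, e.g.
 *
 *	2001:db8:1::1  and  2001:db8:1::ffff  under mask ffff:ffff:ffff::
 *
 * satisfy IN6_ARE_MASKED_ADDR_EQUAL(), while 2001:db8:2::1 does not.
 */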
#if 0
#define NPTV6_DEBUG(fmt, ...) do { \
printf("%s: " fmt "\n", __func__, ## __VA_ARGS__); \
} while (0)
#define NPTV6_IPDEBUG(fmt, ...) do { \
char _s[INET6_ADDRSTRLEN], _d[INET6_ADDRSTRLEN]; \
printf("%s: " fmt "\n", __func__, ## __VA_ARGS__); \
} while (0)
#else
#define NPTV6_DEBUG(fmt, ...)
#define NPTV6_IPDEBUG(fmt, ...)
#endif
static int
nptv6_getlasthdr(struct nptv6_cfg *cfg, struct mbuf *m, int *offset)
{
struct ip6_hdr *ip6;
struct ip6_hbh *hbh;
int proto, hlen;
hlen = (offset == NULL) ? 0: *offset;
if (m->m_len < hlen)
return (-1);
ip6 = mtodo(m, hlen);
hlen += sizeof(*ip6);
proto = ip6->ip6_nxt;
while (proto == IPPROTO_HOPOPTS || proto == IPPROTO_ROUTING ||
proto == IPPROTO_DSTOPTS) {
hbh = mtodo(m, hlen);
if (m->m_len < hlen)
return (-1);
proto = hbh->ip6h_nxt;
hlen += hbh->ip6h_len << 3;
}
if (offset != NULL)
*offset = hlen;
return (proto);
}
static int
nptv6_translate_icmpv6(struct nptv6_cfg *cfg, struct mbuf **mp, int offset)
{
struct icmp6_hdr *icmp6;
struct ip6_hdr *ip6;
struct mbuf *m;
m = *mp;
if (offset > m->m_len)
return (-1);
icmp6 = mtodo(m, offset);
NPTV6_DEBUG("ICMPv6 type %d", icmp6->icmp6_type);
switch (icmp6->icmp6_type) {
case ICMP6_DST_UNREACH:
case ICMP6_PACKET_TOO_BIG:
case ICMP6_TIME_EXCEEDED:
case ICMP6_PARAM_PROB:
break;
case ICMP6_ECHO_REQUEST:
case ICMP6_ECHO_REPLY:
/* nothing to translate */
return (0);
default:
/*
* XXX: We could add checks to avoid translating NDP and MLD
* messages.  Currently the user must explicitly allow these
* message types, otherwise the packets will be dropped.
*/
return (-1);
}
offset += sizeof(*icmp6);
if (offset + sizeof(*ip6) > m->m_pkthdr.len)
return (-1);
if (offset + sizeof(*ip6) > m->m_len)
*mp = m = m_pullup(m, offset + sizeof(*ip6));
if (m == NULL)
return (-1);
ip6 = mtodo(m, offset);
NPTV6_IPDEBUG("offset %d, %s -> %s %d", offset,
inet_ntop(AF_INET6, &ip6->ip6_src, _s, sizeof(_s)),
inet_ntop(AF_INET6, &ip6->ip6_dst, _d, sizeof(_d)),
ip6->ip6_nxt);
if (IN6_ARE_MASKED_ADDR_EQUAL(&ip6->ip6_src,
&cfg->external, &cfg->mask))
return (nptv6_rewrite_external(cfg, mp, offset));
else if (IN6_ARE_MASKED_ADDR_EQUAL(&ip6->ip6_dst,
&cfg->internal, &cfg->mask))
return (nptv6_rewrite_internal(cfg, mp, offset));
/*
* The addresses in the inner IPv6 header did not match
* our prefixes.
*/
return (-1);
}
static int
nptv6_search_index(struct nptv6_cfg *cfg, struct in6_addr *a)
{
int idx;
if (cfg->flags & NPTV6_48PLEN)
return (3);
/* Search suitable word index for adjustment */
for (idx = 4; idx < 8; idx++)
if (a->s6_addr16[idx] != 0xffff)
break;
/*
* RFC 6296 p3.7: If an NPTv6 Translator discovers a datagram with
* an IID of all-zeros while performing address mapping, that
* datagram MUST be dropped, and an ICMPv6 Parameter Problem error
* SHOULD be generated.
*/
if (idx == 8 ||
(a->s6_addr32[2] == 0 && a->s6_addr32[3] == 0))
return (-1);
return (idx);
}
static void
nptv6_copy_addr(struct in6_addr *src, struct in6_addr *dst,
struct in6_addr *mask)
{
int i;
for (i = 0; i < 8 && mask->s6_addr8[i] != 0; i++) {
dst->s6_addr8[i] &= ~mask->s6_addr8[i];
dst->s6_addr8[i] |= src->s6_addr8[i] & mask->s6_addr8[i];
}
}
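/*
 * Illustrative example, not part of the original file: nptv6_copy_addr()
 * only overwrites the bytes covered by the prefix mask, so with a /48
 * mask and src = 2001:db8:bbbb:: the destination address
 * 2001:db8:aaaa::1 becomes 2001:db8:bbbb::1, while the interface
 * identifier and any bits outside the masked prefix are preserved.
 */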
static int
nptv6_rewrite_internal(struct nptv6_cfg *cfg, struct mbuf **mp, int offset)
{
struct in6_addr *addr;
struct ip6_hdr *ip6;
int idx, proto;
uint16_t adj;
ip6 = mtodo(*mp, offset);
NPTV6_IPDEBUG("offset %d, %s -> %s %d", offset,
inet_ntop(AF_INET6, &ip6->ip6_src, _s, sizeof(_s)),
inet_ntop(AF_INET6, &ip6->ip6_dst, _d, sizeof(_d)),
ip6->ip6_nxt);
if (offset == 0)
addr = &ip6->ip6_src;
else {
/*
* When we are rewriting the inner IPv6 header, we need to
* rewrite the destination address back to the external prefix.
* The datagram in the ICMPv6 payload should look like it was
* sent from the external prefix.
*/
addr = &ip6->ip6_dst;
}
idx = nptv6_search_index(cfg, addr);
if (idx < 0) {
/*
* Do not send ICMPv6 error when offset isn't zero.
* This means we are rewriting inner IPv6 header in the
* ICMPv6 error message.
*/
if (offset == 0) {
icmp6_error2(*mp, ICMP6_DST_UNREACH,
ICMP6_DST_UNREACH_ADDR, 0, (*mp)->m_pkthdr.rcvif);
*mp = NULL;
}
return (IP_FW_DENY);
}
adj = addr->s6_addr16[idx];
nptv6_copy_addr(&cfg->external, addr, &cfg->mask);
adj = cksum_add(adj, cfg->adjustment);
if (adj == 0xffff)
adj = 0;
addr->s6_addr16[idx] = adj;
if (offset == 0) {
/*
* We may need to translate addresses in the inner IPv6
* header for ICMPv6 error messages.
*/
proto = nptv6_getlasthdr(cfg, *mp, &offset);
if (proto < 0 || (proto == IPPROTO_ICMPV6 &&
nptv6_translate_icmpv6(cfg, mp, offset) != 0))
return (IP_FW_DENY);
NPTV6STAT_INC(cfg, in2ex);
}
return (0);
}
static int
nptv6_rewrite_external(struct nptv6_cfg *cfg, struct mbuf **mp, int offset)
{
struct in6_addr *addr;
struct ip6_hdr *ip6;
int idx, proto;
uint16_t adj;
ip6 = mtodo(*mp, offset);
NPTV6_IPDEBUG("offset %d, %s -> %s %d", offset,
inet_ntop(AF_INET6, &ip6->ip6_src, _s, sizeof(_s)),
inet_ntop(AF_INET6, &ip6->ip6_dst, _d, sizeof(_d)),
ip6->ip6_nxt);
if (offset == 0)
addr = &ip6->ip6_dst;
else {
/*
* When we are rewriting the inner IPv6 header, we need to
* rewrite the source address back to the internal prefix.
* The datagram in the ICMPv6 payload should look like it was
* sent from the internal prefix.
*/
addr = &ip6->ip6_src;
}
idx = nptv6_search_index(cfg, addr);
if (idx < 0) {
/*
* Do not send ICMPv6 error when offset isn't zero.
* This means we are rewriting inner IPv6 header in the
* ICMPv6 error message.
*/
if (offset == 0) {
icmp6_error2(*mp, ICMP6_DST_UNREACH,
ICMP6_DST_UNREACH_ADDR, 0, (*mp)->m_pkthdr.rcvif);
*mp = NULL;
}
return (IP_FW_DENY);
}
adj = addr->s6_addr16[idx];
nptv6_copy_addr(&cfg->internal, addr, &cfg->mask);
adj = cksum_add(adj, ~cfg->adjustment);
if (adj == 0xffff)
adj = 0;
addr->s6_addr16[idx] = adj;
if (offset == 0) {
/*
* We may need to translate addresses in the inner IPv6
* header for ICMPv6 error messages.
*/
proto = nptv6_getlasthdr(cfg, *mp, &offset);
if (proto < 0 || (proto == IPPROTO_ICMPV6 &&
nptv6_translate_icmpv6(cfg, mp, offset) != 0))
return (IP_FW_DENY);
NPTV6STAT_INC(cfg, ex2in);
}
return (0);
}
/*
* ipfw external action handler.
*/
static int
ipfw_nptv6(struct ip_fw_chain *chain, struct ip_fw_args *args,
ipfw_insn *cmd, int *done)
{
struct ip6_hdr *ip6;
struct nptv6_cfg *cfg;
ipfw_insn *icmd;
int ret;
*done = 0; /* try next rule if not matched */
ret = IP_FW_DENY;
icmd = cmd + 1;
if (cmd->opcode != O_EXTERNAL_ACTION ||
cmd->arg1 != V_nptv6_eid ||
icmd->opcode != O_EXTERNAL_INSTANCE ||
(cfg = NPTV6_LOOKUP(chain, icmd)) == NULL)
return (ret);
/*
* We need to act as a router, so when forwarding is disabled,
* do nothing.
*/
if (V_ip6_forwarding == 0 || args->f_id.addr_type != 6)
return (ret);
/*
* NOTE: we expect that ipfw_chk() has done m_pullup() up to the upper
* level protocol's headers.  We also skip some checks that ip6_input(),
* ip6_forward(), ip6_fastfwd() and ipfw_chk() have already done.
*/
ip6 = mtod(args->m, struct ip6_hdr *);
NPTV6_IPDEBUG("eid %u, oid %u, %s -> %s %d",
cmd->arg1, icmd->arg1,
inet_ntop(AF_INET6, &ip6->ip6_src, _s, sizeof(_s)),
inet_ntop(AF_INET6, &ip6->ip6_dst, _d, sizeof(_d)),
ip6->ip6_nxt);
if (IN6_ARE_MASKED_ADDR_EQUAL(&ip6->ip6_src,
&cfg->internal, &cfg->mask)) {
/*
* XXX: Do not translate packets when both src and dst
* are from internal prefix.
*/
if (IN6_ARE_MASKED_ADDR_EQUAL(&ip6->ip6_dst,
&cfg->internal, &cfg->mask))
return (ret);
ret = nptv6_rewrite_internal(cfg, &args->m, 0);
} else if (IN6_ARE_MASKED_ADDR_EQUAL(&ip6->ip6_dst,
&cfg->external, &cfg->mask))
ret = nptv6_rewrite_external(cfg, &args->m, 0);
else
return (ret);
/*
* If the address wasn't rewritten, free the mbuf and terminate the search.
*/
if (ret != 0) {
if (args->m != NULL) {
m_freem(args->m);
args->m = NULL; /* mark mbuf as consumed */
}
NPTV6STAT_INC(cfg, dropped);
*done = 1;
} else {
/* Terminate the search if one_pass is set */
*done = V_fw_one_pass;
/* Update args->f_id when one_pass is off */
if (*done == 0) {
ip6 = mtod(args->m, struct ip6_hdr *);
args->f_id.src_ip6 = ip6->ip6_src;
args->f_id.dst_ip6 = ip6->ip6_dst;
}
}
return (ret);
}
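/*
 * Illustrative sketch, not part of the removed sources: ipfw_nptv6()
 * above picks the translation direction by masked prefix comparison --
 * a source address inside the internal prefix is rewritten towards the
 * external prefix, a destination address inside the external prefix is
 * rewritten back, and anything else falls through to the next rule.
 * The helper below mimics the IN6_ARE_MASKED_ADDR_EQUAL() test with
 * plain POSIX types; masked_equal() is an invented name.
 */
#include <stdbool.h>
#include <string.h>
#include <netinet/in.h>

static bool
masked_equal(const struct in6_addr *a, const struct in6_addr *b,
    const struct in6_addr *mask)
{
	size_t i;

	for (i = 0; i < sizeof(a->s6_addr); i++)
		if ((a->s6_addr[i] & mask->s6_addr[i]) !=
		    (b->s6_addr[i] & mask->s6_addr[i]))
			return (false);
	return (true);
}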
static struct nptv6_cfg *
nptv6_alloc_config(const char *name, uint8_t set)
{
struct nptv6_cfg *cfg;
cfg = malloc(sizeof(struct nptv6_cfg), M_IPFW, M_WAITOK | M_ZERO);
COUNTER_ARRAY_ALLOC(cfg->stats, NPTV6STATS, M_WAITOK);
cfg->no.name = cfg->name;
cfg->no.etlv = IPFW_TLV_NPTV6_NAME;
cfg->no.set = set;
strlcpy(cfg->name, name, sizeof(cfg->name));
return (cfg);
}
static void
nptv6_free_config(struct nptv6_cfg *cfg)
{
COUNTER_ARRAY_FREE(cfg->stats, NPTV6STATS);
free(cfg, M_IPFW);
}
static void
nptv6_export_config(struct ip_fw_chain *ch, struct nptv6_cfg *cfg,
ipfw_nptv6_cfg *uc)
{
uc->internal = cfg->internal;
uc->external = cfg->external;
uc->plen = cfg->plen;
uc->flags = cfg->flags & NPTV6_FLAGSMASK;
uc->set = cfg->no.set;
strlcpy(uc->name, cfg->no.name, sizeof(uc->name));
}
struct nptv6_dump_arg {
struct ip_fw_chain *ch;
struct sockopt_data *sd;
};
static int
export_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nptv6_dump_arg *da = (struct nptv6_dump_arg *)arg;
ipfw_nptv6_cfg *uc;
uc = (ipfw_nptv6_cfg *)ipfw_get_sopt_space(da->sd, sizeof(*uc));
nptv6_export_config(da->ch, (struct nptv6_cfg *)no, uc);
return (0);
}
static struct nptv6_cfg *
nptv6_find(struct namedobj_instance *ni, const char *name, uint8_t set)
{
struct nptv6_cfg *cfg;
cfg = (struct nptv6_cfg *)ipfw_objhash_lookup_name_type(ni, set,
IPFW_TLV_NPTV6_NAME, name);
return (cfg);
}
static void
nptv6_calculate_adjustment(struct nptv6_cfg *cfg)
{
uint16_t i, e;
uint16_t *p;
/* Calculate checksum of internal prefix */
for (i = 0, p = (uint16_t *)&cfg->internal;
p < (uint16_t *)(&cfg->internal + 1); p++)
i = cksum_add(i, *p);
/* Calculate checksum of external prefix */
for (e = 0, p = (uint16_t *)&cfg->external;
p < (uint16_t *)(&cfg->external + 1); p++)
e = cksum_add(e, *p);
/* Adjustment value for Int->Ext direction */
cfg->adjustment = cksum_add(~e, i);
}
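/*
 * Illustrative sketch, not part of the removed sources:
 * nptv6_calculate_adjustment() above precomputes a 16-bit
 * one's-complement "adjustment" so that swapping the internal prefix
 * for the external one and folding the adjustment into a single 16-bit
 * word of the address leaves the one's-complement sum of the whole
 * address unchanged (the RFC 6296 checksum-neutral mapping).
 * ones_add() is a local stand-in for the kernel's cksum_add(); the
 * prefixes, address and word index are made-up values for the demo.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t
ones_add(uint16_t sum, uint16_t a)
{
	uint32_t r = (uint32_t)sum + a;

	return ((uint16_t)((r & 0xffff) + (r >> 16)));	/* end-around carry */
}

static uint16_t
ones_sum(const uint16_t *w, int n)
{
	uint16_t s = 0;

	while (n-- > 0)
		s = ones_add(s, *w++);
	return (s);
}

int
main(void)
{
	/* /48 prefixes as three 16-bit words each (host order for clarity). */
	uint16_t internal[3] = { 0xfd01, 0x0203, 0x0405 };
	uint16_t external[3] = { 0x2001, 0x0db8, 0x0001 };
	/* An address from the internal prefix; word 3 absorbs the change. */
	uint16_t addr[8] = { 0xfd01, 0x0203, 0x0405, 0x1111,
			     0x0000, 0x0000, 0x0000, 0x0042 };
	uint16_t before, after, adjustment, word;
	int i, idx = 3;

	/* Same computation as nptv6_calculate_adjustment(). */
	adjustment = ones_add((uint16_t)~ones_sum(external, 3),
	    ones_sum(internal, 3));

	before = ones_sum(addr, 8);

	/* Internal -> external rewrite: copy the prefix, then adjust. */
	for (i = 0; i < 3; i++)
		addr[i] = external[i];
	word = ones_add(addr[idx], adjustment);
	if (word == 0xffff)	/* 0xffff and 0x0000 both encode zero */
		word = 0;
	addr[idx] = word;

	after = ones_sum(addr, 8);
	/* Equal modulo the two one's-complement encodings of zero. */
	assert(before % 0xffff == after % 0xffff);
	printf("sum before 0x%04x, after 0x%04x\n", before, after);
	return (0);
}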
/*
* Creates new NPTv6 instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ipfw_nptv6_cfg ]
*
* Returns 0 on success
*/
static int
nptv6_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
struct in6_addr mask;
ipfw_obj_lheader *olh;
ipfw_nptv6_cfg *uc;
struct namedobj_instance *ni;
struct nptv6_cfg *cfg;
if (sd->valsize != sizeof(*olh) + sizeof(*uc))
return (EINVAL);
olh = (ipfw_obj_lheader *)sd->kbuf;
uc = (ipfw_nptv6_cfg *)(olh + 1);
if (ipfw_check_object_name_generic(uc->name) != 0)
return (EINVAL);
if (uc->plen < 8 || uc->plen > 64 || uc->set >= IPFW_MAX_SETS)
return (EINVAL);
if (IN6_IS_ADDR_MULTICAST(&uc->internal) ||
IN6_IS_ADDR_MULTICAST(&uc->external) ||
IN6_IS_ADDR_UNSPECIFIED(&uc->internal) ||
IN6_IS_ADDR_UNSPECIFIED(&uc->external) ||
IN6_IS_ADDR_LINKLOCAL(&uc->internal) ||
IN6_IS_ADDR_LINKLOCAL(&uc->external))
return (EINVAL);
in6_prefixlen2mask(&mask, uc->plen);
if (IN6_ARE_MASKED_ADDR_EQUAL(&uc->internal, &uc->external, &mask))
return (EINVAL);
ni = CHAIN_TO_SRV(ch);
IPFW_UH_RLOCK(ch);
if (nptv6_find(ni, uc->name, uc->set) != NULL) {
IPFW_UH_RUNLOCK(ch);
return (EEXIST);
}
IPFW_UH_RUNLOCK(ch);
cfg = nptv6_alloc_config(uc->name, uc->set);
cfg->plen = uc->plen;
if (cfg->plen <= 48)
cfg->flags |= NPTV6_48PLEN;
cfg->internal = uc->internal;
cfg->external = uc->external;
cfg->mask = mask;
IN6_MASK_ADDR(&cfg->internal, &mask);
IN6_MASK_ADDR(&cfg->external, &mask);
nptv6_calculate_adjustment(cfg);
IPFW_UH_WLOCK(ch);
if (ipfw_objhash_alloc_idx(ni, &cfg->no.kidx) != 0) {
IPFW_UH_WUNLOCK(ch);
nptv6_free_config(cfg);
return (ENOSPC);
}
ipfw_objhash_add(ni, &cfg->no);
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = cfg;
IPFW_WUNLOCK(ch);
IPFW_UH_WUNLOCK(ch);
return (0);
}
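/*
 * Illustrative sketch, not part of the removed sources: nptv6_create()
 * above normalizes both prefixes -- it builds a mask from uc->plen,
 * rejects prefixes that collide under that mask, and clears the host
 * bits with IN6_MASK_ADDR() before computing the adjustment.  The
 * helpers below approximate in6_prefixlen2mask() and the masking step
 * with plain POSIX types; the helper names are invented for the
 * example.
 */
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static void
prefixlen2mask(struct in6_addr *mask, int plen)
{
	int i;

	memset(mask, 0, sizeof(*mask));
	for (i = 0; i < plen / 8; i++)
		mask->s6_addr[i] = 0xff;
	if (plen % 8 != 0)
		mask->s6_addr[i] = (uint8_t)(0xff << (8 - plen % 8));
}

static void
mask_addr(struct in6_addr *addr, const struct in6_addr *mask)
{
	size_t i;

	for (i = 0; i < sizeof(addr->s6_addr); i++)
		addr->s6_addr[i] &= mask->s6_addr[i];
}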
/*
* Destroys NPTv6 instance.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nptv6_destroy(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_header *oh;
struct nptv6_cfg *cfg;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0)
return (EINVAL);
IPFW_UH_WLOCK(ch);
cfg = nptv6_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
if (cfg->no.refcnt > 0) {
IPFW_UH_WUNLOCK(ch);
return (EBUSY);
}
IPFW_WLOCK(ch);
SRV_OBJECT(ch, cfg->no.kidx) = NULL;
IPFW_WUNLOCK(ch);
ipfw_objhash_del(CHAIN_TO_SRV(ch), &cfg->no);
ipfw_objhash_free_idx(CHAIN_TO_SRV(ch), cfg->no.kidx);
IPFW_UH_WUNLOCK(ch);
nptv6_free_config(cfg);
return (0);
}
/*
* Get or change nptv6 instance config.
* Request: [ ipfw_obj_header [ ipfw_nptv6_cfg ] ]
*/
static int
nptv6_config(struct ip_fw_chain *chain, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
return (EOPNOTSUPP);
}
/*
* Lists all NPTv6 instances currently available in kernel.
* Data layout (v0)(current):
* Request: [ ipfw_obj_lheader ]
* Reply: [ ipfw_obj_lheader ipfw_nptv6_cfg x N ]
*
* Returns 0 on success
*/
static int
nptv6_list(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
struct sockopt_data *sd)
{
ipfw_obj_lheader *olh;
struct nptv6_dump_arg da;
/* Check minimum header size */
if (sd->valsize < sizeof(ipfw_obj_lheader))
return (EINVAL);
olh = (ipfw_obj_lheader *)ipfw_get_sopt_header(sd, sizeof(*olh));
IPFW_UH_RLOCK(ch);
olh->count = ipfw_objhash_count_type(CHAIN_TO_SRV(ch),
IPFW_TLV_NPTV6_NAME);
olh->objsize = sizeof(ipfw_nptv6_cfg);
olh->size = sizeof(*olh) + olh->count * olh->objsize;
if (sd->valsize < olh->size) {
IPFW_UH_RUNLOCK(ch);
return (ENOMEM);
}
memset(&da, 0, sizeof(da));
da.ch = ch;
da.sd = sd;
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), export_config_cb,
&da, IPFW_TLV_NPTV6_NAME);
IPFW_UH_RUNLOCK(ch);
return (0);
}
#define __COPY_STAT_FIELD(_cfg, _stats, _field) \
(_stats)->_field = NPTV6STAT_FETCH(_cfg, _field)
static void
export_stats(struct ip_fw_chain *ch, struct nptv6_cfg *cfg,
struct ipfw_nptv6_stats *stats)
{
__COPY_STAT_FIELD(cfg, stats, in2ex);
__COPY_STAT_FIELD(cfg, stats, ex2in);
__COPY_STAT_FIELD(cfg, stats, dropped);
}
/*
* Get NPTv6 statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
* Reply: [ ipfw_obj_header ipfw_obj_ctlv [ uint64_t x N ]]
*
* Returns 0 on success
*/
static int
nptv6_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct ipfw_nptv6_stats stats;
struct nptv6_cfg *cfg;
ipfw_obj_header *oh;
ipfw_obj_ctlv *ctlv;
size_t sz;
sz = sizeof(ipfw_obj_header) + sizeof(ipfw_obj_ctlv) + sizeof(stats);
if (sd->valsize % sizeof(uint64_t))
return (EINVAL);
if (sd->valsize < sz)
return (ENOMEM);
oh = (ipfw_obj_header *)ipfw_get_sopt_header(sd, sz);
if (oh == NULL)
return (EINVAL);
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
memset(&stats, 0, sizeof(stats));
IPFW_UH_RLOCK(ch);
cfg = nptv6_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_RUNLOCK(ch);
return (ESRCH);
}
export_stats(ch, cfg, &stats);
IPFW_UH_RUNLOCK(ch);
ctlv = (ipfw_obj_ctlv *)(oh + 1);
memset(ctlv, 0, sizeof(*ctlv));
ctlv->head.type = IPFW_TLV_COUNTERS;
ctlv->head.length = sz - sizeof(ipfw_obj_header);
ctlv->count = sizeof(stats) / sizeof(uint64_t);
ctlv->objsize = sizeof(uint64_t);
ctlv->version = 1;
memcpy(ctlv + 1, &stats, sizeof(stats));
return (0);
}
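/*
 * Illustrative sketch, not part of the removed sources: the reply
 * produced by nptv6_stats() above is an object header followed by an
 * IPFW_TLV_COUNTERS ctlv whose payload is an array of uint64_t
 * counters (in2ex, ex2in, dropped).  The program below builds and then
 * walks such a [ header | ctlv | counters ] buffer; struct demo_ctlv
 * and the 8-byte header size are simplified stand-ins, not copies of
 * the real ipfw_obj_* layouts.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_ctlv {		/* simplified stand-in for ipfw_obj_ctlv */
	uint16_t type;
	uint16_t length;	/* ctlv header plus payload, in bytes */
	uint32_t count;		/* number of uint64_t counters */
};

int
main(void)
{
	uint8_t reply[64] = { 0 };
	const size_t header_len = 8;	/* pretend object-header size */
	struct demo_ctlv ctlv = { .type = 1, .count = 3 };
	uint64_t counters[3] = { 10, 7, 2 };	/* in2ex, ex2in, dropped */
	uint64_t value;
	uint32_t i;

	/* Build the reply: [ header | ctlv | counters ]. */
	ctlv.length = (uint16_t)(sizeof(ctlv) + sizeof(counters));
	memcpy(reply + header_len, &ctlv, sizeof(ctlv));
	memcpy(reply + header_len + sizeof(ctlv), counters, sizeof(counters));

	/* Parse it back the way a sockopt caller would. */
	memcpy(&ctlv, reply + header_len, sizeof(ctlv));
	for (i = 0; i < ctlv.count; i++) {
		memcpy(&value, reply + header_len + sizeof(ctlv) +
		    i * sizeof(uint64_t), sizeof(value));
		printf("counter[%u] = %llu\n", i, (unsigned long long)value);
	}
	return (0);
}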
/*
* Reset NPTv6 statistics.
* Data layout (v0)(current):
* Request: [ ipfw_obj_header ]
*
* Returns 0 on success
*/
static int
nptv6_reset_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
struct sockopt_data *sd)
{
struct nptv6_cfg *cfg;
ipfw_obj_header *oh;
if (sd->valsize != sizeof(*oh))
return (EINVAL);
oh = (ipfw_obj_header *)sd->kbuf;
if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
oh->ntlv.set >= IPFW_MAX_SETS)
return (EINVAL);
IPFW_UH_WLOCK(ch);
cfg = nptv6_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
if (cfg == NULL) {
IPFW_UH_WUNLOCK(ch);
return (ESRCH);
}
COUNTER_ARRAY_ZERO(cfg->stats, NPTV6STATS);
IPFW_UH_WUNLOCK(ch);
return (0);
}
static struct ipfw_sopt_handler scodes[] = {
{ IP_FW_NPTV6_CREATE, 0, HDIR_SET, nptv6_create },
{ IP_FW_NPTV6_DESTROY,0, HDIR_SET, nptv6_destroy },
{ IP_FW_NPTV6_CONFIG, 0, HDIR_BOTH, nptv6_config },
{ IP_FW_NPTV6_LIST, 0, HDIR_GET, nptv6_list },
{ IP_FW_NPTV6_STATS, 0, HDIR_GET, nptv6_stats },
{ IP_FW_NPTV6_RESET_STATS,0, HDIR_SET, nptv6_reset_stats },
};
static int
nptv6_classify(ipfw_insn *cmd, uint16_t *puidx, uint8_t *ptype)
{
ipfw_insn *icmd;
icmd = cmd - 1;
NPTV6_DEBUG("opcode %d, arg1 %d, opcode0 %d, arg1 %d",
cmd->opcode, cmd->arg1, icmd->opcode, icmd->arg1);
if (icmd->opcode != O_EXTERNAL_ACTION ||
icmd->arg1 != V_nptv6_eid)
return (1);
*puidx = cmd->arg1;
*ptype = 0;
return (0);
}
static void
nptv6_update_arg1(ipfw_insn *cmd, uint16_t idx)
{
cmd->arg1 = idx;
NPTV6_DEBUG("opcode %d, arg1 -> %d", cmd->opcode, cmd->arg1);
}
static int
nptv6_findbyname(struct ip_fw_chain *ch, struct tid_info *ti,
struct named_object **pno)
{
int err;
err = ipfw_objhash_find_type(CHAIN_TO_SRV(ch), ti,
IPFW_TLV_NPTV6_NAME, pno);
NPTV6_DEBUG("uidx %u, type %u, err %d", ti->uidx, ti->type, err);
return (err);
}
static struct named_object *
nptv6_findbykidx(struct ip_fw_chain *ch, uint16_t idx)
{
struct namedobj_instance *ni;
struct named_object *no;
IPFW_UH_WLOCK_ASSERT(ch);
ni = CHAIN_TO_SRV(ch);
no = ipfw_objhash_lookup_kidx(ni, idx);
KASSERT(no != NULL, ("NPT with index %d not found", idx));
NPTV6_DEBUG("kidx %u -> %s", idx, no->name);
return (no);
}
static int
nptv6_manage_sets(struct ip_fw_chain *ch, uint16_t set, uint8_t new_set,
enum ipfw_sets_cmd cmd)
{
return (ipfw_obj_manage_sets(CHAIN_TO_SRV(ch), IPFW_TLV_NPTV6_NAME,
set, new_set, cmd));
}
static struct opcode_obj_rewrite opcodes[] = {
{
.opcode = O_EXTERNAL_INSTANCE,
.etlv = IPFW_TLV_EACTION /* just to show it isn't a table */,
.classifier = nptv6_classify,
.update = nptv6_update_arg1,
.find_byname = nptv6_findbyname,
.find_bykidx = nptv6_findbykidx,
.manage_sets = nptv6_manage_sets,
},
};
static int
destroy_config_cb(struct namedobj_instance *ni, struct named_object *no,
void *arg)
{
struct nptv6_cfg *cfg;
struct ip_fw_chain *ch;
ch = (struct ip_fw_chain *)arg;
IPFW_UH_WLOCK_ASSERT(ch);
cfg = (struct nptv6_cfg *)SRV_OBJECT(ch, no->kidx);
SRV_OBJECT(ch, no->kidx) = NULL;
ipfw_objhash_del(ni, &cfg->no);
ipfw_objhash_free_idx(ni, cfg->no.kidx);
nptv6_free_config(cfg);
return (0);
}
int
nptv6_init(struct ip_fw_chain *ch, int first)
{
V_nptv6_eid = ipfw_add_eaction(ch, ipfw_nptv6, "nptv6");
if (V_nptv6_eid == 0)
return (ENXIO);
IPFW_ADD_SOPT_HANDLER(first, scodes);
IPFW_ADD_OBJ_REWRITER(first, opcodes);
return (0);
}
void
nptv6_uninit(struct ip_fw_chain *ch, int last)
{
IPFW_DEL_OBJ_REWRITER(last, opcodes);
IPFW_DEL_SOPT_HANDLER(last, scodes);
ipfw_del_eaction(ch, V_nptv6_eid);
/*
 * Since we have already deregistered the external action,
 * our named objects become inaccessible via rules, because
 * all rules were truncated by ipfw_del_eaction().
 * So we can unlink and destroy our named objects without holding
 * IPFW_WLOCK().
 */
IPFW_UH_WLOCK(ch);
ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), destroy_config_cb, ch,
IPFW_TLV_NPTV6_NAME);
V_nptv6_eid = 0;
IPFW_UH_WUNLOCK(ch);
}

View File

@@ -1,65 +0,0 @@
/*-
* Copyright (c) 2016 Yandex LLC
* Copyright (c) 2016 Andrey V. Elsukov <ae@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _IP_FW_NPTV6_H_
#define _IP_FW_NPTV6_H_
#include <netinet6/ip_fw_nptv6.h>
#ifdef _KERNEL
#define NPTV6STATS (sizeof(struct ipfw_nptv6_stats) / sizeof(uint64_t))
#define NPTV6STAT_ADD(c, f, v) \
counter_u64_add((c)->stats[ \
offsetof(struct ipfw_nptv6_stats, f) / sizeof(uint64_t)], (v))
#define NPTV6STAT_INC(c, f) NPTV6STAT_ADD(c, f, 1)
#define NPTV6STAT_FETCH(c, f) \
counter_u64_fetch((c)->stats[ \
offsetof(struct ipfw_nptv6_stats, f) / sizeof(uint64_t)])
struct nptv6_cfg {
struct named_object no;
struct in6_addr internal; /* Internal IPv6 prefix */
struct in6_addr external; /* External IPv6 prefix */
struct in6_addr mask; /* IPv6 prefix mask */
uint16_t adjustment; /* Checksum adjustment value */
uint8_t plen; /* Prefix length */
uint8_t flags; /* Flags for internal use */
#define NPTV6_48PLEN 0x0001
char name[64]; /* Instance name */
counter_u64_t stats[NPTV6STATS]; /* Statistics counters */
};
#define NPTV6_FLAGSMASK 0
int nptv6_init(struct ip_fw_chain *ch, int first);
void nptv6_uninit(struct ip_fw_chain *ch, int last);
#endif /* _KERNEL */
#endif /* _IP_FW_NPTV6_H_ */
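/*
 * Illustrative sketch, not part of the removed sources: the
 * NPTV6STAT_* macros above index the per-instance counter array by the
 * field offset within struct ipfw_nptv6_stats divided by the counter
 * width, so the counters stay in sync with the exported structure
 * without a separate enum.  The fragment below shows the same trick
 * with a plain uint64_t array instead of counter(9) counters; struct
 * demo_stats and STAT_INC are made up for the example.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {
	uint64_t in2ex;
	uint64_t ex2in;
	uint64_t dropped;
};

#define DEMO_NSTATS	(sizeof(struct demo_stats) / sizeof(uint64_t))
#define STAT_INC(array, field)						\
	((array)[offsetof(struct demo_stats, field) / sizeof(uint64_t)]++)

int
main(void)
{
	uint64_t stats[DEMO_NSTATS] = { 0 };

	STAT_INC(stats, ex2in);
	STAT_INC(stats, dropped);
	assert(stats[1] == 1 && stats[2] == 1);
	return (0);
}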

View File

@@ -2440,6 +2440,7 @@ class pf(builder.Module):
'sys/net/if_pflog.h', 'sys/net/if_pflog.h',
'sys/net/if_pfsync.h', 'sys/net/if_pfsync.h',
'sys/net/pfvar.h', 'sys/net/pfvar.h',
'sys/netpfil/ipfw/ip_fw_private.h',
'sys/netpfil/pf/pf_altq.h', 'sys/netpfil/pf/pf_altq.h',
'sys/netpfil/pf/pf.h', 'sys/netpfil/pf/pf.h',
'sys/netpfil/pf/pf_mtag.h', 'sys/netpfil/pf/pf_mtag.h',
@@ -2462,60 +2463,6 @@ class pf(builder.Module):
mm.generator['source']() mm.generator['source']()
) )
class ipfw(builder.Module):
def __init__(self, manager):
super(ipfw, self).__init__(manager, type(self).__name__)
def generate(self):
mm = self.manager
self.addKernelSpaceHeaderFiles(
[
'sys/netinet6/ip_fw_nat64.h',
'sys/netinet6/ip_fw_nptv6.h',
'sys/netpfil/ipfw/dn_aqm_codel.h',
'sys/netpfil/ipfw/dn_aqm.h',
'sys/netpfil/ipfw/dn_aqm_pie.h',
'sys/netpfil/ipfw/dn_heap.h',
'sys/netpfil/ipfw/dn_sched_fq_codel.h',
'sys/netpfil/ipfw/dn_sched_fq_codel_helper.h',
'sys/netpfil/ipfw/dn_sched.h',
'sys/netpfil/ipfw/ip_dn_private.h',
'sys/netpfil/ipfw/ip_fw_private.h',
'sys/netpfil/ipfw/ip_fw_table.h',
'sys/netpfil/ipfw/nat64/ip_fw_nat64.h',
'sys/netpfil/ipfw/nat64/nat64lsn.h',
'sys/netpfil/ipfw/nat64/nat64stl.h',
'sys/netpfil/ipfw/nat64/nat64_translate.h',
'sys/netpfil/ipfw/nptv6/nptv6.h',
]
)
self.addKernelSpaceSourceFiles(
[
'sys/netpfil/ipfw/ip_fw2.c',
'sys/netpfil/ipfw/ip_fw_bpf.c',
'sys/netpfil/ipfw/ip_fw_dynamic.c',
'sys/netpfil/ipfw/ip_fw_eaction.c',
'sys/netpfil/ipfw/ip_fw_iface.c',
'sys/netpfil/ipfw/ip_fw_log.c',
'sys/netpfil/ipfw/ip_fw_nat.c',
'sys/netpfil/ipfw/ip_fw_pfil.c',
'sys/netpfil/ipfw/ip_fw_sockopt.c',
'sys/netpfil/ipfw/ip_fw_table_algo.c',
'sys/netpfil/ipfw/ip_fw_table.c',
'sys/netpfil/ipfw/ip_fw_table_value.c',
'sys/netpfil/ipfw/nat64/ip_fw_nat64.c',
'sys/netpfil/ipfw/nat64/nat64lsn.c',
'sys/netpfil/ipfw/nat64/nat64lsn_control.c',
'sys/netpfil/ipfw/nat64/nat64stl.c',
'sys/netpfil/ipfw/nat64/nat64stl_control.c',
'sys/netpfil/ipfw/nat64/nat64_translate.c',
'sys/netpfil/ipfw/nptv6/ip_fw_nptv6.c',
'sys/netpfil/ipfw/nptv6/nptv6.c',
],
mm.generator['source']()
)
# #
# PCI # PCI
# #
@@ -4966,7 +4913,6 @@ def load(mm):
mm.addModule(crypto(mm)) mm.addModule(crypto(mm))
mm.addModule(altq(mm)) mm.addModule(altq(mm))
mm.addModule(pf(mm)) mm.addModule(pf(mm))
mm.addModule(ipfw(mm))
mm.addModule(dev_net(mm)) mm.addModule(dev_net(mm))
# Add PCI # Add PCI

View File

@@ -10,13 +10,8 @@
#define AddFragmentPtrLink _bsd_AddFragmentPtrLink #define AddFragmentPtrLink _bsd_AddFragmentPtrLink
#define AddLink _bsd_AddLink #define AddLink _bsd_AddLink
#define AddPptp _bsd_AddPptp #define AddPptp _bsd_AddPptp
#define addr_hash _bsd_addr_hash
#define addr_kfib _bsd_addr_kfib
#define addr_radix _bsd_addr_radix
#define addrsel_policy_init _bsd_addrsel_policy_init #define addrsel_policy_init _bsd_addrsel_policy_init
#define AddSeq _bsd_AddSeq #define AddSeq _bsd_AddSeq
#define add_table_entry _bsd_add_table_entry
#define add_toperation_state _bsd_add_toperation_state
#define AES_GMAC_Final _bsd_AES_GMAC_Final #define AES_GMAC_Final _bsd_AES_GMAC_Final
#define AES_GMAC_Init _bsd_AES_GMAC_Init #define AES_GMAC_Init _bsd_AES_GMAC_Init
#define AES_GMAC_Reinit _bsd_AES_GMAC_Reinit #define AES_GMAC_Reinit _bsd_AES_GMAC_Reinit
@@ -89,7 +84,6 @@
#define auth_hash_nist_gmac_aes_192 _bsd_auth_hash_nist_gmac_aes_192 #define auth_hash_nist_gmac_aes_192 _bsd_auth_hash_nist_gmac_aes_192
#define auth_hash_nist_gmac_aes_256 _bsd_auth_hash_nist_gmac_aes_256 #define auth_hash_nist_gmac_aes_256 _bsd_auth_hash_nist_gmac_aes_256
#define auth_hash_null _bsd_auth_hash_null #define auth_hash_null _bsd_auth_hash_null
#define autoinc_step _bsd_autoinc_step
#define badport_bandlim _bsd_badport_bandlim #define badport_bandlim _bsd_badport_bandlim
#define bcd2bin_data _bsd_bcd2bin_data #define bcd2bin_data _bsd_bcd2bin_data
#define bce_COM_b06FwBss _bsd_bce_COM_b06FwBss #define bce_COM_b06FwBss _bsd_bce_COM_b06FwBss
@@ -543,15 +537,12 @@
#define cc_register_algo _bsd_cc_register_algo #define cc_register_algo _bsd_cc_register_algo
#define cdevpriv_mtx _bsd_cdevpriv_mtx #define cdevpriv_mtx _bsd_cdevpriv_mtx
#define cgem_set_ref_clk _bsd_cgem_set_ref_clk #define cgem_set_ref_clk _bsd_cgem_set_ref_clk
#define classify_opcode_kidx _bsd_classify_opcode_kidx
#define clean_unrhdr _bsd_clean_unrhdr #define clean_unrhdr _bsd_clean_unrhdr
#define clean_unrhdrl _bsd_clean_unrhdrl #define clean_unrhdrl _bsd_clean_unrhdrl
#define ClearCheckNewLink _bsd_ClearCheckNewLink #define ClearCheckNewLink _bsd_ClearCheckNewLink
#define clk_intr_event _bsd_clk_intr_event #define clk_intr_event _bsd_clk_intr_event
#define comp_algo_deflate _bsd_comp_algo_deflate #define comp_algo_deflate _bsd_comp_algo_deflate
#define comp_algorithm_lookup _bsd_comp_algorithm_lookup #define comp_algorithm_lookup _bsd_comp_algorithm_lookup
#define compare_ifidx _bsd_compare_ifidx
#define compare_numarray _bsd_compare_numarray
#define config_intrhook_disestablish _bsd_config_intrhook_disestablish #define config_intrhook_disestablish _bsd_config_intrhook_disestablish
#define config_intrhook_establish _bsd_config_intrhook_establish #define config_intrhook_establish _bsd_config_intrhook_establish
#define copyiniov _bsd_copyiniov #define copyiniov _bsd_copyiniov
@@ -561,7 +552,6 @@
#define counter_u64_free _bsd_counter_u64_free #define counter_u64_free _bsd_counter_u64_free
#define counter_u64_zero _bsd_counter_u64_zero #define counter_u64_zero _bsd_counter_u64_zero
#define crc32_tab _bsd_crc32_tab #define crc32_tab _bsd_crc32_tab
#define create_objects_compat _bsd_create_objects_compat
#define crypto_apply _bsd_crypto_apply #define crypto_apply _bsd_crypto_apply
#define crypto_copyback _bsd_crypto_copyback #define crypto_copyback _bsd_crypto_copyback
#define crypto_copydata _bsd_crypto_copydata #define crypto_copydata _bsd_crypto_copydata
@@ -588,8 +578,6 @@
#define crypto_unregister _bsd_crypto_unregister #define crypto_unregister _bsd_crypto_unregister
#define crypto_unregister_all _bsd_crypto_unregister_all #define crypto_unregister_all _bsd_crypto_unregister_all
#define crypto_userasymcrypto _bsd_crypto_userasymcrypto #define crypto_userasymcrypto _bsd_crypto_userasymcrypto
#define ctl3_lock _bsd_ctl3_lock
#define ctl3_rewriters _bsd_ctl3_rewriters
#define ctl_subtype_name _bsd_ctl_subtype_name #define ctl_subtype_name _bsd_ctl_subtype_name
#define cuio_apply _bsd_cuio_apply #define cuio_apply _bsd_cuio_apply
#define cuio_copyback _bsd_cuio_copyback #define cuio_copyback _bsd_cuio_copyback
@@ -605,7 +593,6 @@
#define _cv_wait_unlock _bsd__cv_wait_unlock #define _cv_wait_unlock _bsd__cv_wait_unlock
#define deembed_scopeid _bsd_deembed_scopeid #define deembed_scopeid _bsd_deembed_scopeid
#define default_cc_ptr _bsd_default_cc_ptr #define default_cc_ptr _bsd_default_cc_ptr
#define default_eaction_typename _bsd_default_eaction_typename
#define deflate_global _bsd_deflate_global #define deflate_global _bsd_deflate_global
#define defrouter_del _bsd_defrouter_del #define defrouter_del _bsd_defrouter_del
#define defrouter_lookup _bsd_defrouter_lookup #define defrouter_lookup _bsd_defrouter_lookup
@@ -620,8 +607,6 @@
#define DELAY _bsd_DELAY #define DELAY _bsd_DELAY
#define delete_unrhdr _bsd_delete_unrhdr #define delete_unrhdr _bsd_delete_unrhdr
#define delist_dev _bsd_delist_dev #define delist_dev _bsd_delist_dev
#define del_table_entry _bsd_del_table_entry
#define del_toperation_state _bsd_del_toperation_state
#define deregister_tcp_functions _bsd_deregister_tcp_functions #define deregister_tcp_functions _bsd_deregister_tcp_functions
#define des_check_key _bsd_des_check_key #define des_check_key _bsd_des_check_key
#define des_check_key_parity _bsd_des_check_key_parity #define des_check_key_parity _bsd_des_check_key_parity
@@ -743,7 +728,6 @@
#define drbr_inuse_drv _bsd_drbr_inuse_drv #define drbr_inuse_drv _bsd_drbr_inuse_drv
#define drbr_needs_enqueue_drv _bsd_drbr_needs_enqueue_drv #define drbr_needs_enqueue_drv _bsd_drbr_needs_enqueue_drv
#define driver_module_handler _bsd_driver_module_handler #define driver_module_handler _bsd_driver_module_handler
#define dummy_def _bsd_dummy_def
#define dwc_driver _bsd_dwc_driver #define dwc_driver _bsd_dwc_driver
#define dwc_otg_attach _bsd_dwc_otg_attach #define dwc_otg_attach _bsd_dwc_otg_attach
#define dwc_otg_detach _bsd_dwc_otg_detach #define dwc_otg_detach _bsd_dwc_otg_detach
@@ -1236,17 +1220,11 @@
#define firmware_register _bsd_firmware_register #define firmware_register _bsd_firmware_register
#define firmware_unregister _bsd_firmware_unregister #define firmware_unregister _bsd_firmware_unregister
#define first_handler _bsd_first_handler #define first_handler _bsd_first_handler
#define flow_hash _bsd_flow_hash
#define flush_table _bsd_flush_table
#define frag6_drain _bsd_frag6_drain #define frag6_drain _bsd_frag6_drain
#define frag6_init _bsd_frag6_init #define frag6_init _bsd_frag6_init
#define frag6_input _bsd_frag6_input #define frag6_input _bsd_frag6_input
#define frag6_slowtimo _bsd_frag6_slowtimo #define frag6_slowtimo _bsd_frag6_slowtimo
#define free_unr _bsd_free_unr #define free_unr _bsd_free_unr
#define fw_one_pass _bsd_fw_one_pass
#define fw_tables_max _bsd_fw_tables_max
#define fw_tables_sets _bsd_fw_tables_sets
#define fw_verbose _bsd_fw_verbose
#define genkbd_commonioctl _bsd_genkbd_commonioctl #define genkbd_commonioctl _bsd_genkbd_commonioctl
#define genkbd_diag _bsd_genkbd_diag #define genkbd_diag _bsd_genkbd_diag
#define genkbd_get_fkeystr _bsd_genkbd_get_fkeystr #define genkbd_get_fkeystr _bsd_genkbd_get_fkeystr
@@ -1774,7 +1752,6 @@
#define ieee80211_wme_updateparams_locked _bsd_ieee80211_wme_updateparams_locked #define ieee80211_wme_updateparams_locked _bsd_ieee80211_wme_updateparams_locked
#define ifa_add_loopback_route _bsd_ifa_add_loopback_route #define ifa_add_loopback_route _bsd_ifa_add_loopback_route
#define ifa_alloc _bsd_ifa_alloc #define ifa_alloc _bsd_ifa_alloc
#define iface_idx _bsd_iface_idx
#define if_addgroup _bsd_if_addgroup #define if_addgroup _bsd_if_addgroup
#define if_addmulti _bsd_if_addmulti #define if_addmulti _bsd_if_addmulti
#define ifaddr_byindex _bsd_ifaddr_byindex #define ifaddr_byindex _bsd_ifaddr_byindex
@@ -2259,122 +2236,8 @@
#define ip_forward _bsd_ip_forward #define ip_forward _bsd_ip_forward
#define ipforwarding _bsd_ipforwarding #define ipforwarding _bsd_ipforwarding
#define ip_fragment _bsd_ip_fragment #define ip_fragment _bsd_ip_fragment
#define ipfw_add_eaction _bsd_ipfw_add_eaction
#define ipfw_add_obj_rewriter _bsd_ipfw_add_obj_rewriter
#define ipfw_add_sopt_handler _bsd_ipfw_add_sopt_handler
#define ipfw_add_table_algo _bsd_ipfw_add_table_algo
#define ipfw_alloc_rule _bsd_ipfw_alloc_rule
#define ipfw_attach_hooks _bsd_ipfw_attach_hooks
#define ipfw_bpf_init _bsd_ipfw_bpf_init
#define ipfw_bpf_mtap2 _bsd_ipfw_bpf_mtap2
#define ipfw_bpf_uninit _bsd_ipfw_bpf_uninit
#define ipfw_check_frame _bsd_ipfw_check_frame
#define ipfw_check_object_name_generic _bsd_ipfw_check_object_name_generic
#define ipfw_check_packet _bsd_ipfw_check_packet
#define ipfw_chg_hook _bsd_ipfw_chg_hook
#define ipfw_chk _bsd_ipfw_chk
#define ip_fw_chk_ptr _bsd_ip_fw_chk_ptr #define ip_fw_chk_ptr _bsd_ip_fw_chk_ptr
#define ipfw_count_table _bsd_ipfw_count_table
#define ipfw_count_xtable _bsd_ipfw_count_xtable
#define ipfw_ctl3 _bsd_ipfw_ctl3
#define ip_fw_ctl_ptr _bsd_ip_fw_ctl_ptr #define ip_fw_ctl_ptr _bsd_ip_fw_ctl_ptr
#define ipfw_del_eaction _bsd_ipfw_del_eaction
#define ipfw_del_obj_rewriter _bsd_ipfw_del_obj_rewriter
#define ipfw_del_sopt_handler _bsd_ipfw_del_sopt_handler
#define ipfw_del_table_algo _bsd_ipfw_del_table_algo
#define ipfw_destroy_counters _bsd_ipfw_destroy_counters
#define ipfw_destroy_obj_rewriter _bsd_ipfw_destroy_obj_rewriter
#define ipfw_destroy_skipto_cache _bsd_ipfw_destroy_skipto_cache
#define ipfw_destroy_sopt_handler _bsd_ipfw_destroy_sopt_handler
#define ipfw_destroy_srv _bsd_ipfw_destroy_srv
#define ipfw_destroy_tables _bsd_ipfw_destroy_tables
#define ipfw_dump_states _bsd_ipfw_dump_states
#define ipfw_dump_table_legacy _bsd_ipfw_dump_table_legacy
#define ipfw_dyn_get_count _bsd_ipfw_dyn_get_count
#define ipfw_dyn_init _bsd_ipfw_dyn_init
#define ipfw_dyn_len _bsd_ipfw_dyn_len
#define ipfw_dyn_uninit _bsd_ipfw_dyn_uninit
#define ipfw_dyn_unlock _bsd_ipfw_dyn_unlock
#define ipfw_eaction_init _bsd_ipfw_eaction_init
#define ipfw_eaction_uninit _bsd_ipfw_eaction_uninit
#define ipfw_expire_dyn_rules _bsd_ipfw_expire_dyn_rules
#define ipfw_export_obj_ntlv _bsd_ipfw_export_obj_ntlv
#define ipfw_export_table_ntlv _bsd_ipfw_export_table_ntlv
#define ipfw_export_table_value_legacy _bsd_ipfw_export_table_value_legacy
#define ipfw_export_table_value_v1 _bsd_ipfw_export_table_value_v1
#define ipfw_find_name_tlv_type _bsd_ipfw_find_name_tlv_type
#define ipfw_find_rule _bsd_ipfw_find_rule
#define ipfw_foreach_table_tentry _bsd_ipfw_foreach_table_tentry
#define ipfw_garbage_table_values _bsd_ipfw_garbage_table_values
#define ipfw_get_dynamic _bsd_ipfw_get_dynamic
#define ipfw_get_sopt_header _bsd_ipfw_get_sopt_header
#define ipfw_get_sopt_space _bsd_ipfw_get_sopt_space
#define ipfw_get_table_objhash _bsd_ipfw_get_table_objhash
#define ipfw_iface_add_notify _bsd_ipfw_iface_add_notify
#define ipfw_iface_del_notify _bsd_ipfw_iface_del_notify
#define ipfw_iface_destroy _bsd_ipfw_iface_destroy
#define ipfw_iface_init _bsd_ipfw_iface_init
#define ipfw_iface_ref _bsd_ipfw_iface_ref
#define ipfw_iface_unref _bsd_ipfw_iface_unref
#define ipfw_import_table_value_legacy _bsd_ipfw_import_table_value_legacy
#define ipfw_import_table_value_v1 _bsd_ipfw_import_table_value_v1
#define ipfw_init_counters _bsd_ipfw_init_counters
#define ipfw_init_obj_rewriter _bsd_ipfw_init_obj_rewriter
#define ipfw_init_skipto_cache _bsd_ipfw_init_skipto_cache
#define ipfw_init_sopt_handler _bsd_ipfw_init_sopt_handler
#define ipfw_init_srv _bsd_ipfw_init_srv
#define ipfw_init_tables _bsd_ipfw_init_tables
#define ipfw_install_state _bsd_ipfw_install_state
#define ipfw_is_dyn_rule _bsd_ipfw_is_dyn_rule
#define ipfw_link_table_values _bsd_ipfw_link_table_values
#define ipfw_log _bsd_ipfw_log
#define ipfw_lookup_dyn_rule _bsd_ipfw_lookup_dyn_rule
#define ipfw_lookup_table _bsd_ipfw_lookup_table
#define ipfw_match_range _bsd_ipfw_match_range
#define ipfw_nat64lsn _bsd_ipfw_nat64lsn
#define ipfw_nat64stl _bsd_ipfw_nat64stl
#define ipfw_nat_cfg_ptr _bsd_ipfw_nat_cfg_ptr
#define ipfw_nat_del_ptr _bsd_ipfw_nat_del_ptr
#define ipfw_nat_get_cfg_ptr _bsd_ipfw_nat_get_cfg_ptr
#define ipfw_nat_get_log_ptr _bsd_ipfw_nat_get_log_ptr
#define ipfw_nat_ptr _bsd_ipfw_nat_ptr
#define ipfw_nat_ready _bsd_ipfw_nat_ready
#define ipfw_objhash_add _bsd_ipfw_objhash_add
#define ipfw_objhash_alloc_idx _bsd_ipfw_objhash_alloc_idx
#define ipfw_objhash_bitmap_alloc _bsd_ipfw_objhash_bitmap_alloc
#define ipfw_objhash_bitmap_free _bsd_ipfw_objhash_bitmap_free
#define ipfw_objhash_bitmap_merge _bsd_ipfw_objhash_bitmap_merge
#define ipfw_objhash_bitmap_swap _bsd_ipfw_objhash_bitmap_swap
#define ipfw_objhash_count _bsd_ipfw_objhash_count
#define ipfw_objhash_count_type _bsd_ipfw_objhash_count_type
#define ipfw_objhash_create _bsd_ipfw_objhash_create
#define ipfw_objhash_del _bsd_ipfw_objhash_del
#define ipfw_objhash_destroy _bsd_ipfw_objhash_destroy
#define ipfw_objhash_find_type _bsd_ipfw_objhash_find_type
#define ipfw_objhash_foreach _bsd_ipfw_objhash_foreach
#define ipfw_objhash_foreach_type _bsd_ipfw_objhash_foreach_type
#define ipfw_objhash_free_idx _bsd_ipfw_objhash_free_idx
#define ipfw_objhash_lookup_kidx _bsd_ipfw_objhash_lookup_kidx
#define ipfw_objhash_lookup_name _bsd_ipfw_objhash_lookup_name
#define ipfw_objhash_lookup_name_type _bsd_ipfw_objhash_lookup_name_type
#define ipfw_objhash_lookup_table_kidx _bsd_ipfw_objhash_lookup_table_kidx
#define ipfw_objhash_same_name _bsd_ipfw_objhash_same_name
#define ipfw_objhash_set_funcs _bsd_ipfw_objhash_set_funcs
#define ipfw_obj_manage_sets _bsd_ipfw_obj_manage_sets
#define ipfw_reap_add _bsd_ipfw_reap_add
#define ipfw_reap_rules _bsd_ipfw_reap_rules
#define ipfw_ref_table _bsd_ipfw_ref_table
#define ipfw_resize_tables _bsd_ipfw_resize_tables
#define ipfw_run_eaction _bsd_ipfw_run_eaction
#define ipfw_send_pkt _bsd_ipfw_send_pkt
#define ipfw_switch_tables_namespace _bsd_ipfw_switch_tables_namespace
#define ipfw_table_algo_destroy _bsd_ipfw_table_algo_destroy
#define ipfw_table_algo_init _bsd_ipfw_table_algo_init
#define ipfw_table_value_destroy _bsd_ipfw_table_value_destroy
#define ipfw_table_value_init _bsd_ipfw_table_value_init
#define ipfw_unref_table _bsd_ipfw_unref_table
#define ipfw_unref_table_values _bsd_ipfw_unref_table_values
#define ipfw_vnet_ready _bsd_ipfw_vnet_ready
#define ip_gre_ttl _bsd_ip_gre_ttl #define ip_gre_ttl _bsd_ip_gre_ttl
#define ip_init _bsd_ip_init #define ip_init _bsd_ip_init
#define ip_input _bsd_ip_input #define ip_input _bsd_ip_input
@@ -2627,7 +2490,6 @@
#define lagg_input_p _bsd_lagg_input_p #define lagg_input_p _bsd_lagg_input_p
#define lagg_linkstate_p _bsd_lagg_linkstate_p #define lagg_linkstate_p _bsd_lagg_linkstate_p
#define lagg_list _bsd_lagg_list #define lagg_list _bsd_lagg_list
#define layer3_chain _bsd_layer3_chain
#define led_create _bsd_led_create #define led_create _bsd_led_create
#define led_create_state _bsd_led_create_state #define led_create_state _bsd_led_create_state
#define led_destroy _bsd_led_destroy #define led_destroy _bsd_led_destroy
@@ -2712,7 +2574,6 @@
#define log _bsd_log #define log _bsd_log
#define loif _bsd_loif #define loif _bsd_loif
#define loioctl _bsd_loioctl #define loioctl _bsd_loioctl
#define lookup_nat_ptr _bsd_lookup_nat_ptr
#define looutput _bsd_looutput #define looutput _bsd_looutput
#define lpc_pwr_read _bsd_lpc_pwr_read #define lpc_pwr_read _bsd_lpc_pwr_read
#define lpc_pwr_write _bsd_lpc_pwr_write #define lpc_pwr_write _bsd_lpc_pwr_write
@@ -2809,8 +2670,6 @@
#define mii_tick _bsd_mii_tick #define mii_tick _bsd_mii_tick
#define M_IOV _bsd_M_IOV #define M_IOV _bsd_M_IOV
#define M_IP6NDP _bsd_M_IP6NDP #define M_IP6NDP _bsd_M_IP6NDP
#define M_IPFW _bsd_M_IPFW
#define M_IPFW_TBL _bsd_M_IPFW_TBL
#define M_IPSEC_INPCB _bsd_M_IPSEC_INPCB #define M_IPSEC_INPCB _bsd_M_IPSEC_INPCB
#define M_IPSEC_MISC _bsd_M_IPSEC_MISC #define M_IPSEC_MISC _bsd_M_IPSEC_MISC
#define M_IPSEC_SA _bsd_M_IPSEC_SA #define M_IPSEC_SA _bsd_M_IPSEC_SA
@@ -2841,7 +2700,6 @@
#define mmc_wait_for_cmd _bsd_mmc_wait_for_cmd #define mmc_wait_for_cmd _bsd_mmc_wait_for_cmd
#define m_megapullup _bsd_m_megapullup #define m_megapullup _bsd_m_megapullup
#define m_move_pkthdr _bsd_m_move_pkthdr #define m_move_pkthdr _bsd_m_move_pkthdr
#define M_NAT64LSN _bsd_M_NAT64LSN
#define module_lookupbyname _bsd_module_lookupbyname #define module_lookupbyname _bsd_module_lookupbyname
#define module_register _bsd_module_register #define module_register _bsd_module_register
#define module_register_init _bsd_module_register_init #define module_register_init _bsd_module_register_init
@@ -2905,25 +2763,6 @@
#define musbotg_vbus_interrupt _bsd_musbotg_vbus_interrupt #define musbotg_vbus_interrupt _bsd_musbotg_vbus_interrupt
#define mutex_init _bsd_mutex_init #define mutex_init _bsd_mutex_init
#define M_XDATA _bsd_M_XDATA #define M_XDATA _bsd_M_XDATA
#define nat64_allow_private _bsd_nat64_allow_private
#define nat64_debug _bsd_nat64_debug
#define nat64_do_handle_ip4 _bsd_nat64_do_handle_ip4
#define nat64_do_handle_ip6 _bsd_nat64_do_handle_ip6
#define nat64_getlasthdr _bsd_nat64_getlasthdr
#define nat64_handle_icmp6 _bsd_nat64_handle_icmp6
#define nat64lsn_destroy_instance _bsd_nat64lsn_destroy_instance
#define nat64lsn_dump_state _bsd_nat64lsn_dump_state
#define nat64lsn_eid _bsd_nat64lsn_eid
#define nat64lsn_init _bsd_nat64lsn_init
#define nat64lsn_init_instance _bsd_nat64lsn_init_instance
#define nat64lsn_init_internal _bsd_nat64lsn_init_internal
#define nat64lsn_rproto_map _bsd_nat64lsn_rproto_map
#define nat64lsn_start_instance _bsd_nat64lsn_start_instance
#define nat64lsn_uninit _bsd_nat64lsn_uninit
#define nat64lsn_uninit_internal _bsd_nat64lsn_uninit_internal
#define nat64stl_eid _bsd_nat64stl_eid
#define nat64stl_init _bsd_nat64stl_init
#define nat64stl_uninit _bsd_nat64stl_uninit
#define natt_cksum_policy _bsd_natt_cksum_policy #define natt_cksum_policy _bsd_natt_cksum_policy
#define nd6_add_ifa_lle _bsd_nd6_add_ifa_lle #define nd6_add_ifa_lle _bsd_nd6_add_ifa_lle
#define nd6_alloc _bsd_nd6_alloc #define nd6_alloc _bsd_nd6_alloc
@@ -3004,14 +2843,10 @@
#define ng_gif_input_orphan_p _bsd_ng_gif_input_orphan_p #define ng_gif_input_orphan_p _bsd_ng_gif_input_orphan_p
#define ng_gif_input_p _bsd_ng_gif_input_p #define ng_gif_input_p _bsd_ng_gif_input_p
#define ng_ipfw_input_p _bsd_ng_ipfw_input_p #define ng_ipfw_input_p _bsd_ng_ipfw_input_p
#define norule_counter _bsd_norule_counter
#define nousrreqs _bsd_nousrreqs #define nousrreqs _bsd_nousrreqs
#define nptv6_init _bsd_nptv6_init
#define nptv6_uninit _bsd_nptv6_uninit
#define null_class _bsd_null_class #define null_class _bsd_null_class
#define null_filtops _bsd_null_filtops #define null_filtops _bsd_null_filtops
#define nullop _bsd_nullop #define nullop _bsd_nullop
#define number_array _bsd_number_array
#define OF_call_method _bsd_OF_call_method #define OF_call_method _bsd_OF_call_method
#define OF_canon _bsd_OF_canon #define OF_canon _bsd_OF_canon
#define OF_child _bsd_OF_child #define OF_child _bsd_OF_child
@@ -3851,8 +3686,6 @@
#define rn_refines _bsd_rn_refines #define rn_refines _bsd_rn_refines
#define rn_walktree _bsd_rn_walktree #define rn_walktree _bsd_rn_walktree
#define rn_walktree_from _bsd_rn_walktree_from #define rn_walktree_from _bsd_rn_walktree_from
#define rollback_table_values _bsd_rollback_table_values
#define rollback_toperation_state _bsd_rollback_toperation_state
#define root_bus _bsd_root_bus #define root_bus _bsd_root_bus
#define root_bus_configure _bsd_root_bus_configure #define root_bus_configure _bsd_root_bus_configure
#define root_devclass _bsd_root_devclass #define root_devclass _bsd_root_devclass
@@ -4478,7 +4311,6 @@
#define SetAckModified _bsd_SetAckModified #define SetAckModified _bsd_SetAckModified
#define SetDefaultAliasAddress _bsd_SetDefaultAliasAddress #define SetDefaultAliasAddress _bsd_SetDefaultAliasAddress
#define SetDestCallId _bsd_SetDestCallId #define SetDestCallId _bsd_SetDestCallId
#define set_disable _bsd_set_disable
#define SetExpire _bsd_SetExpire #define SetExpire _bsd_SetExpire
#define SetFragmentAddr _bsd_SetFragmentAddr #define SetFragmentAddr _bsd_SetFragmentAddr
#define SetFragmentPtr _bsd_SetFragmentPtr #define SetFragmentPtr _bsd_SetFragmentPtr
@@ -4761,7 +4593,6 @@
#define sysctl___net_inet6 _bsd_sysctl___net_inet6 #define sysctl___net_inet6 _bsd_sysctl___net_inet6
#define sysctl___net_inet6_icmp6 _bsd_sysctl___net_inet6_icmp6 #define sysctl___net_inet6_icmp6 _bsd_sysctl___net_inet6_icmp6
#define sysctl___net_inet6_ip6 _bsd_sysctl___net_inet6_ip6 #define sysctl___net_inet6_ip6 _bsd_sysctl___net_inet6_ip6
#define sysctl___net_inet6_ip6_fw _bsd_sysctl___net_inet6_ip6_fw
#define sysctl___net_inet6_ipsec6 _bsd_sysctl___net_inet6_ipsec6 #define sysctl___net_inet6_ipsec6 _bsd_sysctl___net_inet6_ipsec6
#define sysctl___net_inet6_mld _bsd_sysctl___net_inet6_mld #define sysctl___net_inet6_mld _bsd_sysctl___net_inet6_mld
#define sysctl___net_inet6_tcp6 _bsd_sysctl___net_inet6_tcp6 #define sysctl___net_inet6_tcp6 _bsd_sysctl___net_inet6_tcp6
@@ -4775,7 +4606,6 @@
#define sysctl___net_inet_ip _bsd_sysctl___net_inet_ip #define sysctl___net_inet_ip _bsd_sysctl___net_inet_ip
#define sysctl___net_inet_ip_alias _bsd_sysctl___net_inet_ip_alias #define sysctl___net_inet_ip_alias _bsd_sysctl___net_inet_ip_alias
#define sysctl___net_inet_ipcomp _bsd_sysctl___net_inet_ipcomp #define sysctl___net_inet_ipcomp _bsd_sysctl___net_inet_ipcomp
#define sysctl___net_inet_ip_fw _bsd_sysctl___net_inet_ip_fw
#define sysctl___net_inet_ipip _bsd_sysctl___net_inet_ipip #define sysctl___net_inet_ipip _bsd_sysctl___net_inet_ipip
#define sysctl___net_inet_ipsec _bsd_sysctl___net_inet_ipsec #define sysctl___net_inet_ipsec _bsd_sysctl___net_inet_ipsec
#define sysctl___net_inet_pim _bsd_sysctl___net_inet_pim #define sysctl___net_inet_pim _bsd_sysctl___net_inet_pim
@@ -4955,10 +4785,8 @@
#define tcp_usrreqs _bsd_tcp_usrreqs #define tcp_usrreqs _bsd_tcp_usrreqs
#define tcp_v6mssdflt _bsd_tcp_v6mssdflt #define tcp_v6mssdflt _bsd_tcp_v6mssdflt
#define tcp_xmit_timer _bsd_tcp_xmit_timer #define tcp_xmit_timer _bsd_tcp_xmit_timer
#define tc_ref _bsd_tc_ref
#define tc_tick_bt _bsd_tc_tick_bt #define tc_tick_bt _bsd_tc_tick_bt
#define tc_tick_sbt _bsd_tc_tick_sbt #define tc_tick_sbt _bsd_tc_tick_sbt
#define tc_unref _bsd_tc_unref
#define t_functions _bsd_t_functions #define t_functions _bsd_t_functions
#define t_functions_inited _bsd_t_functions_inited #define t_functions_inited _bsd_t_functions_inited
#define ti_am335x_clk_devmap _bsd_ti_am335x_clk_devmap #define ti_am335x_clk_devmap _bsd_ti_am335x_clk_devmap
@@ -5152,7 +4980,6 @@
#define uma_zone_set_zinit _bsd_uma_zone_set_zinit #define uma_zone_set_zinit _bsd_uma_zone_set_zinit
#define uma_zsecond_create _bsd_uma_zsecond_create #define uma_zsecond_create _bsd_uma_zsecond_create
#define untimeout _bsd_untimeout #define untimeout _bsd_untimeout
#define update_opcode_kidx _bsd_update_opcode_kidx
#define usb_alloc_device _bsd_usb_alloc_device #define usb_alloc_device _bsd_usb_alloc_device
#define usb_alloc_mbufs _bsd_usb_alloc_mbufs #define usb_alloc_mbufs _bsd_usb_alloc_mbufs
#define usb_alloc_symlink _bsd_usb_alloc_symlink #define usb_alloc_symlink _bsd_usb_alloc_symlink
@@ -5431,7 +5258,6 @@
#define usb_ugen_methods _bsd_usb_ugen_methods #define usb_ugen_methods _bsd_usb_ugen_methods
#define uuid_ether_add _bsd_uuid_ether_add #define uuid_ether_add _bsd_uuid_ether_add
#define uuid_ether_del _bsd_uuid_ether_del #define uuid_ether_del _bsd_uuid_ether_del
#define verbose_limit _bsd_verbose_limit
#define vht80_chan_ranges _bsd_vht80_chan_ranges #define vht80_chan_ranges _bsd_vht80_chan_ranges
#define vlan_cookie_p _bsd_vlan_cookie_p #define vlan_cookie_p _bsd_vlan_cookie_p
#define vlan_devat_p _bsd_vlan_devat_p #define vlan_devat_p _bsd_vlan_devat_p
@@ -5443,7 +5269,6 @@
#define vlan_trunkdev_p _bsd_vlan_trunkdev_p #define vlan_trunkdev_p _bsd_vlan_trunkdev_p
#define vlog _bsd_vlog #define vlog _bsd_vlog
#define vnet_if_clone_init _bsd_vnet_if_clone_init #define vnet_if_clone_init _bsd_vnet_if_clone_init
#define vnet_ipfw_iface_destroy _bsd_vnet_ipfw_iface_destroy
#define vprintf _bsd_vprintf #define vprintf _bsd_vprintf
#define vsnprintf _bsd_vsnprintf #define vsnprintf _bsd_vsnprintf
#define vsnrprintf _bsd_vsnrprintf #define vsnrprintf _bsd_vsnrprintf