BPF(4): Add support

This commit is contained in:
Sebastian Huber
2013-12-13 10:57:03 +01:00
parent 1fea8c46c1
commit e10d1cdc45
5 changed files with 294 additions and 0 deletions

View File

@@ -88,6 +88,11 @@ __FBSDID("$FreeBSD$");
#include <net80211/ieee80211_freebsd.h>
#include <security/mac/mac_framework.h>
#ifdef __rtems__
#include <rtems/imfs.h>
#define devfs_get_cdevpriv(x) 0
#define devtoname(x) "bpf"
#endif /* __rtems__ */
MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
@@ -173,9 +178,11 @@ SYSCTL_NODE(_net, OID_AUTO, bpf, CTLFLAG_RW, 0, "bpf sysctl");
int bpf_maxinsns = BPF_MAXINSNS;
SYSCTL_INT(_net_bpf, OID_AUTO, maxinsns, CTLFLAG_RW,
&bpf_maxinsns, 0, "Maximum bpf program instructions");
#ifndef __rtems__
static int bpf_zerocopy_enable = 0;
SYSCTL_INT(_net_bpf, OID_AUTO, zerocopy_enable, CTLFLAG_RW,
&bpf_zerocopy_enable, 0, "Enable new zero-copy BPF buffer sessions");
#endif /* __rtems__ */
static SYSCTL_NODE(_net_bpf, OID_AUTO, stats, CTLFLAG_MPSAFE | CTLFLAG_RW,
bpf_stats_sysctl, "bpf statistics portal");
@@ -185,6 +192,7 @@ SYSCTL_VNET_INT(_net_bpf, OID_AUTO, optimize_writers,
CTLFLAG_RW, &VNET_NAME(bpf_optimize_writers), 0,
"Do not send packets until BPF program is set");
#ifndef __rtems__
static d_open_t bpfopen;
static d_read_t bpfread;
static d_write_t bpfwrite;
@@ -202,6 +210,7 @@ static struct cdevsw bpf_cdevsw = {
.d_name = "bpf",
.d_kqfilter = bpfkqfilter,
};
#endif /* __rtems__ */
static struct filterops bpfread_filtops = {
.f_isfd = 1,
@@ -256,9 +265,11 @@ bpf_append_bytes(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_append_bytes(d, buf, offset, src, len));
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
d->bd_zcopy++;
return (bpf_zerocopy_append_bytes(d, buf, offset, src, len));
#endif /* __rtems__ */
default:
panic("bpf_buf_append_bytes");
@@ -276,9 +287,11 @@ bpf_append_mbuf(struct bpf_d *d, caddr_t buf, u_int offset, void *src,
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_append_mbuf(d, buf, offset, src, len));
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
d->bd_zcopy++;
return (bpf_zerocopy_append_mbuf(d, buf, offset, src, len));
#endif /* __rtems__ */
default:
panic("bpf_buf_append_mbuf");
@@ -298,9 +311,11 @@ bpf_buf_reclaimed(struct bpf_d *d)
case BPF_BUFMODE_BUFFER:
return;
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_buf_reclaimed(d);
return;
#endif /* __rtems__ */
default:
panic("bpf_buf_reclaimed");
@@ -318,10 +333,12 @@ bpf_canfreebuf(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
#ifndef __rtems__
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_canfreebuf(d));
}
#endif /* __rtems__ */
return (0);
}
@@ -335,10 +352,12 @@ bpf_canwritebuf(struct bpf_d *d)
{
BPFD_LOCK_ASSERT(d);
#ifndef __rtems__
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_canwritebuf(d));
}
#endif /* __rtems__ */
return (1);
}
@@ -353,11 +372,13 @@ bpf_buffull(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
#ifndef __rtems__
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_buffull(d);
break;
}
#endif /* __rtems__ */
}
/*
@@ -369,11 +390,13 @@ bpf_bufheld(struct bpf_d *d)
BPFD_LOCK_ASSERT(d);
#ifndef __rtems__
switch (d->bd_bufmode) {
case BPF_BUFMODE_ZBUF:
bpf_zerocopy_bufheld(d);
break;
}
#endif /* __rtems__ */
}
static void
@@ -384,8 +407,10 @@ bpf_free(struct bpf_d *d)
case BPF_BUFMODE_BUFFER:
return (bpf_buffer_free(d));
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
return (bpf_zerocopy_free(d));
#endif /* __rtems__ */
default:
panic("bpf_buf_free");
@@ -414,27 +439,39 @@ static int
bpf_ioctl_getzmax(struct thread *td, struct bpf_d *d, size_t *i)
{
#ifndef __rtems__
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (EOPNOTSUPP);
return (bpf_zerocopy_ioctl_getzmax(td, d, i));
#else /* __rtems__ */
return (EOPNOTSUPP);
#endif /* __rtems__ */
}
static int
bpf_ioctl_rotzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{
#ifndef __rtems__
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (EOPNOTSUPP);
return (bpf_zerocopy_ioctl_rotzbuf(td, d, bz));
#else /* __rtems__ */
return (EOPNOTSUPP);
#endif /* __rtems__ */
}
static int
bpf_ioctl_setzbuf(struct thread *td, struct bpf_d *d, struct bpf_zbuf *bz)
{
#ifndef __rtems__
if (d->bd_bufmode != BPF_BUFMODE_ZBUF)
return (EOPNOTSUPP);
return (bpf_zerocopy_ioctl_setzbuf(td, d, bz));
#else /* __rtems__ */
return (EOPNOTSUPP);
#endif /* __rtems__ */
}
/*
@@ -802,10 +839,15 @@ bpf_dtor(void *data)
* EBUSY if file is open by another process.
*/
/* ARGSUSED */
#ifndef __rtems__
static int
#else /* __rtems__ */
static struct bpf_d *
#endif /* __rtems__ */
bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
{
struct bpf_d *d;
#ifndef __rtems__
int error, size;
d = malloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
@@ -814,6 +856,14 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
free(d, M_BPF);
return (error);
}
#else /* __rtems__ */
u_int size;
d = malloc(sizeof(*d), M_BPF, M_NOWAIT | M_ZERO);
if (d == NULL) {
return (d);
}
#endif /* __rtems__ */
/*
* For historical reasons, perform a one-time initialization call to
@@ -838,16 +888,26 @@ bpfopen(struct cdev *dev, int flags, int fmt, struct thread *td)
size = d->bd_bufsize;
bpf_buffer_ioctl_sblen(d, &size);
#ifndef __rtems__
return (0);
#else /* __rtems__ */
return (d);
#endif /* __rtems__ */
}
/*
* bpfread - read next chunk of packets from buffers
*/
static int
#ifndef __rtems__
bpfread(struct cdev *dev, struct uio *uio, int ioflag)
#else /* __rtems__ */
bpfread(struct bpf_d *d, struct uio *uio, int ioflag)
#endif /* __rtems__ */
{
#ifndef __rtems__
struct bpf_d *d;
#endif /* __rtems__ */
int error;
int non_block;
int timed_out;
@@ -990,8 +1050,10 @@ bpf_wakeup(struct bpf_d *d)
d->bd_state = BPF_IDLE;
}
wakeup(d);
#ifndef __rtems__
if (d->bd_async && d->bd_sig && d->bd_sigio)
pgsigio(&d->bd_sigio, d->bd_sig, 0);
#endif /* __rtems__ */
selwakeuppri(&d->bd_sel, PRINET);
KNOTE_LOCKED(&d->bd_sel.si_note, 0);
@@ -1028,9 +1090,15 @@ bpf_ready(struct bpf_d *d)
}
static int
#ifndef __rtems__
bpfwrite(struct cdev *dev, struct uio *uio, int ioflag)
#else /* __rtems__ */
bpfwrite(struct bpf_d *d, struct uio *uio, int ioflag)
#endif /* __rtems__ */
{
#ifndef __rtems__
struct bpf_d *d;
#endif /* __rtems__ */
struct ifnet *ifp;
struct mbuf *m, *mc;
struct sockaddr dst;
@@ -1180,10 +1248,17 @@ reset_d(struct bpf_d *d)
*/
/* ARGSUSED */
static int
#ifndef __rtems__
bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
struct thread *td)
#else /* __rtems__ */
bpfioctl(struct bpf_d *d, u_long cmd, caddr_t addr, int flags,
struct thread *td)
#endif /* __rtems__ */
{
#ifndef __rtems__
struct bpf_d *d;
#endif /* __rtems__ */
int error;
error = devfs_get_cdevpriv((void **)&d);
@@ -1615,11 +1690,13 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
case FIONBIO: /* Non-blocking I/O */
break;
#ifndef __rtems__
case FIOASYNC: /* Send signal on receive packets */
BPFD_LOCK(d);
d->bd_async = *(int *)addr;
BPFD_UNLOCK(d);
break;
#endif /* __rtems__ */
case FIOSETOWN:
/*
@@ -1684,10 +1761,12 @@ bpfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
case BPF_BUFMODE_BUFFER:
break;
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
if (bpf_zerocopy_enable)
break;
/* FALLSTHROUGH */
#endif /* __rtems__ */
default:
CURVNET_RESTORE();
@@ -1885,7 +1964,9 @@ bpf_setif(struct bpf_d *d, struct ifreq *ifr)
*/
switch (d->bd_bufmode) {
case BPF_BUFMODE_BUFFER:
#ifndef __rtems__
case BPF_BUFMODE_ZBUF:
#endif /* __rtems__ */
if (d->bd_sbuf == NULL)
return (EINVAL);
break;
@@ -1908,9 +1989,15 @@ bpf_setif(struct bpf_d *d, struct ifreq *ifr)
* Otherwise, return false but make a note that a selwakeup() must be done.
*/
static int
#ifndef __rtems__
bpfpoll(struct cdev *dev, int events, struct thread *td)
#else /* __rtems__ */
bpfpoll(struct bpf_d *d, int events, struct thread *td)
#endif /* __rtems__ */
{
#ifndef __rtems__
struct bpf_d *d;
#endif /* __rtems__ */
int revents;
if (devfs_get_cdevpriv((void **)&d) != 0 || d->bd_bif == NULL)
@@ -1944,10 +2031,19 @@ bpfpoll(struct cdev *dev, int events, struct thread *td)
* Support for kevent() system call. Register EVFILT_READ filters and
* reject all others.
*/
#ifdef __rtems__
static
#endif /* __rtems__ */
int
#ifndef __rtems__
bpfkqfilter(struct cdev *dev, struct knote *kn)
#else /* __rtems__ */
bpfkqfilter(struct bpf_d *d, struct knote *kn)
#endif /* __rtems__ */
{
#ifndef __rtems__
struct bpf_d *d;
#endif /* __rtems__ */
if (devfs_get_cdevpriv((void **)&d) != 0 ||
kn->kn_filter != EVFILT_READ)
@@ -2671,18 +2767,200 @@ bpf_setdlt(struct bpf_d *d, u_int dlt)
}
return (bp == NULL ? EINVAL : 0);
}
#ifdef __rtems__
/*
 * Return the BPF descriptor stored in the IMFS I/O node by
 * bpf_imfs_open().
 */
static struct bpf_d *
bpf_imfs_get_context_by_iop(const rtems_libio_t *iop)
{

	return ((struct bpf_d *)iop->data1);
}
/*
 * IMFS open handler: allocate a BPF descriptor and attach it to the
 * I/O node.  Returns 0 on success; on allocation failure sets errno to
 * ENOMEM and returns -1.
 */
static int
bpf_imfs_open(rtems_libio_t *iop, const char *path, int oflag, mode_t mode)
{
	struct bpf_d *d;

	d = bpfopen(NULL, 0, 0, NULL);
	/* Stash the descriptor (possibly NULL) for the other handlers. */
	iop->data1 = d;

	if (d == NULL) {
		rtems_set_errno_and_return_minus_one(ENOMEM);
	}

	return (0);
}
/*
 * IMFS close handler: tear down the descriptor attached to this node.
 * Always succeeds.
 */
static int
bpf_imfs_close(rtems_libio_t *iop)
{

	bpf_dtor(bpf_imfs_get_context_by_iop(iop));
	return (0);
}
/*
 * IMFS readv handler: build a uio describing the caller's iovec array
 * and hand it to bpfread().  Returns the number of bytes read, or -1
 * with errno set on error (ENOMEM when no thread context is available).
 */
static ssize_t
bpf_imfs_readv(rtems_libio_t *iop, const struct iovec *iov, int iovcnt,
    ssize_t total)
{
	struct bpf_d *d = bpf_imfs_get_context_by_iop(iop);
	struct thread *td = rtems_bsd_get_curthread_or_null();
	struct uio uio = {
		/*
		 * uio_iov is declared non-const; the read path only
		 * stores into the buffers, so dropping the qualifier
		 * here is safe.
		 */
		.uio_iov = __DECONST(struct iovec *, iov),
		.uio_iovcnt = iovcnt,
		.uio_offset = 0,
		.uio_resid = total,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_READ,
		.uio_td = td
	};
	int error;

	if (td != NULL) {
		error = bpfread(d, &uio,
		    rtems_libio_to_fcntl_flags(iop->flags));
	} else {
		error = ENOMEM;
	}

	if (error == 0) {
		/* uio_resid holds what was NOT transferred. */
		return (total - uio.uio_resid);
	} else {
		rtems_set_errno_and_return_minus_one(error);
	}
}
/*
 * IMFS read handler: wrap the flat buffer in a one-element iovec and
 * delegate to bpf_imfs_readv().
 */
static ssize_t
bpf_imfs_read(rtems_libio_t *iop, void *buffer, size_t count)
{
	struct iovec iov;

	iov.iov_base = buffer;
	iov.iov_len = count;
	return (bpf_imfs_readv(iop, &iov, 1, count));
}
/*
 * IMFS writev handler: build a uio describing the caller's iovec array
 * and hand it to bpfwrite().  Returns the number of bytes written, or
 * -1 with errno set on error (ENOMEM when no thread context is
 * available).
 */
static ssize_t
bpf_imfs_writev(rtems_libio_t *iop, const struct iovec *iov, int iovcnt,
    ssize_t total)
{
	struct bpf_d *d = bpf_imfs_get_context_by_iop(iop);
	struct thread *td = rtems_bsd_get_curthread_or_null();
	struct uio uio = {
		/*
		 * uio_iov is declared non-const; the write path only
		 * reads from the buffers, so dropping the qualifier
		 * here is safe.
		 */
		.uio_iov = __DECONST(struct iovec *, iov),
		.uio_iovcnt = iovcnt,
		.uio_offset = 0,
		.uio_resid = total,
		.uio_segflg = UIO_USERSPACE,
		.uio_rw = UIO_WRITE,
		.uio_td = td
	};
	int error;

	if (td != NULL) {
		error = bpfwrite(d, &uio,
		    rtems_libio_to_fcntl_flags(iop->flags));
	} else {
		error = ENOMEM;
	}

	if (error == 0) {
		/* uio_resid holds what was NOT transferred. */
		return (total - uio.uio_resid);
	} else {
		rtems_set_errno_and_return_minus_one(error);
	}
}
/*
 * IMFS write handler: wrap the flat buffer in a one-element iovec and
 * delegate to bpf_imfs_writev().
 */
static ssize_t
bpf_imfs_write(rtems_libio_t *iop, const void *buffer, size_t count)
{
	struct iovec iov = {
		/*
		 * iov_base is declared non-const; the write path only
		 * reads from the buffer, so dropping the qualifier is
		 * safe.
		 */
		.iov_base = __DECONST(void *, buffer),
		.iov_len = count
	};

	return bpf_imfs_writev(iop, &iov, 1, count);
}
/*
 * IMFS ioctl handler: forward the request to bpfioctl() with the
 * current thread context.  Returns 0 on success, or -1 with errno set
 * (ENOMEM when no thread context is available).
 */
static int
bpf_imfs_ioctl(rtems_libio_t *iop, ioctl_command_t request, void *buffer)
{
	struct bpf_d *d = bpf_imfs_get_context_by_iop(iop);
	struct thread *td = rtems_bsd_get_curthread_or_null();
	int error;

	/* Compare against NULL, not 0, consistent with the sibling handlers. */
	if (td != NULL) {
		error = bpfioctl(d, request, buffer, 0, td);
	} else {
		error = ENOMEM;
	}

	return rtems_bsd_error_to_status_and_errno(error);
}
/*
 * IMFS poll handler: delegate to bpfpoll() with the current thread
 * context.
 */
static int
bpf_imfs_poll(rtems_libio_t *iop, int events)
{
	struct bpf_d *d;
	struct thread *td;

	d = bpf_imfs_get_context_by_iop(iop);
	td = rtems_bsd_get_curthread_or_wait_forever();
	return (bpfpoll(d, events, td));
}
/*
 * IMFS kqfilter handler: delegate to bpfkqfilter() for EVFILT_READ
 * registration.
 */
static int
bpf_imfs_kqfilter(rtems_libio_t *iop, struct knote *kn)
{

	return (bpfkqfilter(bpf_imfs_get_context_by_iop(iop), kn));
}
/*
 * IMFS file handler table for /dev/bpf nodes: routes open, close,
 * read(v), write(v), ioctl, poll and kqfilter to the BPF wrappers
 * above; everything else uses the filesystem defaults.
 */
static const rtems_filesystem_file_handlers_r bpf_imfs_handlers = {
.open_h = bpf_imfs_open,
.close_h = bpf_imfs_close,
.read_h = bpf_imfs_read,
.write_h = bpf_imfs_write,
.ioctl_h = bpf_imfs_ioctl,
.lseek_h = rtems_filesystem_default_lseek_file,
.fstat_h = rtems_filesystem_default_fstat,
.ftruncate_h = rtems_filesystem_default_ftruncate,
.fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fcntl_h = rtems_filesystem_default_fcntl,
.poll_h = bpf_imfs_poll,
.kqfilter_h = bpf_imfs_kqfilter,
.readv_h = bpf_imfs_readv,
.writev_h = bpf_imfs_writev
};
/*
 * IMFS node control for the generic /dev/bpf node; binds the handler
 * table above with default node lifecycle callbacks.
 */
static const IMFS_node_control bpf_imfs_control = {
.imfs_type = IMFS_GENERIC,
.handlers = &bpf_imfs_handlers,
.node_initialize = IMFS_node_initialize_default,
.node_remove = IMFS_node_remove_default,
.node_destroy = IMFS_node_destroy_default
};
#endif /* __rtems__ */
static void
bpf_drvinit(void *unused)
{
#ifndef __rtems__
struct cdev *dev;
#else /* __rtems__ */
mode_t mode = S_IFCHR | S_IRWXU | S_IRWXG | S_IRWXO;
int rv;
#endif /* __rtems__ */
mtx_init(&bpf_mtx, "bpf global lock", NULL, MTX_DEF);
LIST_INIT(&bpf_iflist);
#ifndef __rtems__
dev = make_dev(&bpf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "bpf");
/* For compatibility */
make_dev_alias(dev, "bpf0");
#else /* __rtems__ */
rv = IMFS_make_generic_node("/dev/bpf", mode, &bpf_imfs_control, NULL);
BSD_ASSERT(rv == 0);
#endif /* __rtems__ */
/* Register interface departure handler */
bpf_ifdetach_cookie = EVENTHANDLER_REGISTER(
@@ -2735,7 +3013,9 @@ bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
d->bd_hdrcmplt = bd->bd_hdrcmplt;
d->bd_direction = bd->bd_direction;
d->bd_feedback = bd->bd_feedback;
#ifndef __rtems__
d->bd_async = bd->bd_async;
#endif /* __rtems__ */
d->bd_rcount = bd->bd_rcount;
d->bd_dcount = bd->bd_dcount;
d->bd_fcount = bd->bd_fcount;

View File

@@ -85,7 +85,9 @@ struct bpf_d {
int bd_direction; /* select packet direction */
int bd_tstamp; /* select time stamping function */
int bd_feedback; /* true to feed back sent packets */
#ifndef __rtems__
int bd_async; /* non-zero if packet reception should generate signal */
#endif /* __rtems__ */
int bd_sig; /* signal to send upon packet reception */
struct sigio * bd_sigio; /* information for async I/O */
struct selinfo bd_sel; /* bsd select info */
@@ -112,8 +114,13 @@ struct bpf_d {
#define BPFD_UNLOCK(bd) mtx_unlock(&(bd)->bd_lock)
#define BPFD_LOCK_ASSERT(bd) mtx_assert(&(bd)->bd_lock, MA_OWNED)
#ifndef __rtems__
#define BPF_PID_REFRESH(bd, td) (bd)->bd_pid = (td)->td_proc->p_pid
#define BPF_PID_REFRESH_CUR(bd) (bd)->bd_pid = curthread->td_proc->p_pid
#else /* __rtems__ */
#define BPF_PID_REFRESH(bd, td) do { } while (0)
#define BPF_PID_REFRESH_CUR(bd) do { } while (0)
#endif /* __rtems__ */
#define BPF_LOCK() mtx_lock(&bpf_mtx)
#define BPF_UNLOCK() mtx_unlock(&bpf_mtx)
@@ -129,7 +136,9 @@ struct xbpf_d {
int bd_hdrcmplt;
int bd_direction;
int bd_feedback;
#ifndef __rtems__
int bd_async;
#endif /* __rtems__ */
u_int64_t bd_rcount;
u_int64_t bd_dcount;
u_int64_t bd_fcount;

View File

@@ -89,7 +89,9 @@ bpf_flags(struct xbpf_d *bd, char *flagbuf)
*flagbuf++ = (bd->bd_direction == BPF_D_IN) ? '-' :
((bd->bd_direction == BPF_D_OUT) ? 'o' : 's');
*flagbuf++ = bd->bd_feedback ? 'b' : '-';
#ifndef __rtems__
*flagbuf++ = bd->bd_async ? 'a' : '-';
#endif /* __rtems__ */
*flagbuf++ = bd->bd_locked ? 'l' : '-';
*flagbuf++ = '\0';
}

View File

@@ -109,6 +109,8 @@ the current Git submodule commit is this
* <sys/param.h>: Update Newlib to use a MSIZE of 256.
* BPF(4): Add support for zero-copy buffers.
[listing]
----
/* sysinit section? */

View File

@@ -0,0 +1 @@
#define DEV_BPF 1