Use kqueue() and kevent() from FreeBSD

This commit is contained in:
Sebastian Huber 2013-10-28 15:40:53 +01:00
parent bae343a308
commit 0c9f27ba24
13 changed files with 408 additions and 49 deletions

View File

@ -87,8 +87,6 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-timesupport.c
LIB_C_FILES += rtemsbsd/rtems/rtems-kvm.c
LIB_C_FILES += rtemsbsd/rtems/rtems-net-setup.c
LIB_C_FILES += rtemsbsd/rtems/rtems-syslog-initialize.c
LIB_C_FILES += rtemsbsd/rtems/rtems-uthread_kevent.c
LIB_C_FILES += rtemsbsd/rtems/rtems-uthread_kqueue.c
LIB_C_FILES += rtemsbsd/rtems/syslog.c
LIB_C_FILES += rtemsbsd/sys/dev/usb/controller/ehci_mpc83xx.c
LIB_C_FILES += rtemsbsd/sys/dev/usb/controller/ohci_lpc24xx.c

View File

@ -644,8 +644,6 @@ rtems.addRTEMSSourceFiles(
'rtems/rtems-kvm.c',
'rtems/rtems-net-setup.c',
'rtems/rtems-syslog-initialize.c',
'rtems/rtems-uthread_kevent.c',
'rtems/rtems-uthread_kqueue.c',
'rtems/syslog.c',
'sys/dev/usb/controller/ehci_mpc83xx.c',
'sys/dev/usb/controller/ohci_lpc24xx.c',

View File

@ -66,8 +66,12 @@ __FBSDID("$FreeBSD$");
#include <sys/ktrace.h>
#endif
#ifndef __rtems__
#include <vm/uma.h>
#include <vm/uma.h>
#ifdef __rtems__
#include <machine/rtems-bsd-syscall-api.h>
/* Maintain a global kqueue list on RTEMS */
static struct kqlist fd_kqlist;
#endif /* __rtems__ */
static MALLOC_DEFINE(M_KQUEUE, "kqueue", "memory for kqueue system");
@ -91,7 +95,6 @@ MTX_SYSINIT(kq_global, &kq_global, "kqueue order", MTX_DEF);
TASKQUEUE_DEFINE_THREAD(kqueue);
#ifndef __rtems__
static int kevent_copyout(void *arg, struct kevent *kevp, int count);
static int kevent_copyin(void *arg, struct kevent *kevp, int count);
static int kqueue_register(struct kqueue *kq, struct kevent *kev,
@ -109,6 +112,7 @@ static void kqueue_wakeup(struct kqueue *kq);
static struct filterops *kqueue_fo_find(int filt);
static void kqueue_fo_release(int filt);
#ifndef __rtems__
static fo_rdwr_t kqueue_read;
static fo_rdwr_t kqueue_write;
static fo_truncate_t kqueue_truncate;
@ -128,12 +132,13 @@ static struct fileops kqueueops = {
.fo_stat = kqueue_stat,
.fo_close = kqueue_close,
};
#else /* __rtems__ */
static const rtems_filesystem_file_handlers_r kqueueops;
#endif /* __rtems__ */
static int knote_attach(struct knote *kn, struct kqueue *kq);
static void knote_drop(struct knote *kn, struct thread *td);
#endif /* __rtems__ */
static void knote_enqueue(struct knote *kn);
#ifndef __rtems__
static void knote_dequeue(struct knote *kn);
static void knote_init(void);
static struct knote *knote_alloc(int waitok);
@ -141,9 +146,11 @@ static void knote_free(struct knote *kn);
static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
#ifndef __rtems__
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
#endif /* __rtems__ */
static int filt_fileattach(struct knote *kn);
static void filt_timerexpire(void *knx);
static int filt_timerattach(struct knote *kn);
@ -160,8 +167,10 @@ static struct filterops file_filtops =
static struct filterops kqread_filtops =
{ 1, NULL, filt_kqdetach, filt_kqueue };
/* XXX - move to kern_proc.c? */
#ifndef __rtems__
static struct filterops proc_filtops =
{ 0, filt_procattach, filt_procdetach, filt_proc };
#endif /* __rtems__ */
static struct filterops timer_filtops =
{ 0, filt_timerattach, filt_timerdetach, filt_timer };
static struct filterops user_filtops = {
@ -176,7 +185,6 @@ static int kq_ncallouts = 0;
static int kq_calloutmax = (4 * 1024);
SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
&kq_calloutmax, 0, "Maximum number of callouts allocated for kqueue");
#endif /* __rtems__ */
/* XXX - ensure not KN_INFLUX?? */
#define KNOTE_ACTIVATE(kn, islock) do { \
@ -241,7 +249,6 @@ SYSCTL_INT(_kern, OID_AUTO, kq_calloutmax, CTLFLAG_RW,
#define KN_HASHSIZE 64 /* XXX should be tunable */
#define KN_HASH(val, mask) (((val) ^ (val >> 8)) & (mask))
#ifndef __rtems__
static int
filt_nullattach(struct knote *kn)
{
@ -270,11 +277,20 @@ static struct {
{ &file_filtops }, /* EVFILT_WRITE */
{ &null_filtops }, /* EVFILT_AIO */
{ &file_filtops }, /* EVFILT_VNODE */
#ifndef __rtems__
{ &proc_filtops }, /* EVFILT_PROC */
{ &sig_filtops }, /* EVFILT_SIGNAL */
#else /* __rtems__ */
{ &null_filtops }, /* EVFILT_PROC */
{ &null_filtops }, /* EVFILT_SIGNAL */
#endif /* __rtems__ */
{ &timer_filtops }, /* EVFILT_TIMER */
{ &null_filtops }, /* former EVFILT_NETDEV */
#ifndef __rtems__
{ &fs_filtops }, /* EVFILT_FS */
#else /* __rtems__ */
{ &null_filtops }, /* EVFILT_FS */
#endif /* __rtems__ */
{ &null_filtops }, /* EVFILT_LIO */
{ &user_filtops }, /* EVFILT_USER */
};
@ -305,6 +321,15 @@ kqueue_kqfilter(struct file *fp, struct knote *kn)
return (0);
}
#ifdef __rtems__
static int
rtems_bsd_kqueue_kqfilter(rtems_libio_t *iop, struct knote *kn)
{
struct file *fp = rtems_bsd_iop_to_fp(iop);
return kqueue_kqfilter(fp, kn);
}
#endif /* __rtems__ */
static void
filt_kqdetach(struct knote *kn)
@ -324,6 +349,7 @@ filt_kqueue(struct knote *kn, long hint)
return (kn->kn_data > 0);
}
#ifndef __rtems__
/* XXX - move to kern_proc.c? */
static int
filt_procattach(struct knote *kn)
@ -502,6 +528,7 @@ knote_fork(struct knlist *list, int pid)
}
list->kl_unlock(list->kl_lockarg);
}
#endif /* __rtems__ */
static int
timertoticks(intptr_t data)
@ -669,15 +696,24 @@ filt_usertouch(struct knote *kn, struct kevent *kev, u_long type)
}
}
#ifndef __rtems__
int
kqueue(struct thread *td, struct kqueue_args *uap)
#else /* __rtems__ */
static int
rtems_bsd_kqueue(struct thread *td, struct kqueue_args *uap)
#endif /* __rtems__ */
{
struct filedesc *fdp;
struct kqueue *kq;
struct file *fp;
int fd, error;
#ifndef __rtems__
fdp = td->td_proc->p_fd;
#else /* __rtems__ */
(void) fdp;
#endif /* __rtems__ */
error = falloc(td, &fp, &fd);
if (error)
goto done2;
@ -686,13 +722,21 @@ kqueue(struct thread *td, struct kqueue_args *uap)
kq = malloc(sizeof *kq, M_KQUEUE, M_WAITOK | M_ZERO);
mtx_init(&kq->kq_lock, "kqueue", NULL, MTX_DEF|MTX_DUPOK);
TAILQ_INIT(&kq->kq_head);
#ifndef __rtems__
kq->kq_fdp = fdp;
#endif /* __rtems__ */
knlist_init_mtx(&kq->kq_sel.si_note, &kq->kq_lock);
TASK_INIT(&kq->kq_task, 0, kqueue_task, kq);
#ifndef __rtems__
FILEDESC_XLOCK(fdp);
SLIST_INSERT_HEAD(&fdp->fd_kqlist, kq, kq_list);
FILEDESC_XUNLOCK(fdp);
#else /* __rtems__ */
rtems_libio_lock();
SLIST_INSERT_HEAD(&fd_kqlist, kq, kq_list);
rtems_libio_unlock();
#endif /* __rtems__ */
finit(fp, FREAD | FWRITE, DTYPE_KQUEUE, kq, &kqueueops);
fdrop(fp, td);
@ -701,6 +745,27 @@ kqueue(struct thread *td, struct kqueue_args *uap)
done2:
return (error);
}
#ifdef __rtems__
int
kqueue(void)
{
struct thread *td = rtems_bsd_get_curthread_or_null();
struct kqueue_args ua;
int error;
if (td != NULL) {
error = rtems_bsd_kqueue(td, &ua);
} else {
error = ENOMEM;
}
if (error == 0) {
return td->td_retval[0];
} else {
rtems_set_errno_and_return_minus_one(error);
}
}
#endif /* __rtems__ */
#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
@ -712,8 +777,17 @@ struct kevent_args {
const struct timespec *timeout;
};
#endif
#ifndef __rtems__
int
kevent(struct thread *td, struct kevent_args *uap)
#else /* __rtems__ */
static int
kern_kevent(struct thread *td, int fd, int nchanges, int nevents, struct
kevent_copyops *k_ops, const struct timespec *timeout);
static int
rtems_bsd_kevent(struct thread *td, struct kevent_args *uap)
#endif /* __rtems__ */
{
struct timespec ts, *tsp;
struct kevent_copyops k_ops = { uap,
@ -763,6 +837,38 @@ kevent(struct thread *td, struct kevent_args *uap)
return (error);
}
#ifdef __rtems__
__weak_reference(kevent, _kevent);
int
kevent(int kq, const struct kevent *changelist, int nchanges,
struct kevent *eventlist, int nevents,
const struct timespec *timeout)
{
struct thread *td = rtems_bsd_get_curthread_or_null();
struct kevent_args ua = {
.fd = kq,
.changelist = changelist,
.nchanges = nchanges,
.eventlist = eventlist,
.nevents = nevents,
.timeout = timeout
};
int error;
if (td != NULL) {
error = rtems_bsd_kevent(td, &ua);
} else {
error = ENOMEM;
}
if (error == 0) {
return td->td_retval[0];
} else {
rtems_set_errno_and_return_minus_one(error);
}
}
#endif /* __rtems__ */
/*
* Copy 'count' items into the destination list pointed to by uap->eventlist.
@ -981,7 +1087,11 @@ findkn:
goto findkn;
}
#ifndef __rtems__
if (fp->f_type == DTYPE_KQUEUE) {
#else /* __rtems__ */
if (fp->f_io.pathinfo.handlers == &kqueueops) {
#endif /* __rtems__ */
/*
* if we add some inteligence about what we are doing,
* we should be able to support events on ourselves.
@ -1157,7 +1267,11 @@ kqueue_acquire(struct file *fp, struct kqueue **kqp)
error = 0;
kq = fp->f_data;
#ifndef __rtems__
if (fp->f_type != DTYPE_KQUEUE || kq == NULL)
#else /* __rtems__ */
if (fp->f_io.pathinfo.handlers != &kqueueops || kq == NULL)
#endif /* __rtems__ */
return (EBADF);
*kqp = kq;
KQ_LOCK(kq);
@ -1184,7 +1298,6 @@ kqueue_release(struct kqueue *kq, int locked)
if (!locked)
KQ_UNLOCK(kq);
}
#endif /* __rtems__ */
static void
kqueue_schedtask(struct kqueue *kq)
@ -1200,7 +1313,6 @@ kqueue_schedtask(struct kqueue *kq)
}
}
#ifndef __rtems__
/*
* Expand the kq to make sure we have storage for fops/ident pair.
*
@ -1497,6 +1609,7 @@ done_nl:
return (error);
}
#ifndef __rtems__
/*
* XXX
* This could be expanded to call kqueue_scan, if desired.
@ -1573,6 +1686,7 @@ kqueue_ioctl(struct file *fp, u_long cmd, void *data,
return (ENOTTY);
}
#endif /* __rtems__ */
/*ARGSUSED*/
static int
@ -1600,14 +1714,39 @@ kqueue_poll(struct file *fp, int events, struct ucred *active_cred,
KQ_UNLOCK(kq);
return (revents);
}
#ifdef __rtems__
static int
rtems_bsd_kqueue_poll(rtems_libio_t *iop, int events)
{
struct thread *td = rtems_bsd_get_curthread_or_null();
struct file *fp = rtems_bsd_iop_to_fp(iop);
int error;
if (td != NULL) {
error = kqueue_poll(fp, events, NULL, td);
} else {
error = ENOMEM;
}
return error;
}
#endif /* __rtems__ */
/*ARGSUSED*/
#ifndef __rtems__
static int
kqueue_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
struct thread *td)
{
bzero((void *)st, sizeof *st);
#else /* __rtems__ */
static int
rtems_bsd_kqueue_stat(const rtems_filesystem_location_info_t *loc,
struct stat *st)
{
(void) loc;
#endif /* __rtems__ */
/*
* We no longer return kq_count because the unlocked value is useless.
* If you spent all this time getting the count, why not spend your
@ -1629,6 +1768,11 @@ kqueue_close(struct file *fp, struct thread *td)
int i;
int error;
#ifdef __rtems__
/* FIXME: Move this to the RTEMS close() function */
knote_fdclose(td, rtems_bsd_fp_to_fd(fp));
#endif /* __rtems__ */
if ((error = kqueue_acquire(fp, &kq)))
return error;
@ -1641,7 +1785,11 @@ kqueue_close(struct file *fp, struct thread *td)
msleep(&kq->kq_refcnt, &kq->kq_lock, PSOCK, "kqclose", 0);
KASSERT(kq->kq_refcnt == 1, ("other refs are out there!"));
#ifndef __rtems__
fdp = kq->kq_fdp;
#else /* __rtems__ */
(void) fdp;
#endif /* __rtems__ */
KASSERT(knlist_empty(&kq->kq_sel.si_note),
("kqueue's knlist not empty"));
@ -1693,13 +1841,21 @@ kqueue_close(struct file *fp, struct thread *td)
KQ_UNLOCK(kq);
#ifndef __rtems__
FILEDESC_XLOCK(fdp);
SLIST_REMOVE(&fdp->fd_kqlist, kq, kqueue, kq_list);
FILEDESC_XUNLOCK(fdp);
#else /* __rtems__ */
rtems_libio_lock();
SLIST_REMOVE(&fd_kqlist, kq, kqueue, kq_list);
rtems_libio_unlock();
#endif /* __rtems__ */
knlist_destroy(&kq->kq_sel.si_note);
mtx_destroy(&kq->kq_lock);
#ifndef __rtems__
kq->kq_fdp = NULL;
#endif /* __rtems__ */
if (kq->kq_knhash != NULL)
free(kq->kq_knhash, M_KQUEUE);
@ -1712,6 +1868,22 @@ kqueue_close(struct file *fp, struct thread *td)
return (0);
}
#ifdef __rtems__
static int
rtems_bsd_kqueue_close(rtems_libio_t *iop)
{
struct thread *td = rtems_bsd_get_curthread_or_null();
struct file *fp = rtems_bsd_iop_to_fp(iop);
int error;
if (td != NULL) {
error = kqueue_close(fp, td);
} else {
error = ENOMEM;
}
return rtems_bsd_error_to_status_and_errno(error);
}
#endif /* __rtems__ */
static void
@ -1731,7 +1903,11 @@ kqueue_wakeup(struct kqueue *kq)
if (!knlist_empty(&kq->kq_sel.si_note))
kqueue_schedtask(kq);
if ((kq->kq_state & KQ_ASYNC) == KQ_ASYNC) {
#ifndef __rtems__
pgsigio(&kq->kq_sigio, SIGIO, 0);
#else /* __rtems__ */
BSD_ASSERT(0);
#endif /* __rtems__ */
}
}
@ -1848,7 +2024,6 @@ knlist_remove(struct knlist *knl, struct knote *kn, int islocked)
knlist_remove_kq(knl, kn, islocked, 0);
}
#ifndef __rtems__
/*
* remove knote from a specified klist while in f_event handler.
*/
@ -1859,7 +2034,6 @@ knlist_remove_inevent(struct knlist *knl, struct knote *kn)
knlist_remove_kq(knl, kn, 1,
(kn->kn_status & KN_HASKQLOCK) == KN_HASKQLOCK);
}
#endif /* __rtems__ */
int
knlist_empty(struct knlist *knl)
@ -1953,7 +2127,6 @@ knlist_destroy(struct knlist *knl)
SLIST_INIT(&knl->kl_list);
}
#ifndef __rtems__
/*
* Even if we are locked, we may need to drop the lock to allow any influx
* knotes time to "settle".
@ -2022,7 +2195,9 @@ again: /* need to reacquire lock since we have dropped it */
void
knote_fdclose(struct thread *td, int fd)
{
#ifndef __rtems__
struct filedesc *fdp = td->td_proc->p_fd;
#endif /* __rtems__ */
struct kqueue *kq;
struct knote *kn;
int influx;
@ -2033,7 +2208,13 @@ knote_fdclose(struct thread *td, int fd)
* We shouldn't have to worry about new kevents appearing on fd
* since filedesc is locked.
*/
#ifndef __rtems__
SLIST_FOREACH(kq, &fdp->fd_kqlist, kq_list) {
#else /* __rtems__ */
/* FIXME: Use separate lock? */
rtems_libio_lock();
SLIST_FOREACH(kq, &fd_kqlist, kq_list) {
#endif /* __rtems__ */
KQ_LOCK(kq);
again:
@ -2058,6 +2239,9 @@ again:
}
KQ_UNLOCK_FLUX(kq);
}
#ifdef __rtems__
rtems_libio_unlock();
#endif /* __rtems__ */
}
static int
@ -2120,7 +2304,6 @@ knote_drop(struct knote *kn, struct thread *td)
kn->kn_fop = NULL;
knote_free(kn);
}
#endif /* __rtems__ */
static void
knote_enqueue(struct knote *kn)
@ -2136,7 +2319,6 @@ knote_enqueue(struct knote *kn)
kqueue_wakeup(kq);
}
#ifndef __rtems__
static void
knote_dequeue(struct knote *kn)
{
@ -2197,4 +2379,20 @@ noacquire:
return error;
}
#ifdef __rtems__
static const rtems_filesystem_file_handlers_r kqueueops = {
.open_h = rtems_filesystem_default_open,
.close_h = rtems_bsd_kqueue_close,
.read_h = rtems_filesystem_default_read,
.write_h = rtems_filesystem_default_write,
.ioctl_h = rtems_filesystem_default_ioctl,
.lseek_h = rtems_filesystem_default_lseek,
.fstat_h = rtems_bsd_kqueue_stat,
.ftruncate_h = rtems_filesystem_default_ftruncate,
.fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fcntl_h = rtems_filesystem_default_fcntl,
.poll_h = rtems_bsd_kqueue_poll,
.kqfilter_h = rtems_bsd_kqueue_kqfilter
};
#endif /* __rtems__ */

View File

@ -440,6 +440,11 @@ soo_close(struct file *fp, struct thread *td)
int error = 0;
struct socket *so;
#ifdef __rtems__
/* FIXME: Move this to the RTEMS close() function */
knote_fdclose(td, rtems_bsd_fp_to_fd(fp));
#endif /* __rtems__ */
so = fp->f_data;
#ifndef __rtems__
fp->f_ops = &badfileops;
@ -474,6 +479,7 @@ const rtems_filesystem_file_handlers_r socketops = {
.fsync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fdatasync_h = rtems_filesystem_default_fsync_or_fdatasync,
.fcntl_h = rtems_filesystem_default_fcntl,
.poll_h = rtems_bsd_soo_poll
.poll_h = rtems_bsd_soo_poll,
.kqfilter_h = rtems_bsd_soo_kqfilter
};
#endif /* __rtems__ */

View File

@ -2945,6 +2945,9 @@ sopoll_generic(struct socket *so, int events, struct ucred *active_cred,
return (revents);
}
#ifdef __rtems__
static
#endif /* __rtems__ */
int
soo_kqfilter(struct file *fp, struct knote *kn)
{
@ -2973,6 +2976,15 @@ soo_kqfilter(struct file *fp, struct knote *kn)
SOCKBUF_UNLOCK(sb);
return (0);
}
#ifdef __rtems__
int
rtems_bsd_soo_kqfilter(rtems_libio_t *iop, struct knote *kn)
{
struct file *fp = rtems_bsd_iop_to_fp(iop);
return soo_kqfilter(fp, kn);
}
#endif /* __rtems__ */
/*
* Some routines that return EOPNOTSUPP for entry points that are not

View File

@ -46,7 +46,9 @@ struct kqueue {
int kq_count; /* number of pending events */
struct selinfo kq_sel;
struct sigio *kq_sigio;
#ifndef __rtems__
struct filedesc *kq_fdp;
#endif /* __rtems__ */
int kq_state;
#define KQ_SEL 0x01
#define KQ_SLEEP 0x02

View File

@ -298,6 +298,8 @@ fo_poll_t soo_poll;
fo_kqfilter_t soo_kqfilter;
fo_stat_t soo_stat;
fo_close_t soo_close;
#else /* __rtems__ */
int rtems_bsd_soo_kqfilter(rtems_libio_t *iop, struct knote *kn);
#endif /* __rtems__ */
#ifndef __rtems__
@ -446,6 +448,7 @@ fo_close(fp, td)
return ((*fp->f_ops->fo_close)(fp, td));
}
#endif /* __rtems__ */
static __inline int
fo_kqfilter(fp, kn)
@ -453,9 +456,12 @@ fo_kqfilter(fp, kn)
struct knote *kn;
{
#ifndef __rtems__
return ((*fp->f_ops->fo_kqfilter)(fp, kn));
}
#else /* __rtems__ */
return ((*fp->f_io.pathinfo.handlers->kqfilter_h)(&fp->f_io, kn));
#endif /* __rtems__ */
}
#endif /* _KERNEL */

View File

@ -1078,17 +1078,23 @@ struct getresgid_args {
char egid_l_[PADL_(gid_t *)]; gid_t * egid; char egid_r_[PADR_(gid_t *)];
char sgid_l_[PADL_(gid_t *)]; gid_t * sgid; char sgid_r_[PADR_(gid_t *)];
};
#endif /* __rtems__ */
struct kqueue_args {
register_t dummy;
};
struct kevent_args {
char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
#ifndef __rtems__
char changelist_l_[PADL_(struct kevent *)]; struct kevent * changelist; char changelist_r_[PADR_(struct kevent *)];
#else /* __rtems__ */
char changelist_l_[PADL_(struct kevent *)]; const struct kevent * changelist; char changelist_r_[PADR_(struct kevent *)];
#endif /* __rtems__ */
char nchanges_l_[PADL_(int)]; int nchanges; char nchanges_r_[PADR_(int)];
char eventlist_l_[PADL_(struct kevent *)]; struct kevent * eventlist; char eventlist_r_[PADR_(struct kevent *)];
char nevents_l_[PADL_(int)]; int nevents; char nevents_r_[PADR_(int)];
char timeout_l_[PADL_(const struct timespec *)]; const struct timespec * timeout; char timeout_r_[PADR_(const struct timespec *)];
};
#ifndef __rtems__
struct extattr_set_fd_args {
char fd_l_[PADL_(int)]; int fd; char fd_r_[PADR_(int)];
char attrnamespace_l_[PADL_(int)]; int attrnamespace; char attrnamespace_r_[PADR_(int)];

View File

@ -98,6 +98,8 @@ the current Git submodule commit is this
* ZONE(9): Review allocator lock usage in rtems-bsd-chunk.c.
* KQUEUE(2): Choose proper lock for global kqueue list.
[listing]
----
/* sysinit section? */

View File

@ -45,10 +45,13 @@
#define _RTEMS_BSD_MACHINE_RTEMS_BSD_SYSCALL_API_H_
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/time.h>
__BEGIN_DECLS
@ -69,6 +72,12 @@ int getsockname(int, struct sockaddr * __restrict, socklen_t * __restrict);
int getsockopt(int, int, int, void * __restrict, socklen_t * __restrict);
int kqueue(void);
int kevent(int kq, const struct kevent *changelist, int nchanges,
struct kevent *eventlist, int nevents,
const struct timespec *timeout);
int listen(int, int);
int poll(struct pollfd _pfd[], nfds_t _nfds, int _timeout);

View File

@ -1,15 +0,0 @@
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <rtems.h>
#include <rtems/error.h>
__weak_reference(_kevent, kevent);
int
_kevent(int kq, const struct kevent *changelist, int nchanges,
struct kevent *eventlist, int nevents, const struct timespec *timeout)
{
rtems_panic("Unimplemented method!!!\n");
return -1;
}

View File

@ -1,14 +0,0 @@
#include <rtems/bsd/sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <rtems.h>
#include <rtems/error.h>
__weak_reference(_kqueue, kqueue);
int
_kqueue(void)
{
rtems_panic("Unimplemented method!!!\n");
return -1;
}

View File

@ -31,10 +31,12 @@
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/filio.h>
#include <netinet/in.h>
#include <arpa/inet.h>
@ -1354,6 +1356,151 @@ test_socket_poll(void)
assert(rtems_resource_snapshot_check(&snapshot));
}
static void
test_kqueue_unsupported_ops(void)
{
rtems_resource_snapshot snapshot;
int kq;
int rv;
off_t off;
ssize_t n;
char buf[1];
puts("test kqueue unsupported ops");
rtems_resource_snapshot_take(&snapshot);
kq = kqueue();
assert(kq >= 0);
errno = 0;
n = read(kq, &buf[0], sizeof(buf));
assert(n == -1);
assert(errno == ENOTSUP);
errno = 0;
n = write(kq, &buf[0], sizeof(buf));
assert(n == -1);
assert(errno == ENOTSUP);
errno = 0;
rv = ioctl(kq, 0);
assert(rv == -1);
assert(errno == ENOTSUP);
errno = 0;
off = lseek(kq, 0, SEEK_CUR);
assert(off == -1);
assert(errno == ESPIPE);
errno = 0;
rv = ftruncate(kq, 0);
assert(rv == -1);
assert(errno == EINVAL);
errno = 0;
rv = fsync(kq);
assert(rv == -1);
assert(errno == EINVAL);
errno = 0;
rv = fdatasync(kq);
assert(rv == -1);
assert(errno == EINVAL);
rv = close(kq);
assert(rv == 0);
assert(rtems_resource_snapshot_check(&snapshot));
}
static void
no_mem_kqueue_fstat(int fd)
{
struct stat st;
int rv;
rv = fstat(fd, &st);
assert(rv == 0);
}
static void
test_kqueue_fstat(void)
{
static const struct stat expected_st = {
.st_mode = S_IFIFO
};
rtems_resource_snapshot snapshot;
struct stat st;
int kq;
int rv;
puts("test kqueue fstat");
rtems_resource_snapshot_take(&snapshot);
kq = kqueue();
assert(kq >= 0);
do_no_mem_test(no_mem_kqueue_fstat, kq);
rv = fstat(kq, &st);
assert(rv == 0);
assert(memcmp(&expected_st, &st, sizeof(st)) == 0);
rv = close(kq);
assert(rv == 0);
assert(rtems_resource_snapshot_check(&snapshot));
}
static void
no_mem_kqueue_kevent(int fd)
{
int rv;
errno = 0;
rv = kevent(fd, NULL, 0, NULL, 0, NULL);
assert(rv == -1);
assert(errno == ENOMEM);
}
static void
test_kqueue_kevent(void)
{
rtems_resource_snapshot snapshot;
int kq;
int rv;
puts("test kqueue kevent");
rtems_resource_snapshot_take(&snapshot);
kq = kqueue();
assert(kq >= 0);
do_no_mem_test(no_mem_kqueue_kevent, kq);
rv = kevent(kq, NULL, 0, NULL, 0, NULL);
assert(rv == 0);
rv = close(kq);
assert(rv == 0);
errno = 0;
rv = kevent(kq, NULL, 0, NULL, 0, NULL);
assert(rv == -1);
assert(errno == EBADF);
errno = 0;
rv = kevent(0, NULL, 0, NULL, 0, NULL);
assert(rv == -1);
assert(errno == EBADF);
assert(rtems_resource_snapshot_check(&snapshot));
}
static const char prog_name[] = "prog";
static int
@ -1563,6 +1710,10 @@ test_main(void)
test_socket_select();
test_socket_poll();
test_kqueue_unsupported_ops();
test_kqueue_fstat();
test_kqueue_kevent();
test_bsd_program();
test_warn();
test_err();