Switch to a version of select() that is closer to the BSD implementation.

Jennifer Averett 2012-11-26 09:47:09 -06:00
parent 9d3ac2b51a
commit 0bde19eee0
27 changed files with 12822 additions and 57 deletions


@ -12,7 +12,6 @@ CFLAGS += -I freebsd/$(RTEMS_CPU)/include
CFLAGS += -I contrib/altq
CFLAGS += -I contrib/pf
CFLAGS += -I copied/rtemsbsd/$(RTEMS_CPU)/include
CFLAGS += -g
CFLAGS += -w
CFLAGS += -std=gnu99
CFLAGS += -MT $@ -MD -MP -MF $(basename $@).d
@ -38,7 +37,6 @@ C_FILES += rtemsbsd/src/rtems-bsd-lock.c
C_FILES += rtemsbsd/src/rtems-bsd-log.c
C_FILES += rtemsbsd/src/rtems-bsd-sx.c
C_FILES += rtemsbsd/src/rtems-bsd-rwlock.c
C_FILES += rtemsbsd/src/rtems-bsd-generic.c
C_FILES += rtemsbsd/src/rtems-bsd-page.c
C_FILES += rtemsbsd/src/rtems-bsd-panic.c
C_FILES += rtemsbsd/src/rtems-bsd-synch.c
@ -406,6 +404,9 @@ C_FILES += freebsd/netatalk/ddp_pcb.c
C_FILES += freebsd/netatalk/ddp_usrreq.c
C_FILES += freebsd/netatalk/at_proto.c
C_FILES += freebsd/netatalk/ddp_output.c
C_FILES += freebsd/kern/sys_generic.c
C_FILES += freebsd/kern/kern_descrip.c
C_FILES += freebsd/kern/kern_mtxpool.c
ifeq ($(RTEMS_CPU), i386)
C_FILES += freebsd/i386/pci/pci_bus.c
C_FILES += freebsd/i386/i386/legacy.c


@ -659,7 +659,7 @@ rtems.addRTEMSSourceFiles(
'src/rtems-bsd-log.c',
'src/rtems-bsd-sx.c',
'src/rtems-bsd-rwlock.c',
'src/rtems-bsd-generic.c',
#'src/rtems-bsd-generic.c',
'src/rtems-bsd-page.c',
'src/rtems-bsd-panic.c',
'src/rtems-bsd-synch.c',
@ -711,13 +711,13 @@ rtems.addEmptyHeaderFiles(
'sys/cpuset.h',
'sys/exec.h',
'sys/fail.h',
'sys/limits.h',
#'sys/limits.h',
'sys/sleepqueue.h',
'sys/namei.h',
'sys/_pthreadtypes.h',
#'sys/resourcevar.h',
'sys/sched.h',
'sys/select.h',
#'sys/select.h',
'sys/syscallsubr.h',
'sys/sysent.h',
'sys/syslimits.h',
@ -725,7 +725,7 @@ rtems.addEmptyHeaderFiles(
'sys/stat.h',
#'sys/time.h',
'time.h',
'sys/timespec.h',
#'sys/timespec.h',
'sys/_timeval.h',
#'sys/vmmeter.h',
#'sys/vnode.h',
@ -1346,6 +1346,14 @@ devNic.addHeaderFiles(
'netatalk/ddp_var.h',
'netatalk/phase2.h',
'sys/mman.h',
'sys/buf.h',
'sys/mqueue.h',
'sys/tty.h',
'sys/ttyqueue.h',
'sys/ttydisc.h',
'sys/ttydevsw.h',
'sys/ttyhook.h',
'sys/user.h',
]
)
@ -1412,6 +1420,9 @@ devNic.addSourceFiles(
'netatalk/ddp_usrreq.c',
'netatalk/at_proto.c',
'netatalk/ddp_output.c',
'kern/sys_generic.c',
'kern/kern_descrip.c',
'kern/kern_mtxpool.c',
]
)


@ -160,6 +160,9 @@ C_FILES += lib/libipsec/ipsec_dump_policy.c
C_FILES += lib/libipsec/policy_token.c
C_FILES += lib/libipsec/policy_parse.c
C_FILES += lib/libc_r/uthread/uthread_select.c
C_FILES += lib/libc_r/uthread/uthread_kern.c
# RTEMS Specific Files
# C_FILES += rtems/rtems-net-setup.c
C_FILES += rtems/syslog.c
@ -171,6 +174,9 @@ C_FILES += rtems/rtems-uthread_main_np.c
C_FILES += rtems/rtems-uthread_kevent.c
C_FILES += rtems/rtems-uthread_kqueue.c
C_FILES += rtems/rtems-shell.c
C_FILES += rtems/rtems-syspoll.c
C_FILES += rtems/rtems-uthread_init.c
C_FILES += rtems/rtems-get_curthread.c
# ping command sources
C_FILES += commands/sbin/ping/ping.c
@ -278,14 +284,14 @@ GEN_FILES += commands/sbin/route/keywords.h
# lib/libc/net
GEN_FILES += lib/libc/net/nslexer.c
GEN_FILES += lib/libc/net/nsparser.c
EXTRA_CLEAN = lib/libc/net/nsparser.i
EXTRA_CLEAN += lib/libc/net/y.tab.h
CLEAN_FILES = lib/libc/net/nsparser.i
CLEAN_FILES += lib/libc/net/y.tab.h
# lib/libipsec
GEN_FILES += lib/libipsec/policy_token.c
GEN_FILES += lib/libipsec/policy_parse.c
EXTRA_CLEAN += lib/libipsec/policy_parse.i
EXTRA_CLEAN += lib/libipsec/y.tab.h
CLEAN_FILES += lib/libipsec/policy_parse.i
CLEAN_FILES += lib/libipsec/y.tab.h
all: $(LIB)


@ -106,6 +106,10 @@ __FBSDID("$FreeBSD$");
#include <sysexits.h>
#include <unistd.h>
#ifdef __rtems__
#define select __select
#endif
#define INADDR_LEN ((int)sizeof(in_addr_t))
#define TIMEVAL_LEN ((int)sizeof(struct tv32))
#define MASK_LEN (ICMP_MASKLEN - ICMP_MINLEN)

File diff suppressed because it is too large.


@ -0,0 +1,240 @@
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include <unistd.h>
#include <errno.h>
#include <poll.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/fcntl.h>
#include <pthread.h>
#include "pthread_private.h"
__weak_reference(__select, select);
#ifdef __rtems__
#include <freebsd/sys/timespec.h>
#define realloc _bsd_realloc
#endif
int
_select(int numfds, fd_set * readfds, fd_set * writefds, fd_set * exceptfds,
struct timeval * timeout)
{
struct pthread *curthread = _get_curthread();
struct timespec ts;
int i, ret = 0, f_wait = 1;
int pfd_index, got_events = 0, fd_count = 0;
struct pthread_poll_data data;
#ifndef __rtems__ /* XXX - NOT SURE WHAT TO DEFINE _thread_dtablesize TO. */
if (numfds > _thread_dtablesize) {
numfds = _thread_dtablesize;
}
#endif
/* Count the number of file descriptors to be polled: */
if (readfds || writefds || exceptfds) {
for (i = 0; i < numfds; i++) {
if ((readfds && FD_ISSET(i, readfds)) ||
(exceptfds && FD_ISSET(i, exceptfds)) ||
(writefds && FD_ISSET(i, writefds))) {
fd_count++;
}
}
}
/*
* Allocate memory for poll data if it hasn't already been
* allocated or if previously allocated memory is insufficient.
*/
if ((curthread->poll_data.fds == NULL) ||
(curthread->poll_data.nfds < fd_count)) {
data.fds = (struct pollfd *) realloc(curthread->poll_data.fds,
sizeof(struct pollfd) * MAX(128, fd_count));
if (data.fds == NULL) {
errno = ENOMEM;
ret = -1;
}
else {
/*
* Note that the threads poll data always
* indicates what is allocated, not what is
* currently being polled.
*/
curthread->poll_data.fds = data.fds;
curthread->poll_data.nfds = MAX(128, fd_count);
}
}
/* Check if a timeout was specified: */
if (timeout) {
if (timeout->tv_sec < 0 ||
timeout->tv_usec < 0 || timeout->tv_usec >= 1000000) {
errno = EINVAL;
return (-1);
}
/* Convert the timeval to a timespec: */
TIMEVAL_TO_TIMESPEC(timeout, &ts);
/* Set the wake up time: */
_thread_kern_set_timeout(&ts);
if (ts.tv_sec == 0 && ts.tv_nsec == 0)
f_wait = 0;
} else {
/* Wait for ever: */
_thread_kern_set_timeout(NULL);
}
if (ret == 0) {
/* Setup the wait data. */
data.fds = curthread->poll_data.fds;
data.nfds = fd_count;
/*
* Setup the array of pollfds. Optimize this by
* running the loop in reverse and stopping when
* the number of selected file descriptors is reached.
*/
for (i = numfds - 1, pfd_index = fd_count - 1;
(i >= 0) && (pfd_index >= 0); i--) {
data.fds[pfd_index].events = 0;
if (readfds && FD_ISSET(i, readfds)) {
data.fds[pfd_index].events = POLLRDNORM;
}
if (exceptfds && FD_ISSET(i, exceptfds)) {
data.fds[pfd_index].events |= POLLRDBAND;
}
if (writefds && FD_ISSET(i, writefds)) {
data.fds[pfd_index].events |= POLLWRNORM;
}
if (data.fds[pfd_index].events != 0) {
/*
* Set the file descriptor to be polled and
* clear revents in case of a timeout which
* leaves fds unchanged:
*/
data.fds[pfd_index].fd = i;
data.fds[pfd_index].revents = 0;
pfd_index--;
}
}
if (((ret = __sys_poll(data.fds, data.nfds, 0)) == 0) &&
(f_wait != 0)) {
curthread->data.poll_data = &data;
curthread->interrupted = 0;
_thread_kern_sched_state(PS_SELECT_WAIT, __FILE__, __LINE__);
if (curthread->interrupted) {
errno = EINTR;
data.nfds = 0;
ret = -1;
} else
ret = data.nfds;
}
}
if (ret >= 0) {
numfds = 0;
for (i = 0; i < fd_count; i++) {
/*
* Check the results of the poll and clear
* this file descriptor from the fdset if
* the requested event wasn't ready.
*/
/*
* First check for invalid descriptor.
* If found, set errno and return -1.
*/
if (data.fds[i].revents & POLLNVAL) {
errno = EBADF;
return -1;
}
got_events = 0;
if (readfds != NULL) {
if (FD_ISSET(data.fds[i].fd, readfds)) {
if ((data.fds[i].revents & (POLLIN
| POLLRDNORM | POLLERR
| POLLHUP | POLLNVAL)) != 0)
got_events++;
else
FD_CLR(data.fds[i].fd, readfds);
}
}
if (writefds != NULL) {
if (FD_ISSET(data.fds[i].fd, writefds)) {
if ((data.fds[i].revents & (POLLOUT
| POLLWRNORM | POLLWRBAND | POLLERR
| POLLHUP | POLLNVAL)) != 0)
got_events++;
else
FD_CLR(data.fds[i].fd,
writefds);
}
}
if (exceptfds != NULL) {
if (FD_ISSET(data.fds[i].fd, exceptfds)) {
if (data.fds[i].revents & (POLLRDBAND |
POLLPRI))
got_events++;
else
FD_CLR(data.fds[i].fd,
exceptfds);
}
}
if (got_events != 0)
numfds+=got_events;
}
ret = numfds;
}
return (ret);
}
int
__select(int numfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds,
struct timeval *timeout)
{
int ret;
#ifndef __rtems__
_thread_enter_cancellation_point();
#endif
ret = _select(numfds, readfds, writefds, exceptfds, timeout);
#ifndef __rtems__
_thread_leave_cancellation_point();
#endif
return ret;
}
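
The uthread select above emulates select() on top of poll(): every descriptor bit set in the fd_sets becomes one pollfd entry, a single __sys_poll() call is issued with a zero timeout, and the thread only enters PS_SELECT_WAIT if nothing was ready and a wait was requested. A minimal caller-side sketch (hypothetical example, not part of this commit) of the path this services:

#include <stddef.h>
#include <sys/select.h>
#include <sys/time.h>

/* Wait up to two seconds for a socket to become readable. The fd_set bit
 * for sock is translated by _select() into one pollfd with POLLRDNORM set
 * before __sys_poll() is issued. */
int
wait_readable(int sock)
{
	fd_set rset;
	struct timeval tv = { 2, 0 };

	FD_ZERO(&rset);
	FD_SET(sock, &rset);
	return select(sock + 1, &rset, NULL, NULL, &tv);
}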


@ -0,0 +1,158 @@
/*
* Copyright (c) 1995-1998 John Birrell <jb@cimlogic.com.au>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the author nor the names of any co-contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* Private thread definitions for the uthread kernel.
*
* $FreeBSD$
*/
#ifndef _PTHREAD_PRIVATE_H
#define _PTHREAD_PRIVATE_H
/*
* Evaluate the storage class specifier.
*/
#ifdef GLOBAL_PTHREAD_PRIVATE
#define SCLASS
#else
#define SCLASS extern
#endif
/*
* Include files.
*/
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <freebsd/sys/param.h>
#include <freebsd/sys/queue.h>
#include <freebsd/sys/types.h>
#include <freebsd/sys/time.h>
#include <freebsd/sys/cdefs.h>
#include <sched.h>
#include <spinlock.h>
#include <pthread_np.h>
#include <freebsd/sys/malloc.h>
/*
* Define a thread-safe macro to get the current time of day
* which is updated at regular intervals by the scheduling signal
* handler.
*/
#define GET_CURRENT_TOD(tv) \
do { \
tv.tv_sec = _sched_tod.tv_sec; \
tv.tv_usec = _sched_tod.tv_usec; \
} while (tv.tv_sec != _sched_tod.tv_sec)
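/*
 * Usage sketch (illustrative only, not part of the original header): the
 * do/while above re-reads until tv_sec is stable, so the caller gets a
 * consistent snapshot even if the scheduling signal handler updates
 * _sched_tod between the two assignments.
 *
 *	struct timeval now;
 *	GET_CURRENT_TOD(now);
 */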
/*
* rtems uses the following structure to allow the method
* _thread_kern_sched_state to be called. This function
* is stubbed out to cause a processor yield.
*/
/*
* Thread states.
*/
enum pthread_state {
#if 0
PS_RUNNING,
PS_SIGTHREAD,
PS_MUTEX_WAIT,
PS_COND_WAIT,
PS_FDLR_WAIT,
PS_FDLW_WAIT,
PS_FDR_WAIT,
PS_FDW_WAIT,
PS_FILE_WAIT,
PS_POLL_WAIT,
#endif
PS_SELECT_WAIT,
#if 0
PS_SLEEP_WAIT,
PS_WAIT_WAIT,
PS_SIGSUSPEND,
PS_SIGWAIT,
PS_SPINBLOCK,
PS_JOIN,
PS_SUSPENDED,
PS_DEAD,
PS_DEADLOCK,
#endif
PS_STATE_MAX
};
struct pthread_poll_data {
int nfds;
struct pollfd *fds;
};
struct pthread_wait_data {
struct pthread_poll_data *poll_data;
};
/*
* Thread structure.
*/
struct pthread {
/*
* Time to wake up thread. This is used for sleeping threads and
* for any operation which may time out (such as select).
*/
struct timespec wakeup_time;
/* TRUE if operation has timed out. */
int timeout;
/* Wait data. */
struct pthread_wait_data data;
/*
* Allocated for converting select into poll.
*/
struct pthread_poll_data poll_data;
/*
* Set to TRUE if a blocking operation was
* interrupted by a signal:
*/
int interrupted;
};
/* Time of day at last scheduling timer signal: */
SCLASS struct timeval volatile _sched_tod
#ifdef GLOBAL_PTHREAD_PRIVATE
= { 0, 0 };
#else
;
#endif
struct pthread *_get_curthread(void);
#endif /* !_PTHREAD_PRIVATE_H */


@ -0,0 +1,72 @@
#include <freebsd/machine/rtems-bsd-config.h>
#include <freebsd/sys/malloc.h>
#include <pthread.h>
#include "pthread_private.h"
static struct pthread *rtems_bsd_curpthread = NULL;
static void rtems_bsd_pthread_descriptor_dtor(void *td)
{
// XXX are there other pieces to clean up?
free(td, M_TEMP);
}
static struct pthread *
rtems_bsd_pthread_init( rtems_id id )
{
rtems_status_code sc = RTEMS_SUCCESSFUL;
unsigned index = 0;
struct pthread *td;
td = _bsd_malloc( sizeof(struct pthread), M_TEMP, M_WAITOK | M_ZERO);
if (td == NULL)
return NULL;
td->timeout = 0;
td->data.poll_data = NULL;
td->poll_data.nfds = 0;
td->poll_data.fds = NULL;
td->interrupted = 0;
rtems_bsd_curpthread = td;
// Now add the task descriptor as a per-task variable
sc = rtems_task_variable_add(
id,
&rtems_bsd_curpthread,
rtems_bsd_pthread_descriptor_dtor
);
if (sc != RTEMS_SUCCESSFUL) {
free(td, M_TEMP);
return NULL;
}
return td;
}
/*
*/
struct pthread *
_get_curthread(void)
{
struct pthread *td;
/*
* If we already have a struct thread associated with this thread,
* obtain it. Otherwise, allocate and initialize one.
*/
td = rtems_bsd_curpthread;
if ( td == NULL ) {
td = rtems_bsd_pthread_init( rtems_task_self() );
if ( td == NULL ){
panic("_get_curthread: Unable to create pthread\n");
}
}
return td;
}


@ -0,0 +1,30 @@
#include <unistd.h>
#include <errno.h>
#include <sys/time.h>
#include <rtems.h>
#include <rtems/error.h>
#include <freebsd/sys/poll.h>
struct poll_args {
struct pollfd *fds;
u_int nfds;
int timeout;
};
int kern_poll( struct thread *td, struct poll_args *uap );
int
__sys_poll(struct pollfd *fds, unsigned nfds, int timeout)
{
struct poll_args uap;
struct thread *td = rtems_get_curthread();
int error;
uap.fds = fds;
uap.nfds = nfds;
uap.timeout = timeout;
/* kern_poll() stores the ready count in td_retval; the uthread select
 * caller re-checks revents itself, so returning 0 on success suffices. */
error = kern_poll(td, &uap);
if (error != 0) {
errno = error;
return -1;
}
return 0;
}


@ -0,0 +1,8 @@
#include <freebsd/machine/rtems-bsd-config.h>
/* Allocate space for global thread variables here: */
#define GLOBAL_PTHREAD_PRIVATE
#include <freebsd/sys/types.h>
#include <pthread.h>
#include "pthread_private.h"

freebsd/kern/kern_condvar.c (new file, 455 lines)

@ -0,0 +1,455 @@
#include <freebsd/machine/rtems-bsd-config.h>
/*-
* Copyright (c) 2000 Jake Burkholder <jake@freebsd.org>.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <freebsd/sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <freebsd/local/opt_ktrace.h>
#include <freebsd/sys/param.h>
#include <freebsd/sys/systm.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/mutex.h>
#include <freebsd/sys/proc.h>
#include <freebsd/sys/kernel.h>
#include <freebsd/sys/ktr.h>
#include <freebsd/sys/condvar.h>
#include <freebsd/sys/sched.h>
#include <freebsd/sys/signalvar.h>
#include <freebsd/sys/sleepqueue.h>
#include <freebsd/sys/resourcevar.h>
#ifdef KTRACE
#include <freebsd/sys/uio.h>
#include <freebsd/sys/ktrace.h>
#endif
/*
* Common sanity checks for cv_wait* functions.
*/
#define CV_ASSERT(cvp, lock, td) do { \
KASSERT((td) != NULL, ("%s: curthread NULL", __func__)); \
KASSERT(TD_IS_RUNNING(td), ("%s: not TDS_RUNNING", __func__)); \
KASSERT((cvp) != NULL, ("%s: cvp NULL", __func__)); \
KASSERT((lock) != NULL, ("%s: lock NULL", __func__)); \
} while (0)
/*
* Initialize a condition variable. Must be called before use.
*/
void
cv_init(struct cv *cvp, const char *desc)
{
cvp->cv_description = desc;
cvp->cv_waiters = 0;
}
/*
* Destroy a condition variable. The condition variable must be re-initialized
* in order to be re-used.
*/
void
cv_destroy(struct cv *cvp)
{
#ifdef INVARIANTS
struct sleepqueue *sq;
sleepq_lock(cvp);
sq = sleepq_lookup(cvp);
sleepq_release(cvp);
KASSERT(sq == NULL, ("%s: associated sleep queue non-empty", __func__));
#endif
}
/*
* Wait on a condition variable. The current thread is placed on the condition
* variable's wait queue and suspended. A cv_signal or cv_broadcast on the same
* condition variable will resume the thread. The mutex is released before
* sleeping and will be held on return. It is recommended that the mutex be
* held when cv_signal or cv_broadcast are called.
*/
void
_cv_wait(struct cv *cvp, struct lock_object *lock)
{
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
int lock_state;
td = curthread;
lock_state = 0;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, lock, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Waiting on \"%s\"", cvp->cv_description);
class = LOCK_CLASS(lock);
if (cold || panicstr) {
/*
* During autoconfiguration, just give interrupts
* a chance, then just return. Don't run any other
* thread or panic below, in case this is the idle
* process and already asleep.
*/
return;
}
sleepq_lock(cvp);
cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
if (lock != &Giant.lock_object) {
if (class->lc_flags & LC_SLEEPABLE)
sleepq_release(cvp);
WITNESS_SAVE(lock, lock_witness);
lock_state = class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
sleepq_wait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
#endif
PICKUP_GIANT();
if (lock != &Giant.lock_object) {
class->lc_lock(lock, lock_state);
WITNESS_RESTORE(lock, lock_witness);
}
}
/*
* Wait on a condition variable. This function differs from cv_wait by
* not acquiring the mutex after the condition variable was signaled.
*/
void
_cv_wait_unlock(struct cv *cvp, struct lock_object *lock)
{
struct lock_class *class;
struct thread *td;
td = curthread;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, lock, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Waiting on \"%s\"", cvp->cv_description);
KASSERT(lock != &Giant.lock_object,
("cv_wait_unlock cannot be used with Giant"));
class = LOCK_CLASS(lock);
if (cold || panicstr) {
/*
* During autoconfiguration, just give interrupts
* a chance, then just return. Don't run any other
* thread or panic below, in case this is the idle
* process and already asleep.
*/
class->lc_unlock(lock);
return;
}
sleepq_lock(cvp);
cvp->cv_waiters++;
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_release(cvp);
class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
sleepq_wait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
#endif
PICKUP_GIANT();
}
/*
* Wait on a condition variable, allowing interruption by signals. Return 0 if
* the thread was resumed with cv_signal or cv_broadcast, EINTR or ERESTART if
* a signal was caught. If ERESTART is returned the system call should be
* restarted if possible.
*/
int
_cv_wait_sig(struct cv *cvp, struct lock_object *lock)
{
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
int lock_state, rval;
td = curthread;
lock_state = 0;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, lock, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Waiting on \"%s\"", cvp->cv_description);
class = LOCK_CLASS(lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
* procs or panic below, in case this is the idle process and
* already asleep.
*/
return (0);
}
sleepq_lock(cvp);
cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE, 0);
if (lock != &Giant.lock_object) {
if (class->lc_flags & LC_SLEEPABLE)
sleepq_release(cvp);
WITNESS_SAVE(lock, lock_witness);
lock_state = class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
rval = sleepq_wait_sig(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
#endif
PICKUP_GIANT();
if (lock != &Giant.lock_object) {
class->lc_lock(lock, lock_state);
WITNESS_RESTORE(lock, lock_witness);
}
return (rval);
}
/*
* Wait on a condition variable for at most timo/hz seconds. Returns 0 if the
* process was resumed by cv_signal or cv_broadcast, EWOULDBLOCK if the timeout
* expires.
*/
int
_cv_timedwait(struct cv *cvp, struct lock_object *lock, int timo)
{
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
int lock_state, rval;
td = curthread;
lock_state = 0;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, lock, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Waiting on \"%s\"", cvp->cv_description);
class = LOCK_CLASS(lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
* thread or panic below, in case this is the idle process and
* already asleep.
*/
return 0;
}
sleepq_lock(cvp);
cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR, 0);
sleepq_set_timeout(cvp, timo);
if (lock != &Giant.lock_object) {
if (class->lc_flags & LC_SLEEPABLE)
sleepq_release(cvp);
WITNESS_SAVE(lock, lock_witness);
lock_state = class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
rval = sleepq_timedwait(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
#endif
PICKUP_GIANT();
if (lock != &Giant.lock_object) {
class->lc_lock(lock, lock_state);
WITNESS_RESTORE(lock, lock_witness);
}
return (rval);
}
/*
* Wait on a condition variable for at most timo/hz seconds, allowing
* interruption by signals. Returns 0 if the thread was resumed by cv_signal
* or cv_broadcast, EWOULDBLOCK if the timeout expires, and EINTR or ERESTART if
* a signal was caught.
*/
int
_cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo)
{
WITNESS_SAVE_DECL(lock_witness);
struct lock_class *class;
struct thread *td;
int lock_state, rval;
td = curthread;
lock_state = 0;
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(1, 0);
#endif
CV_ASSERT(cvp, lock, td);
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
"Waiting on \"%s\"", cvp->cv_description);
class = LOCK_CLASS(lock);
if (cold || panicstr) {
/*
* After a panic, or during autoconfiguration, just give
* interrupts a chance, then just return; don't run any other
* thread or panic below, in case this is the idle process and
* already asleep.
*/
return 0;
}
sleepq_lock(cvp);
cvp->cv_waiters++;
if (lock == &Giant.lock_object)
mtx_assert(&Giant, MA_OWNED);
DROP_GIANT();
sleepq_add(cvp, lock, cvp->cv_description, SLEEPQ_CONDVAR |
SLEEPQ_INTERRUPTIBLE, 0);
sleepq_set_timeout(cvp, timo);
if (lock != &Giant.lock_object) {
if (class->lc_flags & LC_SLEEPABLE)
sleepq_release(cvp);
WITNESS_SAVE(lock, lock_witness);
lock_state = class->lc_unlock(lock);
if (class->lc_flags & LC_SLEEPABLE)
sleepq_lock(cvp);
}
rval = sleepq_timedwait_sig(cvp, 0);
#ifdef KTRACE
if (KTRPOINT(td, KTR_CSW))
ktrcsw(0, 0);
#endif
PICKUP_GIANT();
if (lock != &Giant.lock_object) {
class->lc_lock(lock, lock_state);
WITNESS_RESTORE(lock, lock_witness);
}
return (rval);
}
/*
* Signal a condition variable, wakes up one waiting thread. Will also wakeup
* the swapper if the process is not in memory, so that it can bring the
* sleeping process in. Note that this may also result in additional threads
* being made runnable. Should be called with the same mutex as was passed to
* cv_wait held.
*/
void
cv_signal(struct cv *cvp)
{
int wakeup_swapper;
wakeup_swapper = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters--;
wakeup_swapper = sleepq_signal(cvp, SLEEPQ_CONDVAR, 0, 0);
}
sleepq_release(cvp);
if (wakeup_swapper)
kick_proc0();
}
/*
* Broadcast a signal to a condition variable. Wakes up all waiting threads.
* Should be called with the same mutex as was passed to cv_wait held.
*/
void
cv_broadcastpri(struct cv *cvp, int pri)
{
int wakeup_swapper;
/*
* XXX sleepq_broadcast pri argument changed from -1 meaning
* no pri to 0 meaning no pri.
*/
wakeup_swapper = 0;
if (pri == -1)
pri = 0;
sleepq_lock(cvp);
if (cvp->cv_waiters > 0) {
cvp->cv_waiters = 0;
wakeup_swapper = sleepq_broadcast(cvp, SLEEPQ_CONDVAR, pri, 0);
}
sleepq_release(cvp);
if (wakeup_swapper)
kick_proc0();
}

freebsd/kern/kern_descrip.c (new file, 6912 lines; diff suppressed because it is too large)

freebsd/kern/kern_mtxpool.c (new file, 220 lines)

@ -0,0 +1,220 @@
#include <freebsd/machine/rtems-bsd-config.h>
/*-
* Copyright (c) 2001 Matthew Dillon. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/* Mutex pool routines. These routines are designed to be used as short
* term leaf mutexes (e.g. the last mutex you might acquire other than
* calling msleep()). They operate using a shared pool. A mutex is chosen
* from the pool based on the supplied pointer (which may or may not be
* valid).
*
* Advantages:
* - no structural overhead. Mutexes can be associated with structures
* without adding bloat to the structures.
* - mutexes can be obtained for invalid pointers, useful when using
* mutexes to interlock destructor ops.
* - no initialization/destructor overhead.
* - can be used with msleep.
*
* Disadvantages:
* - should generally only be used as leaf mutexes.
* - pool/pool dependency ordering cannot be depended on.
* - possible L1 cache mastership contention between CPUs.
*/
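/*
 * Usage sketch (illustrative only, not part of the original source): take
 * the pool mutex hashed from an arbitrary structure pointer and use it as
 * a short-lived leaf lock around that structure.
 *
 *	struct mtx *m = mtx_pool_find(mtxpool_sleep, (void *)obj);
 *	mtx_lock(m);
 *	... brief critical section touching *obj ...
 *	mtx_unlock(m);
 */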
#include <freebsd/sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <freebsd/sys/param.h>
#include <freebsd/sys/proc.h>
#include <freebsd/sys/kernel.h>
#include <freebsd/sys/ktr.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/malloc.h>
#include <freebsd/sys/mutex.h>
#include <freebsd/sys/systm.h>
static MALLOC_DEFINE(M_MTXPOOL, "mtx_pool", "mutex pool");
/* Pool sizes must be a power of two */
#ifndef MTX_POOL_LOCKBUILDER_SIZE
#define MTX_POOL_LOCKBUILDER_SIZE 128
#endif
#ifndef MTX_POOL_SLEEP_SIZE
#define MTX_POOL_SLEEP_SIZE 128
#endif
struct mtxpool_header {
int mtxpool_size;
int mtxpool_mask;
int mtxpool_shift;
int mtxpool_next;
};
struct mtx_pool {
struct mtxpool_header mtx_pool_header;
struct mtx mtx_pool_ary[1];
};
static struct mtx_pool_lockbuilder {
struct mtxpool_header mtx_pool_header;
struct mtx mtx_pool_ary[MTX_POOL_LOCKBUILDER_SIZE];
} lockbuilder_pool;
#define mtx_pool_size mtx_pool_header.mtxpool_size
#define mtx_pool_mask mtx_pool_header.mtxpool_mask
#define mtx_pool_shift mtx_pool_header.mtxpool_shift
#define mtx_pool_next mtx_pool_header.mtxpool_next
struct mtx_pool *mtxpool_sleep;
struct mtx_pool *mtxpool_lockbuilder;
#if UINTPTR_MAX == UINT64_MAX /* 64 bits */
# define POINTER_BITS 64
# define HASH_MULTIPLIER 11400714819323198485u /* (2^64)*(sqrt(5)-1)/2 */
#else /* assume 32 bits */
# define POINTER_BITS 32
# define HASH_MULTIPLIER 2654435769u /* (2^32)*(sqrt(5)-1)/2 */
#endif
/*
* Return the (shared) pool mutex associated with the specified address.
* The returned mutex is a leaf level mutex, meaning that if you obtain it
* you cannot obtain any other mutexes until you release it. You can
* legally msleep() on the mutex.
*/
struct mtx *
mtx_pool_find(struct mtx_pool *pool, void *ptr)
{
int p;
KASSERT(pool != NULL, ("_mtx_pool_find(): null pool"));
/*
* Fibonacci hash, see Knuth's
* _Art of Computer Programming, Volume 3 / Sorting and Searching_
*/
p = ((HASH_MULTIPLIER * (uintptr_t)ptr) >> pool->mtx_pool_shift) &
pool->mtx_pool_mask;
return (&pool->mtx_pool_ary[p]);
}
static void
mtx_pool_initialize(struct mtx_pool *pool, const char *mtx_name, int pool_size,
int opts)
{
int i, maskbits;
pool->mtx_pool_size = pool_size;
pool->mtx_pool_mask = pool_size - 1;
for (i = 1, maskbits = 0; (i & pool_size) == 0; i = i << 1)
maskbits++;
pool->mtx_pool_shift = POINTER_BITS - maskbits;
pool->mtx_pool_next = 0;
for (i = 0; i < pool_size; ++i)
mtx_init(&pool->mtx_pool_ary[i], mtx_name, NULL, opts);
}
struct mtx_pool *
mtx_pool_create(const char *mtx_name, int pool_size, int opts)
{
struct mtx_pool *pool;
if (pool_size <= 0 || !powerof2(pool_size)) {
printf("WARNING: %s pool size is not a power of 2.\n",
mtx_name);
pool_size = 128;
}
pool = malloc(sizeof (struct mtx_pool) +
((pool_size - 1) * sizeof (struct mtx)),
M_MTXPOOL, M_WAITOK | M_ZERO);
mtx_pool_initialize(pool, mtx_name, pool_size, opts);
return pool;
}
void
mtx_pool_destroy(struct mtx_pool **poolp)
{
int i;
struct mtx_pool *pool = *poolp;
for (i = pool->mtx_pool_size - 1; i >= 0; --i)
mtx_destroy(&pool->mtx_pool_ary[i]);
free(pool, M_MTXPOOL);
*poolp = NULL;
}
static void
mtx_pool_setup_static(void *dummy __unused)
{
mtx_pool_initialize((struct mtx_pool *)&lockbuilder_pool,
"lockbuilder mtxpool", MTX_POOL_LOCKBUILDER_SIZE,
MTX_DEF | MTX_NOWITNESS | MTX_QUIET);
mtxpool_lockbuilder = (struct mtx_pool *)&lockbuilder_pool;
}
static void
mtx_pool_setup_dynamic(void *dummy __unused)
{
mtxpool_sleep = mtx_pool_create("sleep mtxpool",
MTX_POOL_SLEEP_SIZE, MTX_DEF);
}
/*
* Obtain a (shared) mutex from the pool. The returned mutex is a leaf
* level mutex, meaning that if you obtain it you cannot obtain any other
* mutexes until you release it. You can legally msleep() on the mutex.
*/
struct mtx *
mtx_pool_alloc(struct mtx_pool *pool)
{
int i;
KASSERT(pool != NULL, ("mtx_pool_alloc(): null pool"));
/*
* mtx_pool_next is unprotected against multiple accesses,
* but simultaneous access by two CPUs should not be very
* harmful.
*/
i = pool->mtx_pool_next;
pool->mtx_pool_next = (i + 1) & pool->mtx_pool_mask;
return (&pool->mtx_pool_ary[i]);
}
/*
* The lockbuilder pool must be initialized early because the lockmgr
* and sx locks depend on it. The sx locks are used in the kernel
* memory allocator. The lockmgr subsystem is initialized by
* SYSINIT(..., SI_SUB_LOCKMGR, ...).
*
* We can't call malloc() to dynamically allocate the sleep pool
* until after kmeminit() has been called, which is done by
* SYSINIT(..., SI_SUB_KMEM, ...).
*/
SYSINIT(mtxpooli1, SI_SUB_MTX_POOL_STATIC, SI_ORDER_FIRST,
mtx_pool_setup_static, NULL);
SYSINIT(mtxpooli2, SI_SUB_MTX_POOL_DYNAMIC, SI_ORDER_FIRST,
mtx_pool_setup_dynamic, NULL);


@ -545,7 +545,6 @@ copyiniov(struct iovec *iovp, u_int iovcnt, struct iovec **iov, int error)
return (error);
}
#ifndef __rtems__
int
copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
{
@ -582,6 +581,7 @@ copyinuio(struct iovec *iovp, u_int iovcnt, struct uio **uiop)
return (0);
}
#ifndef __rtems__
struct uio *
cloneuio(struct uio *uiop)
{


@ -698,6 +698,7 @@ realitexpire(void *arg)
}
/*NOTREACHED*/
}
#endif /* __rtems__ */
/*
* Check that a proposed value to load into the .it_value or
@ -716,6 +717,7 @@ itimerfix(struct timeval *tv)
return (0);
}
#ifndef __rtems__
/*
* Decrement an interval timer by a specified number
* of microseconds, which must be less than a second,
@ -756,6 +758,7 @@ expire:
itp->it_value.tv_usec = 0; /* sec is already 0 */
return (0);
}
#endif /* __rtems__ */
/*
* Add and subtract routines for timevals.
@ -772,7 +775,6 @@ timevaladd(struct timeval *t1, const struct timeval *t2)
t1->tv_usec += t2->tv_usec;
timevalfix(t1);
}
#endif /* __rtems__ */
void
timevalsub(struct timeval *t1, const struct timeval *t2)

freebsd/kern/sys_generic.c (new file, 1665 lines; diff suppressed because it is too large)

freebsd/sys/buf.h (new file, 526 lines)

@ -0,0 +1,526 @@
/*-
* Copyright (c) 1982, 1986, 1989, 1993
* The Regents of the University of California. All rights reserved.
* (c) UNIX System Laboratories, Inc.
* All or some portions of this file are derived from material licensed
* to the University of California by American Telephone and Telegraph
* Co. or Unix System Laboratories, Inc. and are reproduced herein with
* the permission of UNIX System Laboratories, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)buf.h 8.9 (Berkeley) 3/30/95
* $FreeBSD$
*/
#ifndef _SYS_BUF_HH_
#define _SYS_BUF_HH_
#include <freebsd/sys/bufobj.h>
#include <freebsd/sys/queue.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/lockmgr.h>
struct bio;
struct buf;
struct bufobj;
struct mount;
struct vnode;
struct uio;
/*
* To avoid including <ufs/ffs/softdep.h>
*/
LIST_HEAD(workhead, worklist);
/*
* These are currently used only by the soft dependency code, hence
* are stored once in a global variable. If other subsystems wanted
* to use these hooks, a pointer to a set of bio_ops could be added
* to each buffer.
*/
extern struct bio_ops {
void (*io_start)(struct buf *);
void (*io_complete)(struct buf *);
void (*io_deallocate)(struct buf *);
int (*io_countdeps)(struct buf *, int);
} bioops;
struct vm_object;
typedef unsigned char b_xflags_t;
/*
* The buffer header describes an I/O operation in the kernel.
*
* NOTES:
* b_bufsize, b_bcount. b_bufsize is the allocation size of the
* buffer, either DEV_BSIZE or PAGE_SIZE aligned. b_bcount is the
* originally requested buffer size and can serve as a bounds check
* against EOF. For most, but not all uses, b_bcount == b_bufsize.
*
* b_dirtyoff, b_dirtyend. Buffers support piecemeal, unaligned
* ranges of dirty data that need to be written to backing store.
* The range is typically clipped at b_bcount ( not b_bufsize ).
*
* b_resid. Number of bytes remaining in I/O. After an I/O operation
* completes, b_resid is usually 0 indicating 100% success.
*
* All fields are protected by the buffer lock except those marked:
* V - Protected by owning bufobj lock
* Q - Protected by the buf queue lock
* D - Protected by a dependency implementation specific lock
*/
struct buf {
struct bufobj *b_bufobj;
long b_bcount;
void *b_caller1;
caddr_t b_data;
int b_error;
uint8_t b_iocmd;
uint8_t b_ioflags;
off_t b_iooffset;
long b_resid;
void (*b_iodone)(struct buf *);
daddr_t b_blkno; /* Underlying physical block number. */
off_t b_offset; /* Offset into file. */
TAILQ_ENTRY(buf) b_bobufs; /* (V) Buffer's associated vnode. */
struct buf *b_left; /* (V) splay tree link */
struct buf *b_right; /* (V) splay tree link */
uint32_t b_vflags; /* (V) BV_* flags */
TAILQ_ENTRY(buf) b_freelist; /* (Q) Free list position inactive. */
unsigned short b_qindex; /* (Q) buffer queue index */
uint32_t b_flags; /* B_* flags. */
b_xflags_t b_xflags; /* extra flags */
struct lock b_lock; /* Buffer lock */
long b_bufsize; /* Allocated buffer size. */
long b_runningbufspace; /* when I/O is running, pipelining */
caddr_t b_kvabase; /* base kva for buffer */
int b_kvasize; /* size of kva for buffer */
daddr_t b_lblkno; /* Logical block number. */
struct vnode *b_vp; /* Device vnode. */
int b_dirtyoff; /* Offset in buffer of dirty region. */
int b_dirtyend; /* Offset of end of dirty region. */
struct ucred *b_rcred; /* Read credentials reference. */
struct ucred *b_wcred; /* Write credentials reference. */
void *b_saveaddr; /* Original b_addr for physio. */
union pager_info {
int pg_reqpage;
} b_pager;
union cluster_info {
TAILQ_HEAD(cluster_list_head, buf) cluster_head;
TAILQ_ENTRY(buf) cluster_entry;
} b_cluster;
struct vm_page *b_pages[btoc(MAXPHYS)];
int b_npages;
struct workhead b_dep; /* (D) List of filesystem dependencies. */
void *b_fsprivate1;
void *b_fsprivate2;
void *b_fsprivate3;
int b_pin_count;
};
#define b_object b_bufobj->bo_object
/*
* These flags are kept in b_flags.
*
* Notes:
*
* B_ASYNC VOP calls on bp's are usually async whether or not
* B_ASYNC is set, but some subsystems, such as NFS, like
* to know what is best for the caller so they can
* optimize the I/O.
*
* B_PAGING Indicates that bp is being used by the paging system or
* some paging system and that the bp is not linked into
* the b_vp's clean/dirty linked lists or ref counts.
* Buffer vp reassignments are illegal in this case.
*
* B_CACHE This may only be set if the buffer is entirely valid.
* The situation where B_DELWRI is set and B_CACHE is
* clear MUST be committed to disk by getblk() so
* B_DELWRI can also be cleared. See the comments for
* getblk() in kern/vfs_bio.c. If B_CACHE is clear,
* the caller is expected to clear BIO_ERROR and B_INVAL,
* set BIO_READ, and initiate an I/O.
*
* The 'entire buffer' is defined to be the range from
* 0 through b_bcount.
*
* B_MALLOC Request that the buffer be allocated from the malloc
* pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned.
*
* B_CLUSTEROK This flag is typically set for B_DELWRI buffers
* by filesystems that allow clustering when the buffer
* is fully dirty and indicates that it may be clustered
* with other adjacent dirty buffers. Note the clustering
* may not be used with the stage 1 data write under NFS
* but may be used for the commit rpc portion.
*
* B_VMIO Indicates that the buffer is tied into an VM object.
* The buffer's data is always PAGE_SIZE aligned even
* if b_bufsize and b_bcount are not. ( b_bufsize is
* always at least DEV_BSIZE aligned, though ).
*
* B_DIRECT Hint that we should attempt to completely free
* the pages underlying the buffer. B_DIRECT is
* sticky until the buffer is released and typically
* only has an effect when B_RELBUF is also set.
*
*/
#define B_AGE 0x00000001 /* Move to age queue when I/O done. */
#define B_NEEDCOMMIT 0x00000002 /* Append-write in progress. */
#define B_ASYNC 0x00000004 /* Start I/O, do not wait. */
#define B_DIRECT 0x00000008 /* direct I/O flag (pls free vmio) */
#define B_DEFERRED 0x00000010 /* Skipped over for cleaning */
#define B_CACHE 0x00000020 /* Bread found us in the cache. */
#define B_VALIDSUSPWRT 0x00000040 /* Valid write during suspension. */
#define B_DELWRI 0x00000080 /* Delay I/O until buffer reused. */
#define B_PERSISTENT 0x00000100 /* Perm. ref'ed while EXT2FS mounted. */
#define B_DONE 0x00000200 /* I/O completed. */
#define B_EINTR 0x00000400 /* I/O was interrupted */
#define B_00000800 0x00000800 /* Available flag. */
#define B_00001000 0x00001000 /* Available flag. */
#define B_INVAL 0x00002000 /* Does not contain valid info. */
#define B_00004000 0x00004000 /* Available flag. */
#define B_NOCACHE 0x00008000 /* Do not cache block after use. */
#define B_MALLOC 0x00010000 /* malloced b_data */
#define B_CLUSTEROK 0x00020000 /* Pagein op, so swap() can count it. */
#define B_000400000 0x00040000 /* Available flag. */
#define B_000800000 0x00080000 /* Available flag. */
#define B_00100000 0x00100000 /* Available flag. */
#define B_DIRTY 0x00200000 /* Needs writing later (in EXT2FS). */
#define B_RELBUF 0x00400000 /* Release VMIO buffer. */
#define B_00800000 0x00800000 /* Available flag. */
#define B_01000000 0x01000000 /* Available flag. */
#define B_NEEDSGIANT 0x02000000 /* Buffer's vnode needs giant. */
#define B_PAGING 0x04000000 /* volatile paging I/O -- bypass VMIO */
#define B_MANAGED 0x08000000 /* Managed by FS. */
#define B_RAM 0x10000000 /* Read ahead mark (flag) */
#define B_VMIO 0x20000000 /* VMIO flag */
#define B_CLUSTER 0x40000000 /* pagein op, so swap() can count it */
#define B_REMFREE 0x80000000 /* Delayed bremfree */
#define PRINT_BUF_FLAGS "\20\40remfree\37cluster\36vmio\35ram\34b27" \
"\33paging\32b25\31b24\30b23\27relbuf\26dirty\25b20" \
"\24b19\23b18\22clusterok\21malloc\20nocache\17b14\16inval" \
"\15b12\14b11\13eintr\12done\11persist\10delwri\7validsuspwrt" \
"\6cache\5deferred\4direct\3async\2needcommit\1age"
/*
* These flags are kept in b_xflags.
*/
#define BX_VNDIRTY 0x00000001 /* On vnode dirty list */
#define BX_VNCLEAN 0x00000002 /* On vnode clean list */
#define BX_BKGRDWRITE 0x00000010 /* Do writes in background */
#define BX_BKGRDMARKER 0x00000020 /* Mark buffer for splay tree */
#define BX_ALTDATA 0x00000040 /* Holds extended data */
#define NOOFFSET (-1LL) /* No buffer offset calculated yet */
/*
* These flags are kept in b_vflags.
*/
#define BV_SCANNED 0x00000001 /* VOP_FSYNC funcs mark written bufs */
#define BV_BKGRDINPROG 0x00000002 /* Background write in progress */
#define BV_BKGRDWAIT 0x00000004 /* Background write waiting */
#define BV_INFREECNT 0x80000000 /* buf is counted in numfreebufs */
#ifdef _KERNEL
/*
* Buffer locking
*/
extern const char *buf_wmesg; /* Default buffer lock message */
#define BUF_WMESG "bufwait"
#include <freebsd/sys/proc.h> /* XXX for curthread */
#include <freebsd/sys/mutex.h>
/*
* Initialize a lock.
*/
#define BUF_LOCKINIT(bp) \
lockinit(&(bp)->b_lock, PRIBIO + 4, buf_wmesg, 0, 0)
/*
*
* Get a lock, sleeping non-interruptibly until it becomes available.
*/
#define BUF_LOCK(bp, locktype, interlock) \
_lockmgr_args(&(bp)->b_lock, (locktype), (interlock), \
LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \
LOCK_FILE, LOCK_LINE)
/*
* Get a lock, sleeping with the specified interruptibility and timeout.
*/
#define BUF_TIMELOCK(bp, locktype, interlock, wmesg, catch, timo) \
_lockmgr_args(&(bp)->b_lock, (locktype) | LK_TIMELOCK, \
(interlock), (wmesg), (PRIBIO + 4) | (catch), (timo), \
LOCK_FILE, LOCK_LINE)
/*
* Release a lock. Only the acquiring process may free the lock unless
* it has been handed off to biodone.
*/
#define BUF_UNLOCK(bp) do { \
KASSERT(((bp)->b_flags & B_REMFREE) == 0, \
("BUF_UNLOCK %p while B_REMFREE is still set.", (bp))); \
\
(void)_lockmgr_args(&(bp)->b_lock, LK_RELEASE, NULL, \
LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, \
LOCK_FILE, LOCK_LINE); \
} while (0)
/*
* Check if a buffer lock is recursed.
*/
#define BUF_LOCKRECURSED(bp) \
lockmgr_recursed(&(bp)->b_lock)
/*
* Check if a buffer lock is currently held.
*/
#define BUF_ISLOCKED(bp) \
lockstatus(&(bp)->b_lock)
/*
* Free a buffer lock.
*/
#define BUF_LOCKFREE(bp) \
lockdestroy(&(bp)->b_lock)
/*
* Buffer lock assertions.
*/
#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
#define BUF_ASSERT_LOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_LOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_SLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_SLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_XLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_XLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_UNLOCKED(bp) \
_lockmgr_assert(&(bp)->b_lock, KA_UNLOCKED, LOCK_FILE, LOCK_LINE)
#define BUF_ASSERT_HELD(bp)
#define BUF_ASSERT_UNHELD(bp)
#else
#define BUF_ASSERT_LOCKED(bp)
#define BUF_ASSERT_SLOCKED(bp)
#define BUF_ASSERT_XLOCKED(bp)
#define BUF_ASSERT_UNLOCKED(bp)
#define BUF_ASSERT_HELD(bp)
#define BUF_ASSERT_UNHELD(bp)
#endif
#ifdef _SYS_PROC_HH_ /* Avoid #include <freebsd/sys/proc.h> pollution */
/*
* When initiating asynchronous I/O, change ownership of the lock to the
* kernel. Once done, the lock may legally be released by biodone. The
* original owning process can no longer acquire it recursively, but must
* wait until the I/O is completed and the lock has been freed by biodone.
*/
#define BUF_KERNPROC(bp) \
_lockmgr_disown(&(bp)->b_lock, LOCK_FILE, LOCK_LINE)
#endif
/*
* Find out if the lock has waiters or not.
*/
#define BUF_LOCKWAITERS(bp) \
lockmgr_waiters(&(bp)->b_lock)
#endif /* _KERNEL */
struct buf_queue_head {
TAILQ_HEAD(buf_queue, buf) queue;
daddr_t last_pblkno;
struct buf *insert_point;
struct buf *switch_point;
};
/*
* This structure describes a clustered I/O. It is stored in the b_saveaddr
* field of the buffer on which I/O is done. At I/O completion, cluster
* callback uses the structure to parcel I/O's to individual buffers, and
* then free's this structure.
*/
struct cluster_save {
long bs_bcount; /* Saved b_bcount. */
long bs_bufsize; /* Saved b_bufsize. */
void *bs_saveaddr; /* Saved b_addr. */
int bs_nchildren; /* Number of associated buffers. */
struct buf **bs_children; /* List of associated buffers. */
};
#ifdef _KERNEL
static __inline int
bwrite(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("bwrite: no bufobj bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops != NULL, ("bwrite: no bo_ops bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops->bop_write != NULL,
("bwrite: no bop_write bp=%p", bp));
return (BO_WRITE(bp->b_bufobj, bp));
}
static __inline void
bstrategy(struct buf *bp)
{
KASSERT(bp->b_bufobj != NULL, ("bstrategy: no bufobj bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops != NULL,
("bstrategy: no bo_ops bp=%p", bp));
KASSERT(bp->b_bufobj->bo_ops->bop_strategy != NULL,
("bstrategy: no bop_strategy bp=%p", bp));
BO_STRATEGY(bp->b_bufobj, bp);
}
static __inline void
buf_start(struct buf *bp)
{
if (bioops.io_start)
(*bioops.io_start)(bp);
}
static __inline void
buf_complete(struct buf *bp)
{
if (bioops.io_complete)
(*bioops.io_complete)(bp);
}
static __inline void
buf_deallocate(struct buf *bp)
{
if (bioops.io_deallocate)
(*bioops.io_deallocate)(bp);
BUF_LOCKFREE(bp);
}
static __inline int
buf_countdeps(struct buf *bp, int i)
{
if (bioops.io_countdeps)
return ((*bioops.io_countdeps)(bp, i));
else
return (0);
}
#endif /* _KERNEL */
/*
* Zero out the buffer's data area.
*/
#define clrbuf(bp) { \
bzero((bp)->b_data, (u_int)(bp)->b_bcount); \
(bp)->b_resid = 0; \
}
/*
* Flags for getblk's last parameter.
*/
#define GB_LOCK_NOWAIT 0x0001 /* Fail if we block on a buf lock. */
#define GB_NOCREAT 0x0002 /* Don't create a buf if not found. */
#define GB_NOWAIT_BD 0x0004 /* Do not wait for bufdaemon */
#ifdef _KERNEL
extern int nbuf; /* The number of buffer headers */
extern long maxswzone; /* Max KVA for swap structures */
extern long maxbcache; /* Max KVA for buffer cache */
extern long runningbufspace;
extern long hibufspace;
extern int dirtybufthresh;
extern int bdwriteskip;
extern int dirtybufferflushes;
extern int altbufferflushes;
extern int buf_maxio; /* nominal maximum I/O for buffer */
extern struct buf *buf; /* The buffer headers. */
extern char *buffers; /* The buffer contents. */
extern int bufpages; /* Number of memory pages in the buffer pool. */
extern struct buf *swbuf; /* Swap I/O buffer headers. */
extern int nswbuf; /* Number of swap I/O buffer headers. */
extern int cluster_pbuf_freecnt; /* Number of pbufs for clusters */
extern int vnode_pbuf_freecnt; /* Number of pbufs for vnode pager */
void runningbufwakeup(struct buf *);
void waitrunningbufspace(void);
caddr_t kern_vfs_bio_buffer_alloc(caddr_t v, long physmem_est);
void bufinit(void);
void bwillwrite(void);
int buf_dirty_count_severe(void);
void bremfree(struct buf *);
void bremfreef(struct buf *); /* XXX Force bremfree, only for nfs. */
int bread(struct vnode *, daddr_t, int, struct ucred *, struct buf **);
void breada(struct vnode *, daddr_t *, int *, int, struct ucred *);
int breadn(struct vnode *, daddr_t, int, daddr_t *, int *, int,
struct ucred *, struct buf **);
void bdwrite(struct buf *);
void bawrite(struct buf *);
void bdirty(struct buf *);
void bundirty(struct buf *);
void bufstrategy(struct bufobj *, struct buf *);
void brelse(struct buf *);
void bqrelse(struct buf *);
int vfs_bio_awrite(struct buf *);
struct buf * getpbuf(int *);
struct buf *incore(struct bufobj *, daddr_t);
struct buf *gbincore(struct bufobj *, daddr_t);
struct buf *getblk(struct vnode *, daddr_t, int, int, int, int);
struct buf *geteblk(int, int);
int bufwait(struct buf *);
int bufwrite(struct buf *);
void bufdone(struct buf *);
void bufdone_finish(struct buf *);
int cluster_read(struct vnode *, u_quad_t, daddr_t, long,
struct ucred *, long, int, struct buf **);
int cluster_wbuild(struct vnode *, long, daddr_t, int);
void cluster_write(struct vnode *, struct buf *, u_quad_t, int);
void vfs_bio_set_valid(struct buf *, int base, int size);
void vfs_bio_clrbuf(struct buf *);
void vfs_busy_pages(struct buf *, int clear_modify);
void vfs_unbusy_pages(struct buf *);
int vmapbuf(struct buf *);
void vunmapbuf(struct buf *);
void relpbuf(struct buf *, int *);
void brelvp(struct buf *);
void bgetvp(struct vnode *, struct buf *);
void pbgetbo(struct bufobj *bo, struct buf *bp);
void pbgetvp(struct vnode *, struct buf *);
void pbrelbo(struct buf *);
void pbrelvp(struct buf *);
int allocbuf(struct buf *bp, int size);
void reassignbuf(struct buf *);
struct buf *trypbuf(int *);
void bwait(struct buf *, u_char, const char *);
void bdone(struct buf *);
void bpin(struct buf *);
void bunpin(struct buf *);
void bunpin_wait(struct buf *);
#endif /* _KERNEL */
#endif /* !_SYS_BUF_HH_ */

freebsd/sys/mqueue.h (new file, 45 lines)

@ -0,0 +1,45 @@
/*-
* Copyright (c) 2005 David Xu <davidxu@freebsd.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_MQUEUE_HH_
#define _SYS_MQUEUE_HH_
struct mq_attr {
long mq_flags; /* Message queue flags. */
long mq_maxmsg; /* Maximum number of messages. */
long mq_msgsize; /* Maximum message size. */
long mq_curmsgs; /* Number of messages currently queued. */
long __reserved[4]; /* Ignored for input, zeroed for output */
};
#ifdef _KERNEL
struct thread;
struct file;
extern void (*mq_fdclose)(struct thread *td, int fd, struct file *fp);
#endif
#endif


@ -208,7 +208,9 @@ struct thread {
TAILQ_ENTRY(thread) td_slpq; /* (t) Sleep queue. */
TAILQ_ENTRY(thread) td_lockq; /* (t) Lock queue. */
struct cpuset *td_cpuset; /* (t) CPU affinity mask. */
#endif /* __rtems__ */
struct seltd *td_sel; /* Select queue/channel. */
#ifndef __rtems__
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
struct turnstile *td_turnstile; /* (k) Associated turnstile. */
struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
@ -478,7 +480,9 @@ struct proc {
TAILQ_HEAD(, thread) p_threads; /* (c) all threads. */
struct mtx p_slock; /* process spin lock */
struct ucred *p_ucred; /* (c) Process owner's identity. */
#endif /* __rtems__ */
struct filedesc *p_fd; /* (b) Open files. */
#ifndef __rtems__
struct filedesc_to_leader *p_fdtol; /* (b) Tracking node */
struct pstats *p_stats; /* (b) Accounting/statistics (CPU). */
struct plimit *p_limit; /* (c) Process limits. */
@ -572,6 +576,9 @@ struct proc {
struct kdtrace_proc *p_dtrace; /* (*) DTrace-specific data. */
struct cv p_pwait; /* (*) wait cv for exit/exec */
#else /* __rtems__ */
struct sigiolst p_sigiolst; /* (c) List of sigio sources. */
int p_flag; /* (c) P_* flags. */
struct proc *p_leader; /* (b) */
struct ucred *p_ucred; /* (c) Process owner's identity. */
struct mtx p_mtx; /* (n) Lock for this struct. */
rtems_id p_pid;

freebsd/sys/tty.h (new file, 217 lines)

@ -0,0 +1,217 @@
/*-
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_TTY_HH_
#define _SYS_TTY_HH_
#include <freebsd/sys/param.h>
#include <freebsd/sys/queue.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/mutex.h>
#include <freebsd/sys/condvar.h>
#include <freebsd/sys/selinfo.h>
#ifndef __rtems__
#include <freebsd/sys/termios.h>
#endif
#include <freebsd/sys/ttycom.h>
#include <freebsd/sys/ttyqueue.h>
struct cdev;
struct file;
struct pgrp;
struct session;
struct ucred;
struct ttydevsw;
/*
* Per-TTY structure, containing buffers, etc.
*
* List of locks
* (t) locked by t_mtx
* (l) locked by tty_list_sx
* (c) const until freeing
*/
struct tty {
struct mtx *t_mtx; /* TTY lock. */
struct mtx t_mtxobj; /* Per-TTY lock (when not borrowing). */
TAILQ_ENTRY(tty) t_list; /* (l) TTY list entry. */
unsigned int t_flags; /* (t) Terminal option flags. */
/* Keep flags in sync with db_show_tty and pstat(8). */
#define TF_NOPREFIX 0x00001 /* Don't prepend "tty" to device name. */
#define TF_INITLOCK 0x00002 /* Create init/lock state devices. */
#define TF_CALLOUT 0x00004 /* Create "cua" devices. */
#define TF_OPENED_IN 0x00008 /* "tty" node is in use. */
#define TF_OPENED_OUT 0x00010 /* "cua" node is in use. */
#define TF_OPENED_CONS 0x00020 /* Device in use as console. */
#define TF_OPENED (TF_OPENED_IN|TF_OPENED_OUT|TF_OPENED_CONS)
#define TF_GONE 0x00040 /* Device node is gone. */
#define TF_OPENCLOSE 0x00080 /* Device is in open()/close(). */
#define TF_ASYNC 0x00100 /* Asynchronous I/O enabled. */
#define TF_LITERAL 0x00200 /* Accept the next character literally. */
#define TF_HIWAT_IN 0x00400 /* We've reached the input watermark. */
#define TF_HIWAT_OUT 0x00800 /* We've reached the output watermark. */
#define TF_HIWAT (TF_HIWAT_IN|TF_HIWAT_OUT)
#define TF_STOPPED 0x01000 /* Output flow control - stopped. */
#define TF_EXCLUDE 0x02000 /* Exclusive access. */
#define TF_BYPASS 0x04000 /* Optimized input path. */
#define TF_ZOMBIE 0x08000 /* Modem disconnect received. */
#define TF_HOOK 0x10000 /* TTY has hook attached. */
#define TF_BUSY_IN 0x20000 /* Process busy in read() -- not supported. */
#define TF_BUSY_OUT 0x40000 /* Process busy in write(). */
#define TF_BUSY (TF_BUSY_IN|TF_BUSY_OUT)
unsigned int t_revokecnt; /* (t) revoke() count. */
/* Buffering mechanisms. */
struct ttyinq t_inq; /* (t) Input queue. */
size_t t_inlow; /* (t) Input low watermark. */
struct ttyoutq t_outq; /* (t) Output queue. */
size_t t_outlow; /* (t) Output low watermark. */
/* Sleeping mechanisms. */
struct cv t_inwait; /* (t) Input wait queue. */
struct cv t_outwait; /* (t) Output wait queue. */
struct cv t_outserwait; /* (t) Serial output wait queue. */
struct cv t_bgwait; /* (t) Background wait queue. */
struct cv t_dcdwait; /* (t) Carrier Detect wait queue. */
/* Polling mechanisms. */
struct selinfo t_inpoll; /* (t) Input poll queue. */
struct selinfo t_outpoll; /* (t) Output poll queue. */
struct sigio *t_sigio; /* (t) Asynchronous I/O. */
struct termios t_termios; /* (t) I/O processing flags. */
struct winsize t_winsize; /* (t) Window size. */
unsigned int t_column; /* (t) Current cursor position. */
unsigned int t_writepos; /* (t) Where input was interrupted. */
int t_compatflags; /* (t) COMPAT_43TTY flags. */
/* Init/lock-state devices. */
struct termios t_termios_init_in; /* tty%s.init. */
struct termios t_termios_lock_in; /* tty%s.lock. */
struct termios t_termios_init_out; /* cua%s.init. */
struct termios t_termios_lock_out; /* cua%s.lock. */
struct ttydevsw *t_devsw; /* (c) Driver hooks. */
struct ttyhook *t_hook; /* (t) Capture/inject hook. */
/* Process signal delivery. */
struct pgrp *t_pgrp; /* (t) Foreground process group. */
struct session *t_session; /* (t) Associated session. */
unsigned int t_sessioncnt; /* (t) Backpointing sessions. */
void *t_devswsoftc; /* (c) Soft config, for drivers. */
void *t_hooksoftc; /* (t) Soft config, for hooks. */
struct cdev *t_dev; /* (c) Primary character device. */
};
/*
* Userland version of struct tty, for sysctl kern.ttys
*/
struct xtty {
size_t xt_size; /* Structure size. */
size_t xt_insize; /* Input queue size. */
size_t xt_incc; /* Canonicalized characters. */
size_t xt_inlc; /* Input line characters. */
size_t xt_inlow; /* Input low watermark. */
size_t xt_outsize; /* Output queue size. */
size_t xt_outcc; /* Output queue usage. */
size_t xt_outlow; /* Output low watermark. */
unsigned int xt_column; /* Current column position. */
pid_t xt_pgid; /* Foreground process group. */
pid_t xt_sid; /* Session. */
unsigned int xt_flags; /* Terminal option flags. */
dev_t xt_dev; /* Userland device. */
};
#ifdef _KERNEL
/* Allocation and deallocation. */
struct tty *tty_alloc(struct ttydevsw *tsw, void *softc);
struct tty *tty_alloc_mutex(struct ttydevsw *tsw, void *softc, struct mtx *mtx);
void tty_rel_pgrp(struct tty *tp, struct pgrp *pgrp);
void tty_rel_sess(struct tty *tp, struct session *sess);
void tty_rel_gone(struct tty *tp);
#define tty_lock(tp) mtx_lock((tp)->t_mtx)
#define tty_unlock(tp) mtx_unlock((tp)->t_mtx)
#define tty_lock_assert(tp,ma) mtx_assert((tp)->t_mtx, (ma))
#define tty_getlock(tp) ((tp)->t_mtx)
/* Device node creation. */
void tty_makedev(struct tty *tp, struct ucred *cred, const char *fmt, ...)
__printflike(3, 4);
#define tty_makealias(tp,fmt,...) \
make_dev_alias((tp)->t_dev, fmt, ## __VA_ARGS__)
/* Signalling processes. */
void tty_signal_sessleader(struct tty *tp, int signal);
void tty_signal_pgrp(struct tty *tp, int signal);
/* Waking up readers/writers. */
int tty_wait(struct tty *tp, struct cv *cv);
int tty_timedwait(struct tty *tp, struct cv *cv, int timo);
void tty_wakeup(struct tty *tp, int flags);
/* System messages. */
int tty_checkoutq(struct tty *tp);
int tty_putchar(struct tty *tp, char c);
int tty_ioctl(struct tty *tp, u_long cmd, void *data, int fflag,
struct thread *td);
int tty_ioctl_compat(struct tty *tp, u_long cmd, caddr_t data,
int fflag, struct thread *td);
void tty_init_console(struct tty *tp, speed_t speed);
void tty_flush(struct tty *tp, int flags);
void tty_hiwat_in_block(struct tty *tp);
void tty_hiwat_in_unblock(struct tty *tp);
dev_t tty_udev(struct tty *tp);
#define tty_opened(tp) ((tp)->t_flags & TF_OPENED)
#define tty_gone(tp) ((tp)->t_flags & TF_GONE)
#define tty_softc(tp) ((tp)->t_devswsoftc)
#define tty_devname(tp) devtoname((tp)->t_dev)
/* Status line printing. */
void tty_info(struct tty *tp);
/* /dev/console selection. */
void ttyconsdev_select(const char *name);
/* Pseudo-terminal hooks. */
int pts_alloc_external(int fd, struct thread *td, struct file *fp,
struct cdev *dev, const char *name);
/* Drivers and line disciplines also need to call these. */
#include <freebsd/sys/ttydisc.h>
#include <freebsd/sys/ttydevsw.h>
#include <freebsd/sys/ttyhook.h>
#endif /* _KERNEL */
#endif /* !_SYS_TTY_HH_ */
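A brief sketch of how the locking and state macros above are meant to be used by a driver; my_rx_complete and the surrounding driver are hypothetical, and FREAD is the usual file flag passed to tty_wakeup():

/* Hypothetical helper in a serial driver: wake readers after receive completes. */
static void
my_rx_complete(struct tty *tp)
{
	tty_lock(tp);
	if (tty_gone(tp)) {		/* device node already revoked */
		tty_unlock(tp);
		return;
	}
	/* ... push the received data into tp->t_inq here ... */
	tty_wakeup(tp, FREAD);		/* wake up sleeping readers */
	tty_unlock(tp);
}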

169
freebsd/sys/ttydevsw.h Normal file
View File

@ -0,0 +1,169 @@
/*-
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_TTYDEVSW_HH_
#define _SYS_TTYDEVSW_HH_
#ifndef _SYS_TTY_HH_
#error "can only be included through <sys/tty.h>"
#endif /* !_SYS_TTY_HH_ */
/*
* Driver routines that are called from the line discipline to adjust
* hardware parameters and such.
*/
typedef int tsw_open_t(struct tty *tp);
typedef void tsw_close_t(struct tty *tp);
typedef void tsw_outwakeup_t(struct tty *tp);
typedef void tsw_inwakeup_t(struct tty *tp);
typedef int tsw_ioctl_t(struct tty *tp, u_long cmd, caddr_t data,
struct thread *td);
typedef int tsw_param_t(struct tty *tp, struct termios *t);
typedef int tsw_modem_t(struct tty *tp, int sigon, int sigoff);
typedef int tsw_mmap_t(struct tty *tp, vm_offset_t offset,
vm_paddr_t * paddr, int nprot);
typedef void tsw_pktnotify_t(struct tty *tp, char event);
typedef void tsw_free_t(void *softc);
struct ttydevsw {
unsigned int tsw_flags; /* Default TTY flags. */
tsw_open_t *tsw_open; /* Device opening. */
tsw_close_t *tsw_close; /* Device closure. */
tsw_outwakeup_t *tsw_outwakeup; /* Output available. */
tsw_inwakeup_t *tsw_inwakeup; /* Input can be stored again. */
tsw_ioctl_t *tsw_ioctl; /* ioctl() hooks. */
tsw_param_t *tsw_param; /* TIOCSETA device parameter setting. */
tsw_modem_t *tsw_modem; /* Modem sigon/sigoff. */
tsw_mmap_t *tsw_mmap; /* mmap() hooks. */
tsw_pktnotify_t *tsw_pktnotify; /* TIOCPKT events. */
tsw_free_t *tsw_free; /* Destructor. */
};
static __inline int
ttydevsw_open(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_devsw->tsw_open(tp);
}
static __inline void
ttydevsw_close(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
tp->t_devsw->tsw_close(tp);
}
static __inline void
ttydevsw_outwakeup(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
/* Prevent spurious wakeups. */
if (ttydisc_getc_poll(tp) == 0)
return;
tp->t_devsw->tsw_outwakeup(tp);
}
static __inline void
ttydevsw_inwakeup(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
/* Prevent spurious wakeups. */
if (tp->t_flags & TF_HIWAT_IN)
return;
tp->t_devsw->tsw_inwakeup(tp);
}
static __inline int
ttydevsw_ioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_devsw->tsw_ioctl(tp, cmd, data, td);
}
static __inline int
ttydevsw_param(struct tty *tp, struct termios *t)
{
MPASS(!tty_gone(tp));
return tp->t_devsw->tsw_param(tp, t);
}
static __inline int
ttydevsw_modem(struct tty *tp, int sigon, int sigoff)
{
MPASS(!tty_gone(tp));
return tp->t_devsw->tsw_modem(tp, sigon, sigoff);
}
static __inline int
ttydevsw_mmap(struct tty *tp, vm_offset_t offset, vm_paddr_t *paddr, int nprot)
{
MPASS(!tty_gone(tp));
return tp->t_devsw->tsw_mmap(tp, offset, paddr, nprot);
}
static __inline void
ttydevsw_pktnotify(struct tty *tp, char event)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
tp->t_devsw->tsw_pktnotify(tp, event);
}
static __inline void
ttydevsw_free(struct tty *tp)
{
MPASS(tty_gone(tp));
tp->t_devsw->tsw_free(tty_softc(tp));
}
#endif /* !_SYS_TTYDEVSW_HH_ */
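A minimal sketch of wiring a driver into this interface with tty_alloc() and tty_makedev() from tty.h; the foo_* names, the softc, and the device name format are illustrative assumptions (callback definitions omitted):

static tsw_open_t	foo_open;
static tsw_close_t	foo_close;
static tsw_outwakeup_t	foo_outwakeup;

static struct ttydevsw foo_ttydevsw = {
	.tsw_flags	= 0,
	.tsw_open	= foo_open,		/* device opening */
	.tsw_close	= foo_close,		/* device closure */
	.tsw_outwakeup	= foo_outwakeup,	/* called when output is pending */
};

static void
foo_attach(void *softc)
{
	struct tty *tp;

	tp = tty_alloc(&foo_ttydevsw, softc);
	/* Creates the /dev/foo0 node; the name format is illustrative. */
	tty_makedev(tp, NULL, "foo%d", 0);
}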

86
freebsd/sys/ttydisc.h Normal file
View File

@ -0,0 +1,86 @@
/*-
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_TTYDISC_HH_
#define _SYS_TTYDISC_HH_
#ifndef _SYS_TTY_HH_
#error "can only be included through <sys/tty.h>"
#endif /* !_SYS_TTY_HH_ */
struct cv;
struct thread;
struct tty;
struct uio;
/* Top half routines. */
void ttydisc_open(struct tty *tp);
void ttydisc_close(struct tty *tp);
int ttydisc_read(struct tty *tp, struct uio *uio, int ioflag);
int ttydisc_write(struct tty *tp, struct uio *uio, int ioflag);
void ttydisc_optimize(struct tty *tp);
/* Bottom half routines. */
void ttydisc_modem(struct tty *tp, int open);
#define ttydisc_can_bypass(tp) ((tp)->t_flags & TF_BYPASS)
int ttydisc_rint(struct tty *tp, char c, int flags);
size_t ttydisc_rint_bypass(struct tty *tp, const void *buf, size_t len);
void ttydisc_rint_done(struct tty *tp);
size_t ttydisc_rint_poll(struct tty *tp);
size_t ttydisc_getc(struct tty *tp, void *buf, size_t len);
int ttydisc_getc_uio(struct tty *tp, struct uio *uio);
size_t ttydisc_getc_poll(struct tty *tp);
/* Error codes for ttydisc_rint(). */
#define TRE_FRAMING 0x01
#define TRE_PARITY 0x02
#define TRE_OVERRUN 0x04
#define TRE_BREAK 0x08
static __inline size_t
ttydisc_read_poll(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
return ttyinq_bytescanonicalized(&tp->t_inq);
}
static __inline size_t
ttydisc_write_poll(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
return ttyoutq_bytesleft(&tp->t_outq);
}
#endif /* !_SYS_TTYDISC_HH_ */
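A sketch of the bottom-half input path built from the routines above; foo_rxintr is a hypothetical UART receive handler feeding characters to the discipline and flushing the work at the end:

/* Hypothetical RX interrupt handler for a UART driver. */
static void
foo_rxintr(struct tty *tp, const char *buf, size_t len)
{
	size_t i;

	tty_lock(tp);
	for (i = 0; i < len; i++) {
		/* 0 == no framing/parity/overrun/break error. */
		if (ttydisc_rint(tp, buf[i], 0) != 0)
			break;		/* input queue is full */
	}
	ttydisc_rint_done(tp);		/* wake readers, handle echo, etc. */
	tty_unlock(tp);
}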

147
freebsd/sys/ttyhook.h Normal file
View File

@ -0,0 +1,147 @@
/*-
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_TTYHOOK_HH_
#define _SYS_TTYHOOK_HH_
#ifndef _SYS_TTY_HH_
#error "can only be included through <sys/tty.h>"
#endif /* !_SYS_TTY_HH_ */
struct tty;
/*
* Hooks interface, which allows capturing and injecting traffic in the
* input and output paths of a TTY.
*/
typedef int th_rint_t(struct tty *tp, char c, int flags);
typedef size_t th_rint_bypass_t(struct tty *tp, const void *buf, size_t len);
typedef void th_rint_done_t(struct tty *tp);
typedef size_t th_rint_poll_t(struct tty *tp);
typedef size_t th_getc_inject_t(struct tty *tp, void *buf, size_t len);
typedef void th_getc_capture_t(struct tty *tp, const void *buf, size_t len);
typedef size_t th_getc_poll_t(struct tty *tp);
typedef void th_close_t(struct tty *tp);
struct ttyhook {
/* Character input. */
th_rint_t *th_rint;
th_rint_bypass_t *th_rint_bypass;
th_rint_done_t *th_rint_done;
th_rint_poll_t *th_rint_poll;
/* Character output. */
th_getc_inject_t *th_getc_inject;
th_getc_capture_t *th_getc_capture;
th_getc_poll_t *th_getc_poll;
th_close_t *th_close;
};
int ttyhook_register(struct tty **, struct proc *, int,
struct ttyhook *, void *);
void ttyhook_unregister(struct tty *);
#define ttyhook_softc(tp) ((tp)->t_hooksoftc)
#define ttyhook_hashook(tp,hook) ((tp)->t_hook != NULL && \
(tp)->t_hook->th_ ## hook != NULL)
static __inline int
ttyhook_rint(struct tty *tp, char c, int flags)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_hook->th_rint(tp, c, flags);
}
static __inline size_t
ttyhook_rint_bypass(struct tty *tp, const void *buf, size_t len)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_hook->th_rint_bypass(tp, buf, len);
}
static __inline void
ttyhook_rint_done(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
tp->t_hook->th_rint_done(tp);
}
static __inline size_t
ttyhook_rint_poll(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_hook->th_rint_poll(tp);
}
static __inline size_t
ttyhook_getc_inject(struct tty *tp, void *buf, size_t len)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_hook->th_getc_inject(tp, buf, len);
}
static __inline void
ttyhook_getc_capture(struct tty *tp, const void *buf, size_t len)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
tp->t_hook->th_getc_capture(tp, buf, len);
}
static __inline size_t
ttyhook_getc_poll(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
MPASS(!tty_gone(tp));
return tp->t_hook->th_getc_poll(tp);
}
static __inline void
ttyhook_close(struct tty *tp)
{
tty_lock_assert(tp, MA_OWNED);
tp->t_hook->th_close(tp);
}
#endif /* !_SYS_TTYHOOK_HH_ */
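A sketch of a capture hook built on this interface; the snoop_* names, the softc, and the registration arguments are illustrative only:

static th_getc_capture_t snoop_capture;

static struct ttyhook snoop_hook = {
	.th_getc_capture = snoop_capture,	/* sees a copy of every outgoing byte */
};

static void
snoop_capture(struct tty *tp, const void *buf, size_t len)
{
	/* ... stash the captured output somewhere ... */
	(void)tp; (void)buf; (void)len;
}

/*
 * Attaching to the TTY behind file descriptor fd of process p
 * (error handling omitted):
 *
 *	struct tty *tp;
 *	ttyhook_register(&tp, p, fd, &snoop_hook, softc);
 */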

178
freebsd/sys/ttyqueue.h Normal file
View File

@ -0,0 +1,178 @@
/*-
* Copyright (c) 2008 Ed Schouten <ed@FreeBSD.org>
* All rights reserved.
*
* Portions of this software were developed under sponsorship from Snow
* B.V., the Netherlands.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _SYS_TTYQUEUE_HH_
#define _SYS_TTYQUEUE_HH_
#ifndef _SYS_TTY_HH_
#error "can only be included through <sys/tty.h>"
#endif /* !_SYS_TTY_HH_ */
struct tty;
struct ttyinq_block;
struct ttyoutq_block;
struct uio;
/* Data input queue. */
struct ttyinq {
struct ttyinq_block *ti_firstblock;
struct ttyinq_block *ti_startblock;
struct ttyinq_block *ti_reprintblock;
struct ttyinq_block *ti_lastblock;
unsigned int ti_begin;
unsigned int ti_linestart;
unsigned int ti_reprint;
unsigned int ti_end;
unsigned int ti_nblocks;
unsigned int ti_quota;
};
#define TTYINQ_DATASIZE 128
/* Data output queue. */
struct ttyoutq {
struct ttyoutq_block *to_firstblock;
struct ttyoutq_block *to_lastblock;
unsigned int to_begin;
unsigned int to_end;
unsigned int to_nblocks;
unsigned int to_quota;
};
#define TTYOUTQ_DATASIZE (256 - sizeof(struct ttyoutq_block *))
#ifdef _KERNEL
/* Input queue handling routines. */
void ttyinq_setsize(struct ttyinq *ti, struct tty *tp, size_t len);
void ttyinq_free(struct ttyinq *ti);
int ttyinq_read_uio(struct ttyinq *ti, struct tty *tp, struct uio *uio,
size_t readlen, size_t flushlen);
size_t ttyinq_write(struct ttyinq *ti, const void *buf, size_t len,
int quote);
int ttyinq_write_nofrag(struct ttyinq *ti, const void *buf, size_t len,
int quote);
void ttyinq_canonicalize(struct ttyinq *ti);
size_t ttyinq_findchar(struct ttyinq *ti, const char *breakc, size_t maxlen,
char *lastc);
void ttyinq_flush(struct ttyinq *ti);
int ttyinq_peekchar(struct ttyinq *ti, char *c, int *quote);
void ttyinq_unputchar(struct ttyinq *ti);
void ttyinq_reprintpos_set(struct ttyinq *ti);
void ttyinq_reprintpos_reset(struct ttyinq *ti);
static __inline size_t
ttyinq_getsize(struct ttyinq *ti)
{
return (ti->ti_nblocks * TTYINQ_DATASIZE);
}
static __inline size_t
ttyinq_getallocatedsize(struct ttyinq *ti)
{
return (ti->ti_quota * TTYINQ_DATASIZE);
}
static __inline size_t
ttyinq_bytesleft(struct ttyinq *ti)
{
size_t len;
/* Make sure the usage never exceeds the length. */
len = ti->ti_nblocks * TTYINQ_DATASIZE;
MPASS(len >= ti->ti_end);
return (len - ti->ti_end);
}
static __inline size_t
ttyinq_bytescanonicalized(struct ttyinq *ti)
{
MPASS(ti->ti_begin <= ti->ti_linestart);
return (ti->ti_linestart - ti->ti_begin);
}
static __inline size_t
ttyinq_bytesline(struct ttyinq *ti)
{
MPASS(ti->ti_linestart <= ti->ti_end);
return (ti->ti_end - ti->ti_linestart);
}
/* Input buffer iteration. */
typedef void ttyinq_line_iterator_t(void *data, char c, int flags);
void ttyinq_line_iterate_from_linestart(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator, void *data);
void ttyinq_line_iterate_from_reprintpos(struct ttyinq *ti,
ttyinq_line_iterator_t *iterator, void *data);
/* Output queue handling routines. */
void ttyoutq_flush(struct ttyoutq *to);
void ttyoutq_setsize(struct ttyoutq *to, struct tty *tp, size_t len);
void ttyoutq_free(struct ttyoutq *to);
size_t ttyoutq_read(struct ttyoutq *to, void *buf, size_t len);
int ttyoutq_read_uio(struct ttyoutq *to, struct tty *tp, struct uio *uio);
size_t ttyoutq_write(struct ttyoutq *to, const void *buf, size_t len);
int ttyoutq_write_nofrag(struct ttyoutq *to, const void *buf, size_t len);
static __inline size_t
ttyoutq_getsize(struct ttyoutq *to)
{
return (to->to_nblocks * TTYOUTQ_DATASIZE);
}
static __inline size_t
ttyoutq_getallocatedsize(struct ttyoutq *to)
{
return (to->to_quota * TTYOUTQ_DATASIZE);
}
static __inline size_t
ttyoutq_bytesleft(struct ttyoutq *to)
{
size_t len;
/* Make sure the usage never exceeds the length. */
len = to->to_nblocks * TTYOUTQ_DATASIZE;
MPASS(len >= to->to_end);
return (len - to->to_end);
}
static __inline size_t
ttyoutq_bytesused(struct ttyoutq *to)
{
return (to->to_end - to->to_begin);
}
#endif /* _KERNEL */
#endif /* !_SYS_TTYQUEUE_HH_ */
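The inline accessors above are the kind of helpers a kern.ttys style export would use to fill struct xtty (declared in tty.h). A rough sketch, assuming the caller holds the TTY lock; fill_xtty_queues is a hypothetical helper:

static void
fill_xtty_queues(struct tty *tp, struct xtty *xt)
{
	tty_lock_assert(tp, MA_OWNED);

	/* Input queue accounting. */
	xt->xt_insize = ttyinq_getsize(&tp->t_inq);
	xt->xt_incc = ttyinq_bytescanonicalized(&tp->t_inq);
	xt->xt_inlc = ttyinq_bytesline(&tp->t_inq);
	xt->xt_inlow = tp->t_inlow;

	/* Output queue accounting. */
	xt->xt_outsize = ttyoutq_getsize(&tp->t_outq);
	xt->xt_outcc = ttyoutq_bytesused(&tp->t_outq);
	xt->xt_outlow = tp->t_outlow;
}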

414
freebsd/sys/user.h Normal file
View File

@ -0,0 +1,414 @@
/*-
* Copyright (c) 1982, 1986, 1989, 1991, 1993
* The Regents of the University of California.
* Copyright (c) 2007 Robert N. M. Watson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* @(#)user.h 8.2 (Berkeley) 9/23/93
* $FreeBSD$
*/
#ifndef _SYS_USER_HH_
#define _SYS_USER_HH_
#include <freebsd/machine/pcb.h>
#ifndef _KERNEL
/* stuff that *used* to be included by user.h, or is now needed */
#include <freebsd/sys/errno.h>
#include <freebsd/sys/time.h>
#include <freebsd/sys/resource.h>
#include <freebsd/sys/ucred.h>
#include <freebsd/sys/uio.h>
#include <freebsd/sys/queue.h>
#include <freebsd/sys/_lock.h>
#include <freebsd/sys/_mutex.h>
#include <freebsd/sys/proc.h>
#include <freebsd/vm/vm.h> /* XXX */
#include <freebsd/vm/vm_param.h> /* XXX */
#include <freebsd/vm/pmap.h> /* XXX */
#include <freebsd/vm/vm_map.h> /* XXX */
#endif /* !_KERNEL */
#ifndef _SYS_RESOURCEVAR_HH_
#include <freebsd/sys/resourcevar.h>
#endif
#ifndef _SYS_SIGNALVAR_HH_
#include <freebsd/sys/signalvar.h>
#endif
#ifndef _SYS_SOCKET_VAR_HH_
#include <freebsd/sys/socket.h>
#endif
/*
* KERN_PROC subtype ops return arrays of selected proc structure entries:
*
* This struct includes several arrays of spare space, with different arrays
* for different standard C-types. When adding new variables to this struct,
* the space for byte-aligned data should be taken from the ki_sparestring,
* pointers from ki_spareptrs, word-aligned data from ki_spareints, and
* doubleword-aligned data from ki_sparelongs. Make sure the space for new
* variables come from the array which matches the size and alignment of
* those variables on ALL hardware platforms, and then adjust the appropriate
* KI_NSPARE_* value(s) to match.
*
* Always verify that sizeof(struct kinfo_proc) == KINFO_PROC_SIZE on all
* platforms after you have added new variables. Note that if you change
* the value of KINFO_PROC_SIZE, then many userland programs will stop
* working until they are recompiled!
*
* Once you have added the new field, you will need to add code to initialize
* it in two places: function fill_kinfo_proc in sys/kern/kern_proc.c and
* function kvm_proclist in lib/libkvm/kvm_proc.c .
*/
#define KI_NSPARE_INT 9
#define KI_NSPARE_LONG 12
#define KI_NSPARE_PTR 6
#ifndef _KERNEL
#ifndef KINFO_PROC_SIZE
#error "Unknown architecture"
#endif
#endif /* !_KERNEL */
#define WMESGLEN 8 /* size of returned wchan message */
#define LOCKNAMELEN 8 /* size of returned lock name */
#define OCOMMLEN 16 /* size of returned thread name */
#define COMMLEN 19 /* size of returned ki_comm name */
#define KI_EMULNAMELEN 16 /* size of returned ki_emul */
#define KI_NGROUPS 16 /* number of groups in ki_groups */
#define LOGNAMELEN 17 /* size of returned ki_login */
/*
* Steal a bit from ki_cr_flags (cr_flags is never used) to indicate
* that the cred had more than KI_NGROUPS groups.
*/
#define KI_CRF_GRP_OVERFLOW 0x80000000
struct kinfo_proc {
int ki_structsize; /* size of this structure */
int ki_layout; /* reserved: layout identifier */
struct pargs *ki_args; /* address of command arguments */
struct proc *ki_paddr; /* address of proc */
struct user *ki_addr; /* kernel virtual addr of u-area */
struct vnode *ki_tracep; /* pointer to trace file */
struct vnode *ki_textvp; /* pointer to executable file */
struct filedesc *ki_fd; /* pointer to open file info */
struct vmspace *ki_vmspace; /* pointer to kernel vmspace struct */
void *ki_wchan; /* sleep address */
pid_t ki_pid; /* Process identifier */
pid_t ki_ppid; /* parent process id */
pid_t ki_pgid; /* process group id */
pid_t ki_tpgid; /* tty process group id */
pid_t ki_sid; /* Process session ID */
pid_t ki_tsid; /* Terminal session ID */
short ki_jobc; /* job control counter */
short ki_spare_short1; /* unused (just here for alignment) */
dev_t ki_tdev; /* controlling tty dev */
sigset_t ki_siglist; /* Signals arrived but not delivered */
sigset_t ki_sigmask; /* Current signal mask */
sigset_t ki_sigignore; /* Signals being ignored */
sigset_t ki_sigcatch; /* Signals being caught by user */
uid_t ki_uid; /* effective user id */
uid_t ki_ruid; /* Real user id */
uid_t ki_svuid; /* Saved effective user id */
gid_t ki_rgid; /* Real group id */
gid_t ki_svgid; /* Saved effective group id */
short ki_ngroups; /* number of groups */
short ki_spare_short2; /* unused (just here for alignment) */
gid_t ki_groups[KI_NGROUPS]; /* groups */
vm_size_t ki_size; /* virtual size */
segsz_t ki_rssize; /* current resident set size in pages */
segsz_t ki_swrss; /* resident set size before last swap */
segsz_t ki_tsize; /* text size (pages) XXX */
segsz_t ki_dsize; /* data size (pages) XXX */
segsz_t ki_ssize; /* stack size (pages) */
u_short ki_xstat; /* Exit status for wait & stop signal */
u_short ki_acflag; /* Accounting flags */
fixpt_t ki_pctcpu; /* %cpu for process during ki_swtime */
u_int ki_estcpu; /* Time averaged value of ki_cpticks */
u_int ki_slptime; /* Time since last blocked */
u_int ki_swtime; /* Time swapped in or out */
int ki_spareint1; /* unused (just here for alignment) */
u_int64_t ki_runtime; /* Real time in microsec */
struct timeval ki_start; /* starting time */
struct timeval ki_childtime; /* time used by process children */
long ki_flag; /* P_* flags */
long ki_kiflag; /* KI_* flags (below) */
int ki_traceflag; /* Kernel trace points */
char ki_stat; /* S* process status */
signed char ki_nice; /* Process "nice" value */
char ki_lock; /* Process lock (prevent swap) count */
char ki_rqindex; /* Run queue index */
u_char ki_oncpu; /* Which cpu we are on */
u_char ki_lastcpu; /* Last cpu we were on */
char ki_ocomm[OCOMMLEN+1]; /* thread name */
char ki_wmesg[WMESGLEN+1]; /* wchan message */
char ki_login[LOGNAMELEN+1]; /* setlogin name */
char ki_lockname[LOCKNAMELEN+1]; /* lock name */
char ki_comm[COMMLEN+1]; /* command name */
char ki_emul[KI_EMULNAMELEN+1]; /* emulation name */
/*
* When adding new variables, take space for char-strings from the
* front of ki_sparestrings, and ints from the end of ki_spareints.
* That way the spare room from both arrays will remain contiguous.
*/
char ki_sparestrings[68]; /* spare string space */
int ki_spareints[KI_NSPARE_INT]; /* spare room for growth */
u_int ki_cr_flags; /* Credential flags */
int ki_jid; /* Process jail ID */
int ki_numthreads; /* XXXKSE number of threads in total */
lwpid_t ki_tid; /* XXXKSE thread id */
struct priority ki_pri; /* process priority */
struct rusage ki_rusage; /* process rusage statistics */
/* XXX - most fields in ki_rusage_ch are not (yet) filled in */
struct rusage ki_rusage_ch; /* rusage of children processes */
struct pcb *ki_pcb; /* kernel virtual addr of pcb */
void *ki_kstack; /* kernel virtual addr of stack */
void *ki_udata; /* User convenience pointer */
struct thread *ki_tdaddr; /* address of thread */
/*
* When adding new variables, take space for pointers from the
* front of ki_spareptrs, and longs from the end of ki_sparelongs.
* That way the spare room from both arrays will remain contiguous.
*/
void *ki_spareptrs[KI_NSPARE_PTR]; /* spare room for growth */
long ki_sparelongs[KI_NSPARE_LONG]; /* spare room for growth */
long ki_sflag; /* PS_* flags */
long ki_tdflags; /* XXXKSE kthread flag */
};
void fill_kinfo_proc(struct proc *, struct kinfo_proc *);
/* XXX - the following two defines are temporary */
#define ki_childstime ki_rusage_ch.ru_stime
#define ki_childutime ki_rusage_ch.ru_utime
/*
* Legacy PS_ flag. This moved to p_flag but is maintained for
* compatibility.
*/
#define PS_INMEM 0x00001 /* Loaded into memory. */
/* ki_sessflag values */
#define KI_CTTY 0x00000001 /* controlling tty vnode active */
#define KI_SLEADER 0x00000002 /* session leader */
#define KI_LOCKBLOCK 0x00000004 /* proc blocked on lock ki_lockname */
/*
* This used to be the per-process structure containing data that
* isn't needed in core when the process is swapped out, but now it
* remains only for the benefit of a.out core dumps.
*/
struct user {
struct pstats u_stats; /* *p_stats */
struct kinfo_proc u_kproc; /* eproc */
};
/*
* The KERN_PROC_FILE sysctl allows a process to dump the file descriptor
* array of another process.
*/
#define KF_TYPE_NONE 0
#define KF_TYPE_VNODE 1
#define KF_TYPE_SOCKET 2
#define KF_TYPE_PIPE 3
#define KF_TYPE_FIFO 4
#define KF_TYPE_KQUEUE 5
#define KF_TYPE_CRYPTO 6
#define KF_TYPE_MQUEUE 7
#define KF_TYPE_SHM 8
#define KF_TYPE_SEM 9
#define KF_TYPE_PTS 10
#define KF_TYPE_UNKNOWN 255
#define KF_VTYPE_VNON 0
#define KF_VTYPE_VREG 1
#define KF_VTYPE_VDIR 2
#define KF_VTYPE_VBLK 3
#define KF_VTYPE_VCHR 4
#define KF_VTYPE_VLNK 5
#define KF_VTYPE_VSOCK 6
#define KF_VTYPE_VFIFO 7
#define KF_VTYPE_VBAD 8
#define KF_VTYPE_UNKNOWN 255
#define KF_FD_TYPE_CWD -1 /* Current working directory */
#define KF_FD_TYPE_ROOT -2 /* Root directory */
#define KF_FD_TYPE_JAIL -3 /* Jail directory */
#define KF_FLAG_READ 0x00000001
#define KF_FLAG_WRITE 0x00000002
#define KF_FLAG_APPEND 0x00000004
#define KF_FLAG_ASYNC 0x00000008
#define KF_FLAG_FSYNC 0x00000010
#define KF_FLAG_NONBLOCK 0x00000020
#define KF_FLAG_DIRECT 0x00000040
#define KF_FLAG_HASLOCK 0x00000080
/*
* Old format. Has variable hidden padding due to alignment.
* This is a compatibility hack for packages built before 7.1.
*/
#if defined(__amd64__)
#define KINFO_OFILE_SIZE 1328
#endif
#if defined(__i386__)
#define KINFO_OFILE_SIZE 1324
#endif
struct kinfo_ofile {
int kf_structsize; /* Size of kinfo_file. */
int kf_type; /* Descriptor type. */
int kf_fd; /* Array index. */
int kf_ref_count; /* Reference count. */
int kf_flags; /* Flags. */
/* XXX Hidden alignment padding here on amd64 */
off_t kf_offset; /* Seek location. */
int kf_vnode_type; /* Vnode type. */
int kf_sock_domain; /* Socket domain. */
int kf_sock_type; /* Socket type. */
int kf_sock_protocol; /* Socket protocol. */
char kf_path[PATH_MAX]; /* Path to file, if any. */
struct sockaddr_storage kf_sa_local; /* Socket address. */
struct sockaddr_storage kf_sa_peer; /* Peer address. */
};
#if defined(__amd64__) || defined(__i386__)
#define KINFO_FILE_SIZE 1392
#endif
struct kinfo_file {
int kf_structsize; /* Variable size of record. */
int kf_type; /* Descriptor type. */
int kf_fd; /* Array index. */
int kf_ref_count; /* Reference count. */
int kf_flags; /* Flags. */
int _kf_pad0; /* Round to 64 bit alignment */
int64_t kf_offset; /* Seek location. */
int kf_vnode_type; /* Vnode type. */
int kf_sock_domain; /* Socket domain. */
int kf_sock_type; /* Socket type. */
int kf_sock_protocol; /* Socket protocol. */
struct sockaddr_storage kf_sa_local; /* Socket address. */
struct sockaddr_storage kf_sa_peer; /* Peer address. */
int _kf_ispare[16]; /* Space for more stuff. */
/* Truncated before copyout in sysctl */
char kf_path[PATH_MAX]; /* Path to file, if any. */
};
/*
* The KERN_PROC_VMMAP sysctl allows a process to dump the VM layout of
* another process as a series of entries.
*/
#define KVME_TYPE_NONE 0
#define KVME_TYPE_DEFAULT 1
#define KVME_TYPE_VNODE 2
#define KVME_TYPE_SWAP 3
#define KVME_TYPE_DEVICE 4
#define KVME_TYPE_PHYS 5
#define KVME_TYPE_DEAD 6
#define KVME_TYPE_SG 7
#define KVME_TYPE_UNKNOWN 255
#define KVME_PROT_READ 0x00000001
#define KVME_PROT_WRITE 0x00000002
#define KVME_PROT_EXEC 0x00000004
#define KVME_FLAG_COW 0x00000001
#define KVME_FLAG_NEEDS_COPY 0x00000002
#define KVME_FLAG_NOCOREDUMP 0x00000004
#if defined(__amd64__)
#define KINFO_OVMENTRY_SIZE 1168
#endif
#if defined(__i386__)
#define KINFO_OVMENTRY_SIZE 1128
#endif
struct kinfo_ovmentry {
int kve_structsize; /* Size of kinfo_vmmapentry. */
int kve_type; /* Type of map entry. */
void *kve_start; /* Starting address. */
void *kve_end; /* Finishing address. */
int kve_flags; /* Flags on map entry. */
int kve_resident; /* Number of resident pages. */
int kve_private_resident; /* Number of private pages. */
int kve_protection; /* Protection bitmask. */
int kve_ref_count; /* VM obj ref count. */
int kve_shadow_count; /* VM obj shadow count. */
char kve_path[PATH_MAX]; /* Path to VM obj, if any. */
void *_kve_pspare[8]; /* Space for more stuff. */
off_t kve_offset; /* Mapping offset in object */
uint64_t kve_fileid; /* inode number if vnode */
dev_t kve_fsid; /* dev_t of vnode location */
int _kve_ispare[3]; /* Space for more stuff. */
};
#if defined(__amd64__) || defined(__i386__)
#define KINFO_VMENTRY_SIZE 1160
#endif
struct kinfo_vmentry {
int kve_structsize; /* Variable size of record. */
int kve_type; /* Type of map entry. */
uint64_t kve_start; /* Starting address. */
uint64_t kve_end; /* Finishing address. */
uint64_t kve_offset; /* Mapping offset in object */
uint64_t kve_fileid; /* inode number if vnode */
uint32_t kve_fsid; /* dev_t of vnode location */
int kve_flags; /* Flags on map entry. */
int kve_resident; /* Number of resident pages. */
int kve_private_resident; /* Number of private pages. */
int kve_protection; /* Protection bitmask. */
int kve_ref_count; /* VM obj ref count. */
int kve_shadow_count; /* VM obj shadow count. */
int _kve_pad0; /* 64bit align next field */
int _kve_ispare[16]; /* Space for more stuff. */
/* Truncated before copyout in sysctl */
char kve_path[PATH_MAX]; /* Path to VM obj, if any. */
};
/*
* The KERN_PROC_KSTACK sysctl allows a process to dump the kernel stacks of
* another process as a series of entries. Each stack is represented by a
* series of symbol names and offsets as generated by stack_sbuf_print(9).
*/
#define KKST_MAXLEN 1024
#define KKST_STATE_STACKOK 0 /* Stack is valid. */
#define KKST_STATE_SWAPPED 1 /* Stack swapped out. */
#define KKST_STATE_RUNNING 2 /* Stack ephemeral. */
#if defined(__amd64__) || defined(__i386__)
#define KINFO_KSTACK_SIZE 1096
#endif
struct kinfo_kstack {
lwpid_t kkst_tid; /* ID of thread. */
int kkst_state; /* Validity of stack. */
char kkst_trace[KKST_MAXLEN]; /* String representing stack. */
int _kkst_ispare[16]; /* Space for more stuff. */
};
#endif
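A userland sketch of the KERN_PROC interface described in the comment block above, assuming the standard FreeBSD sysctl(3) MIB constants; it fetches the kinfo_proc record of the calling process:

#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	struct kinfo_proc kp;
	size_t len = sizeof(kp);
	int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid() };

	if (sysctl(mib, 4, &kp, &len, NULL, 0) == -1) {
		perror("sysctl");
		return 1;
	}
	/* ki_structsize lets callers detect layout mismatches. */
	printf("pid %d comm %s structsize %d\n",
	    (int)kp.ki_pid, kp.ki_comm, kp.ki_structsize);
	return 0;
}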

View File

@ -183,3 +183,20 @@ cv_broadcastpri(struct cv *cv, int pri)
rv = pthread_cond_broadcast(&cv->cv_id);
BSD_ASSERT_RV(rv);
}
int
_cv_wait_sig(struct cv *cvp, struct lock_object *lock)
{
/* XXX */
return _cv_wait_support(cvp, lock, 0, true);
}
int
_cv_timedwait_sig(struct cv *cvp, struct lock_object *lock, int timo)
{
/* XXX */
if (timo <= 0) {
timo = 1;
}
return _cv_wait_support(cvp, lock, timo, true);
}
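These wrappers route the signal-interruptible variants through the same _cv_wait_support() helper as the existing code. A sketch of how kernel-style code typically reaches them, assuming the usual cv_timedwait_sig() macro from sys/condvar.h and a hypothetical foo_softc:

/* Hypothetical consumer: sleep until sc->sc_count > 0, but let signals in. */
static int
foo_wait_for_data(struct foo_softc *sc)
{
	int error = 0;

	mtx_lock(&sc->sc_mtx);
	while (sc->sc_count == 0) {
		error = cv_timedwait_sig(&sc->sc_cv, &sc->sc_mtx, hz);
		if (error == EINTR || error == ERESTART)
			break;		/* interrupted by a signal */
		if (error == EWOULDBLOCK)
			error = 0;	/* plain timeout: re-check and keep waiting */
	}
	mtx_unlock(&sc->sc_mtx);
	return (error);
}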

View File

@ -49,6 +49,7 @@
#include <freebsd/sys/mutex.h>
#include <freebsd/sys/jail.h>
#include <freebsd/sys/resourcevar.h>
#include <freebsd/sys/filedesc.h>
RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_thread_chain);
@ -56,6 +57,9 @@ RTEMS_CHAIN_DEFINE_EMPTY(rtems_bsd_thread_chain);
static struct ucred FIXME_ucred = {
.cr_ref = 1 /* reference count */
};
static struct filedesc FIXME_fd = {
.fd_ofiles = NULL /* file structures for open files */
};
static struct proc FIXME_proc = {
.p_ucred = NULL /* (c) Process owner's identity. */
};
@ -67,29 +71,34 @@ static struct prison FIXME_prison = {
static struct uidinfo FIXME_uidinfo; /* per euid resource consumption */
static struct uidinfo FIXME_ruidinfo; /* per ruid resource consumption */
static struct thread *rtems_bsd_current_td = NULL;
static void rtems_bsd_thread_descriptor_dtor(void *td)
{
// XXX are there other pieces to clean up?
free(td, M_TEMP);
}
static struct thread *
rtems_bsd_thread_init_note( rtems_id id )
rtems_bsd_thread_init( rtems_id id )
{
rtems_status_code sc = RTEMS_SUCCESSFUL;
unsigned index = 0;
char name [5] = "_???";
struct thread *td = malloc(sizeof(struct thread), M_TEMP, M_WAITOK | M_ZERO);
struct proc *proc;
struct thread *td;
struct proc *proc;
if ( td == NULL )
return td;
sc = rtems_task_set_note( id, RTEMS_NOTEPAD_0, ( uint32_t )td );
if (sc != RTEMS_SUCCESSFUL) {
free(td, M_TEMP);
td = malloc(sizeof(struct thread), M_TEMP, M_WAITOK | M_ZERO);
if (td == NULL)
return NULL;
}
// Initialize the thread descriptor
index = rtems_object_id_get_index(id);
snprintf(name + 1, sizeof(name) - 1, "%03u", index);
sc = rtems_object_set_name(id, name);
if (sc != RTEMS_SUCCESSFUL) {
rtems_task_delete(id);
// XXX does the thread get deleted? Seems wrong
// rtems_task_delete(id);
free(td, M_TEMP);
return NULL;
}
@ -98,55 +107,62 @@ rtems_bsd_thread_init_note( rtems_id id )
td->td_ucred = crhold(&FIXME_ucred);
td->td_proc = &FIXME_proc;
if (td->td_proc->p_ucred != NULL)
return td;
if (td->td_proc->p_ucred == NULL) {
if ( prison_init ) {
mtx_init(&FIXME_prison.pr_mtx, "prison lock", NULL, MTX_DEF | MTX_DUPOK);
prison_init = 0;
}
FIXME_ucred.cr_prison = &FIXME_prison; /* jail(2) */
FIXME_ucred.cr_uidinfo = uifind(0);
FIXME_ucred.cr_ruidinfo = uifind(0);
FIXME_ucred.cr_ngroups = 1; /* group 0 */
if (prison_init ) {
mtx_init(&FIXME_prison.pr_mtx, "prison lock", NULL, MTX_DEF | MTX_DUPOK);
td->td_proc->p_ucred = crhold(&FIXME_ucred);
mtx_init(&td->td_proc->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
td->td_proc->p_pid = getpid();
td->td_proc->p_fibnum = 0;
td->td_proc->p_fd = &FIXME_fd;
sx_init_flags(&FIXME_fd.fd_sx, "config SX thread lock", SX_DUPOK);
}
prison_init = 0;
}
// Actually set the global pointer
rtems_bsd_current_td = td;
FIXME_ucred.cr_prison = &FIXME_prison; /* jail(2) */
FIXME_ucred.cr_uidinfo = uifind(0);
FIXME_ucred.cr_ruidinfo = uifind(0);
FIXME_ucred.cr_ngroups = 1; /* group 0 */
// Now add the task descriptor as a per-task variable
sc = rtems_task_variable_add(
id,
&rtems_bsd_current_td,
rtems_bsd_thread_descriptor_dtor
);
if (sc != RTEMS_SUCCESSFUL) {
free(td, M_TEMP);
return NULL;
}
td->td_proc->p_ucred = crhold(&FIXME_ucred);
mtx_init(&td->td_proc->p_mtx, "process lock", NULL, MTX_DEF | MTX_DUPOK);
td->td_proc->p_pid = getpid();
td->td_proc->p_fibnum = 0;
return td;
return td;
}
/*
* XXX Threads which delete themselves will leak this
* XXX Maybe better integrated into the TCB OR a task variable.
* XXX but this is OK for now
* Threads which delete themselves would leak the task
* descriptor so we are using the per-task variable so
* it can be cleaned up.
*/
struct thread *rtems_get_curthread(void)
{
struct thread *td;
rtems_status_code sc;
rtems_id id;
/*
* If we already have a struct thread associated with this thread,
* obtain it
* obtain it. Otherwise, allocate and initialize one.
*/
id = rtems_task_self();
sc = rtems_task_get_note( id, RTEMS_NOTEPAD_0, (uint32_t *) &td );
if (sc != RTEMS_SUCCESSFUL) {
panic("rtems_get_curthread: get note Error\n");
td = rtems_bsd_current_td;
if ( td == NULL ) {
td = rtems_bsd_thread_init( rtems_task_self() );
if ( td == NULL ){
panic("rtems_get_curthread: Unable to thread descriptor\n");
}
}
td = rtems_bsd_thread_init_note( id);
if ( td == NULL ){
panic("rtems_get_curthread: Unable to generate thread note\n");
}
return td;
}
@ -163,6 +179,8 @@ rtems_bsd_thread_start(struct thread **td_ptr, void (*func)(void *), void *arg,
BSD_ASSERT(pages >= 0);
memset( td, 0, sizeof(struct thread) );
sc = rtems_task_create(
rtems_build_name('_', 'T', 'S', 'K'),
BSD_TASK_PRIORITY_NORMAL,
@ -177,8 +195,8 @@ rtems_bsd_thread_start(struct thread **td_ptr, void (*func)(void *), void *arg,
return ENOMEM;
}
td = rtems_bsd_thread_init_note( id );
if (!td)
td = rtems_bsd_thread_init( id );
if (!td)
return ENOMEM;
sc = rtems_task_start(id, (rtems_task_entry) func, (rtems_task_argument) arg);