SLEEP(9): Port to RTEMS

Sebastian Huber 2015-03-24 10:09:43 +01:00
parent f661c79732
commit 53b03a1a57
8 changed files with 39 additions and 275 deletions

View File: Makefile

@@ -78,7 +78,6 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-get-task-priority.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-get-task-stack-size.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-init.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-jail.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-kern_synch.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-log.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-malloc.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-mbuf.c
@@ -175,6 +174,7 @@ LIB_C_FILES += freebsd/sys/kern/kern_mib.c
LIB_C_FILES += freebsd/sys/kern/kern_module.c
LIB_C_FILES += freebsd/sys/kern/kern_mtxpool.c
LIB_C_FILES += freebsd/sys/kern/kern_osd.c
LIB_C_FILES += freebsd/sys/kern/kern_synch.c
LIB_C_FILES += freebsd/sys/kern/kern_sysctl.c
LIB_C_FILES += freebsd/sys/kern/kern_time.c
LIB_C_FILES += freebsd/sys/kern/kern_timeout.c

View File: libbsd.py

@@ -678,7 +678,6 @@ rtems.addRTEMSSourceFiles(
'rtems/rtems-bsd-get-task-stack-size.c',
'rtems/rtems-bsd-init.c',
'rtems/rtems-bsd-jail.c',
'rtems/rtems-bsd-kern_synch.c',
'rtems/rtems-bsd-log.c',
'rtems/rtems-bsd-malloc.c',
'rtems/rtems-bsd-mbuf.c',
@@ -884,6 +883,7 @@ base.addKernelSpaceSourceFiles(
'sys/kern/kern_module.c',
'sys/kern/kern_mtxpool.c',
'sys/kern/kern_osd.c',
'sys/kern/kern_synch.c',
'sys/kern/kern_sysctl.c',
'sys/kern/kern_time.c',
'sys/kern/kern_timeout.c',

View File: freebsd/sys/kern/init_main.c

@@ -469,11 +469,13 @@ proc0_init(void *dummy __unused)
         * Add scheduler specific parts to proc, thread as needed.
         */
        schedinit();            /* scheduler gets its house in order */
#endif /* __rtems__ */
        /*
         * Initialize sleep queue hash table
         */
        sleepinit();

#ifndef __rtems__
        /*
         * additional VM structures
         */

View File: freebsd/sys/kern/kern_synch.c

@@ -82,13 +82,16 @@ __FBSDID("$FreeBSD$");
        ((td)->td_inhibitors & TDI_LOCK) != 0 ? "blocked" : \
        ((td)->td_inhibitors & TDI_IWAIT) != 0 ? "iwait" : "yielding")

#ifndef __rtems__
static void synch_setup(void *dummy);
SYSINIT(synch_setup, SI_SUB_KICK_SCHEDULER, SI_ORDER_FIRST, synch_setup,
    NULL);

int hogticks;
#endif /* __rtems__ */
static int pause_wchan;

#ifndef __rtems__
static struct callout loadav_callout;

struct loadavg averunnable =
@@ -122,12 +125,15 @@ SDT_PROBE_DEFINE(sched, , , cpucaps__wakeup);
SDT_PROBE_DEFINE(sched, , , schedctl__nopreempt);
SDT_PROBE_DEFINE(sched, , , schedctl__preempt);
SDT_PROBE_DEFINE(sched, , , schedctl__yield);
#endif /* __rtems__ */

void
sleepinit(void)
{

#ifndef __rtems__
        hogticks = (hz / 10) * 2;       /* Default only. */
#endif /* __rtems__ */
        init_sleepqueues();
}

@@ -151,13 +157,21 @@ _sleep(void *ident, struct lock_object *lock, int priority,
    const char *wmesg, int timo)
{
        struct thread *td;
#ifndef __rtems__
        struct proc *p;
#endif /* __rtems__ */
        struct lock_class *class;
#ifndef __rtems__
        int catch, flags, lock_state, pri, rval;
#else /* __rtems__ */
        int flags, lock_state, pri, rval;
#endif /* __rtems__ */
        WITNESS_SAVE_DECL(lock_witness);

        td = curthread;
#ifndef __rtems__
        p = td->td_proc;
#endif /* __rtems__ */
#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(1, 0, wmesg);
@@ -168,14 +182,17 @@ _sleep(void *ident, struct lock_object *lock, int priority,
            ("sleeping without a lock"));
        KASSERT(p != NULL, ("msleep1"));
        KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
#ifndef __rtems__
        if (priority & PDROP)
                KASSERT(lock != NULL && lock != &Giant.lock_object,
                    ("PDROP requires a non-Giant lock"));
#endif /* __rtems__ */
        if (lock != NULL)
                class = LOCK_CLASS(lock);
        else
                class = NULL;

#ifndef __rtems__
        if (cold || SCHEDULER_STOPPED()) {
                /*
                 * During autoconfiguration, just return;
@@ -191,6 +208,9 @@ _sleep(void *ident, struct lock_object *lock, int priority,
        }
        catch = priority & PCATCH;
        pri = priority & PRIMASK;
#else /* __rtems__ */
        pri = priority;
#endif /* __rtems__ */

        /*
         * If we are already on a sleep queue, then remove us from that
@@ -204,10 +224,12 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                flags = SLEEPQ_PAUSE;
        else
                flags = SLEEPQ_SLEEP;
#ifndef __rtems__
        if (catch)
                flags |= SLEEPQ_INTERRUPTIBLE;
        if (priority & PBDRY)
                flags |= SLEEPQ_STOP_ON_BDRY;
#endif /* __rtems__ */

        sleepq_lock(ident);
        CTR5(KTR_PROC, "sleep: thread %ld (pid %ld, %s) on %s (%p)",
@@ -242,12 +264,18 @@ _sleep(void *ident, struct lock_object *lock, int priority,
                lock_state = class->lc_unlock(lock);
                sleepq_lock(ident);
        }
#ifndef __rtems__
        if (timo && catch)
                rval = sleepq_timedwait_sig(ident, pri);
        else if (timo)
#else /* __rtems__ */
        if (timo)
#endif /* __rtems__ */
                rval = sleepq_timedwait(ident, pri);
#ifndef __rtems__
        else if (catch)
                rval = sleepq_wait_sig(ident, pri);
#endif /* __rtems__ */
        else {
                sleepq_wait(ident, pri);
                rval = 0;
@@ -264,6 +292,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
        return (rval);
}

#ifndef __rtems__
int
msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
{
@@ -341,6 +370,7 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
        WITNESS_RESTORE(&mtx->lock_object, mtx);
        return (rval);
}
#endif /* __rtems__ */

/*
 * pause() delays the calling thread by the given number of system ticks.
@@ -409,6 +439,7 @@ wakeup_one(void *ident)
        kick_proc0();
}

#ifndef __rtems__
static void
kdb_switch(void)
{
@@ -623,3 +654,4 @@ sys_yield(struct thread *td, struct yield_args *uap)
        td->td_retval[0] = 0;
        return (0);
}
#endif /* __rtems__ */
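Taken together, the hunks above keep the core SLEEP(9) entry points (_sleep(), pause(), wakeup(), wakeup_one()) and compile out everything tied to signals and load average accounting: the PCATCH, PDROP and PBDRY handling disappears, the priority argument is used as-is, and a timed wait always reduces to sleepq_timedwait(). A minimal sketch of the classic sleep/wakeup pattern that survives the port; the example_* names, the "exwait" message, and the hz / 2 timeout are hypothetical, not part of this commit:

#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_mtx;
MTX_SYSINIT(example_mtx_init, &example_mtx, "exmtx", MTX_DEF);
static int example_ready;

static void
example_consumer(void)
{
        mtx_lock(&example_mtx);
        while (!example_ready) {
                /*
                 * Atomically release example_mtx and sleep on the wait
                 * channel &example_ready for at most hz / 2 ticks; the
                 * mutex is reacquired before msleep() returns, and a
                 * timeout yields EWOULDBLOCK.  A PCATCH flag would be
                 * ignored here on RTEMS, since the signal paths above
                 * are compiled out.
                 */
                (void)msleep(&example_ready, &example_mtx, 0, "exwait",
                    hz / 2);
        }
        mtx_unlock(&example_mtx);
}

static void
example_producer(void)
{
        mtx_lock(&example_mtx);
        example_ready = 1;
        wakeup(&example_ready); /* wake every thread sleeping on the channel */
        mtx_unlock(&example_mtx);
}

pause(9), which the port also keeps, is the degenerate case of the same mechanism: it sleeps on a dummy wait channel that nothing ever wakes, so only the timeout can end the sleep.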

View File: freebsd/sys/sys/proc.h

@@ -234,7 +234,9 @@ struct thread {
        int             td_pflags;      /* (k) Private thread (TDP_*) flags. */
        int             td_dupfd;       /* (k) Ret value from fdopen. XXX */
        int             td_sqqueue;     /* (t) Sleepqueue queue blocked on. */
#endif /* __rtems__ */
        void            *td_wchan;      /* (t) Sleep address. */
#ifndef __rtems__
        const char      *td_wmesg;      /* (t) Reason for sleep. */
        u_char          td_lastcpu;     /* (t) Last cpu we were on. */
        u_char          td_oncpu;       /* (t) Which cpu we are on. */

View File

@@ -0,0 +1 @@
/* EMPTY */

View File: rtemsbsd/rtems/rtems-bsd-kern_synch.c (deleted)

@@ -1,273 +0,0 @@
/**
 * @file
 *
 * @ingroup rtems_bsd_rtems
 *
 * @brief TODO.
 */

/*
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *    The Regents of the University of California. All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
 * All rights reserved.
 *
 * Copyright (c) 2009-2013 embedded brains GmbH. All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95
 */

#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-thread.h>

#include <rtems/score/statesimpl.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/thread.h>
#include <rtems/score/threadqimpl.h>

#include <rtems/bsd/sys/param.h>
#include <rtems/bsd/sys/types.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/mutex.h>

static int pause_wchan;

/*
 * Constants for the hash table of sleep queue chains. These constants are
 * the same ones that 4BSD (and possibly earlier versions of BSD) used.
 * Basically, we ignore the lower 8 bits of the address since most wait
 * channel pointers are aligned and only look at the next 7 bits for the
 * hash. SC_TABLESIZE must be a power of two for SC_MASK to work properly.
 */
#define SC_TABLESIZE    128                     /* Must be power of 2. */
#define SC_MASK         (SC_TABLESIZE - 1)
#define SC_SHIFT        8
#define SC_HASH(wc)     (((uintptr_t)(wc) >> SC_SHIFT) & SC_MASK)
#define SC_LOOKUP(wc)   &sleepq_chains[SC_HASH(wc)]

struct sleepqueue_chain {
        LIST_HEAD(, sleepqueue) sc_queues;      /* List of sleep queues. */
};

static struct sleepqueue_chain sleepq_chains[SC_TABLESIZE];
static void
init_sleepqueues(void)
{
        size_t i;

        for (i = 0; i < SC_TABLESIZE; i++) {
                LIST_INIT(&sleepq_chains[i].sc_queues);
        }
}

SYSINIT(rtems_bsd_sleep, SI_SUB_INTRINSIC, SI_ORDER_FIRST, init_sleepqueues, NULL);
/*
 * Look up the sleep queue associated with a given wait channel in the hash
 * table locking the associated sleep queue chain. If no queue is found in
 * the table, NULL is returned.
 */
static struct sleepqueue *
sleepq_lookup(void *wchan)
{
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;

        KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__));
        sc = SC_LOOKUP(wchan);
        LIST_FOREACH(sq, &sc->sc_queues, sq_hash)
                if (sq->sq_wchan == wchan)
                        return (sq);
        return (NULL);
}
int
_sleep(void *wchan, struct lock_object *lock, int priority, const char *wmesg,
    int timo)
{
        Thread_Control *executing;
        struct thread *td;
        struct lock_class *class;
        int lock_state;
        int rval;
        struct sleepqueue *sq;
        struct sleepqueue_chain *sc;

#ifdef KTRACE
        if (KTRPOINT(td, KTR_CSW))
                ktrcsw(1, 0);
#endif
        WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
            "Sleeping on \"%s\"", wmesg);
        KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
            ("sleeping without a lock"));
        KASSERT(p != NULL, ("msleep1"));
        KASSERT(wchan != NULL && TD_IS_RUNNING(td), ("msleep"));
        if (priority & PDROP)
                KASSERT(lock != NULL && lock != &Giant.lock_object,
                    ("PDROP requires a non-Giant lock"));

        if (lock != NULL)
                class = LOCK_CLASS(lock);
        else
                class = NULL;

        if (lock == &Giant.lock_object)
                mtx_assert(&Giant, MA_OWNED);
        DROP_GIANT();

        td = curthread;

        _Thread_Disable_dispatch();

        if (lock != NULL) {
                lock_state = class->lc_unlock(lock);
        }

        sc = SC_LOOKUP(wchan);

        /* Look up the sleep queue associated with the wait channel 'wchan'. */
        sq = sleepq_lookup(wchan);

        /*
         * If the wait channel does not already have a sleep queue, use
         * this thread's sleep queue. Otherwise, insert the current thread
         * into the sleep queue already in use by this wait channel.
         */
        if (sq == NULL) {
                sq = td->td_sleepqueue;
                LIST_INSERT_HEAD(&sc->sc_queues, sq, sq_hash);
                sq->sq_wchan = wchan;
        } else {
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        td->td_sleepqueue = NULL;

        _Thread_queue_Enter_critical_section(&sq->sq_blocked);
        executing = _Thread_Executing;
        executing->Wait.queue = &sq->sq_blocked;

        _Thread_queue_Enqueue(&sq->sq_blocked, executing,
            (Watchdog_Interval) timo);

        _Thread_Enable_dispatch();

        rval = (int) executing->Wait.return_code;

        _Thread_Disable_dispatch();

        /*
         * Get a sleep queue for this thread. If this is the last waiter,
         * use the queue itself and take it out of the chain, otherwise,
         * remove a queue from the free list.
         */
        if (LIST_EMPTY(&sq->sq_free)) {
                td->td_sleepqueue = sq;
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);

        _Thread_Enable_dispatch();

        PICKUP_GIANT();

        if (lock != NULL && lock != &Giant.lock_object && !(priority & PDROP)) {
                class->lc_lock(lock, lock_state);
                WITNESS_RESTORE(lock, lock_witness);
        }

        return (rval);
}
/*
 * pause() is like tsleep() except that the intention is to not be
 * explicitly woken up by another thread. Instead, the current thread
 * simply wishes to sleep until the timeout expires. It is
 * implemented using a dummy wait channel.
 */
int
pause(const char *wmesg, int timo)
{
        KASSERT(timo != 0, ("pause: timeout required"));
        return (tsleep(&pause_wchan, 0, wmesg, timo));
}

static void
rtems_bsd_sleepq_wakeup(struct sleepqueue *sq, Thread_Control *thread)
{
        thread->Wait.return_code = 0;
}
void
wakeup(void *wchan)
{
        struct sleepqueue *sq;

        _Thread_Disable_dispatch();

        sq = sleepq_lookup(wchan);
        if (sq != NULL) {
                Thread_Control *thread;

                while ((thread = _Thread_queue_Dequeue(&sq->sq_blocked)) != NULL) {
                        rtems_bsd_sleepq_wakeup(sq, thread);
                }
        }

        _Thread_Enable_dispatch();
}

void
wakeup_one(void *wchan)
{
        struct sleepqueue *sq;

        _Thread_Disable_dispatch();

        sq = sleepq_lookup(wchan);
        if (sq != NULL) {
                Thread_Control *thread;

                thread = _Thread_queue_Dequeue(&sq->sq_blocked);
                if (thread != NULL) {
                        rtems_bsd_sleepq_wakeup(sq, thread);
                }
        }

        _Thread_Enable_dispatch();
}