Mirror of https://git.rtems.org/rtems-libbsd/
SLEEPQUEUE(9): Port to RTEMS
commit 8475e7aa0a
parent 9975839a12
Makefile (+1)
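The change below ports the FreeBSD SLEEPQUEUE(9) primitive sleep-queue facility to RTEMS. As a reader's orientation (not part of the commit), the following sketch shows how FreeBSD kernel code typically drives this API; the example_* names are hypothetical, while the sleepq_*() calls, the SLEEPQ_SLEEP flag and the EWOULDBLOCK timeout convention match the interface used in the hunks that follow.

/*
 * Reader's sketch, not part of the commit: typical SLEEPQUEUE(9) usage as it
 * exists in FreeBSD and becomes available to rtems-libbsd through this port.
 */
#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/lock.h>
#include <sys/sleepqueue.h>

static int example_flag;        /* hypothetical wait condition */

/* Sleep until example_flag becomes nonzero; 0 on wakeup, EWOULDBLOCK on timeout. */
static int
example_wait_for_flag(int timo)
{
        void *wchan = &example_flag;    /* any kernel address serves as a wait channel */

        for (;;) {
                sleepq_lock(wchan);
                if (example_flag != 0) {
                        sleepq_release(wchan);
                        return (0);
                }
                sleepq_add(wchan, NULL, "exflag", SLEEPQ_SLEEP, 0);
                if (timo != 0) {
                        sleepq_set_timeout(wchan, timo);
                        if (sleepq_timedwait(wchan, 0) == EWOULDBLOCK)
                                return (EWOULDBLOCK);
                } else {
                        sleepq_wait(wchan, 0);
                }
        }
}

/* Make the condition true and wake one sleeper, if any. */
static void
example_set_flag(void)
{
        void *wchan = &example_flag;

        sleepq_lock(wchan);
        example_flag = 1;
        /* Return value is the "wake up swapper" hint; kick_proc0() is a no-op in the port. */
        (void)sleepq_signal(wchan, SLEEPQ_SLEEP, 0, 0);
        sleepq_release(wchan);
}

The sleep-queue chain lock taken by sleepq_lock() is what makes the flag check race-free: the waker stores the flag under the same lock, so a wakeup cannot slip in between the check and sleepq_wait().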
@@ -188,6 +188,7 @@ LIB_C_FILES += freebsd/sys/kern/subr_lock.c
 LIB_C_FILES += freebsd/sys/kern/subr_module.c
 LIB_C_FILES += freebsd/sys/kern/subr_rman.c
 LIB_C_FILES += freebsd/sys/kern/subr_sbuf.c
+LIB_C_FILES += freebsd/sys/kern/subr_sleepqueue.c
 LIB_C_FILES += freebsd/sys/kern/subr_taskqueue.c
 LIB_C_FILES += freebsd/sys/kern/subr_uio.c
 LIB_C_FILES += freebsd/sys/kern/subr_unit.c
@@ -834,6 +834,7 @@ base.addKernelSpaceHeaderFiles(
         'sys/sys/sigio.h',
         'sys/sys/_sigset.h',
         'sys/sys/smp.h',
+        'sys/sys/sleepqueue.h',
         'sys/sys/_sockaddr_storage.h',
         'sys/sys/sockbuf.h',
         'sys/sys/socket.h',
@@ -897,6 +898,7 @@ base.addKernelSpaceSourceFiles(
         'sys/kern/subr_module.c',
         'sys/kern/subr_rman.c',
         'sys/kern/subr_sbuf.c',
+        'sys/kern/subr_sleepqueue.c',
         'sys/kern/subr_taskqueue.c',
         'sys/kern/subr_uio.c',
         'sys/kern/subr_unit.c',
@@ -2,6 +2,7 @@
 
 /*-
  * Copyright (c) 2004 John Baldwin <jhb@FreeBSD.org>
+ * Copyright (c) 2015 embedded brains GmbH <rtems@embedded-brains.de>
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -85,6 +86,10 @@ __FBSDID("$FreeBSD$");
 #ifdef DDB
 #include <ddb/ddb.h>
 #endif
+#ifdef __rtems__
+#include <machine/rtems-bsd-thread.h>
+#include <rtems/score/threadimpl.h>
+#endif /* __rtems__ */
 
 /*
  * Constants for the hash table of sleep queue chains. These constants are
@@ -155,9 +160,11 @@ static uma_zone_t sleepq_zone;
 /*
  * Prototypes for non-exported routines.
  */
+#ifndef __rtems__
 static int sleepq_catch_signals(void *wchan, int pri);
 static int sleepq_check_signals(void);
 static int sleepq_check_timeout(void);
+#endif /* __rtems__ */
 #ifdef INVARIANTS
 static void sleepq_dtor(void *mem, int size, void *arg);
 #endif
@@ -165,7 +172,11 @@ static int sleepq_init(void *mem, int size, int flags);
 static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td,
             int pri);
 static void sleepq_switch(void *wchan, int pri);
+#ifndef __rtems__
 static void sleepq_timeout(void *arg);
+#else /* __rtems__ */
+static void sleepq_timeout(Objects_Id id, void *arg);
+#endif /* __rtems__ */
 
 SDT_PROBE_DECLARE(sched, , , sleep);
 SDT_PROBE_DECLARE(sched, , , wakeup);
@@ -206,7 +217,9 @@ init_sleepqueues(void)
             NULL, NULL, sleepq_init, NULL, UMA_ALIGN_CACHE, 0);
 #endif
 
+#ifndef __rtems__
        thread0.td_sleepqueue = sleepq_alloc();
+#endif /* __rtems__ */
 }
 
 /*
@@ -286,6 +299,11 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
+#ifdef __rtems__
+       ISR_lock_Context lock_context;
+       Thread_Control *executing;
+       struct thread *succ;
+#endif /* __rtems__ */
 
        td = curthread;
        sc = SC_LOOKUP(wchan);
@@ -341,17 +359,40 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
                LIST_INSERT_HEAD(&sq->sq_free, td->td_sleepqueue, sq_hash);
        }
        thread_lock(td);
+#ifndef __rtems__
        TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
+#else /* __rtems__ */
+       /* FIXME: This is broken with clustered scheduling */
+       succ = NULL;
+       TAILQ_FOREACH(succ, &sq->sq_blocked[queue], td_slpq) {
+               if (td->td_thread->current_priority <
+                   succ->td_thread->current_priority)
+                       break;
+       }
+       if (succ == NULL)
+               TAILQ_INSERT_TAIL(&sq->sq_blocked[queue], td, td_slpq);
+       else
+               TAILQ_INSERT_BEFORE(succ, td, td_slpq);
+#endif /* __rtems__ */
        sq->sq_blockedcnt[queue]++;
+#ifdef __rtems__
+       executing = td->td_thread;
+       _Objects_ISR_disable_and_acquire(&executing->Object, &lock_context);
+       td->td_sq_state = TD_SQ_TIRED;
+#endif /* __rtems__ */
        td->td_sleepqueue = NULL;
        td->td_sqqueue = queue;
        td->td_wchan = wchan;
        td->td_wmesg = wmesg;
+#ifndef __rtems__
        if (flags & SLEEPQ_INTERRUPTIBLE) {
                td->td_flags |= TDF_SINTR;
                td->td_flags &= ~TDF_SLEEPABORT;
        }
        thread_unlock(td);
+#else /* __rtems__ */
+       _Objects_Release_and_ISR_enable(&executing->Object, &lock_context);
+#endif /* __rtems__ */
 }
 
 /*
@@ -361,6 +402,7 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
 void
 sleepq_set_timeout(void *wchan, int timo)
 {
+#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct thread *td;
 
@@ -371,6 +413,17 @@ sleepq_set_timeout(void *wchan, int timo)
        MPASS(td->td_sleepqueue == NULL);
        MPASS(wchan != NULL);
        callout_reset_curcpu(&td->td_slpcallout, timo, sleepq_timeout, td);
+#else /* __rtems__ */
+       Thread_Control *executing;
+
+       _Thread_Disable_dispatch();
+       executing = _Thread_Executing;
+       BSD_ASSERT(executing->Timer.state == WATCHDOG_INACTIVE);
+       _Watchdog_Initialize(&executing->Timer, sleepq_timeout,
+           0, executing);
+       _Watchdog_Insert_ticks(&executing->Timer, (Watchdog_Interval)timo);
+       _Thread_Enable_dispatch();
+#endif /* __rtems__ */
 }
 
 /*
@@ -389,6 +442,7 @@ sleepq_sleepcnt(void *wchan, int queue)
        return (sq->sq_blockedcnt[queue]);
 }
 
+#ifndef __rtems__
 /*
  * Marks the pending sleep of the current thread as interruptible and
  * makes an initial check for pending signals before putting a thread
@@ -483,6 +537,7 @@ out:
        MPASS(td->td_lock != &sc->sc_lock);
        return (ret);
 }
+#endif /* __rtems__ */
 
 /*
  * Switches to another thread if we are still asleep on a sleep queue.
@@ -491,6 +546,7 @@ out:
 static void
 sleepq_switch(void *wchan, int pri)
 {
+#ifndef __rtems__
        struct sleepqueue_chain *sc;
        struct sleepqueue *sq;
        struct thread *td;
@@ -542,6 +598,106 @@ sleepq_switch(void *wchan, int pri)
        KASSERT(TD_IS_RUNNING(td), ("running but not TDS_RUNNING"));
        CTR3(KTR_PROC, "sleepq resume: thread %p (pid %ld, %s)",
            (void *)td, (long)td->td_proc->p_pid, (void *)td->td_name);
+#else /* __rtems__ */
+       Thread_Control *executing;
+       ISR_lock_Context lock_context;
+       struct thread *td;
+       bool block;
+       bool remove;
+
+       sleepq_release(wchan);
+
+       executing = _Thread_Acquire_executing(&lock_context);
+       td = rtems_bsd_get_thread(executing);
+       BSD_ASSERT(td != NULL);
+
+       block = false;
+       remove = false;
+       switch (td->td_sq_state) {
+       case TD_SQ_TIRED:
+               BSD_ASSERT(td->td_wchan == wchan);
+               td->td_sq_state = TD_SQ_SLEEPY;
+               block = true;
+               break;
+       case TD_SQ_NIGHTMARE:
+               BSD_ASSERT(td->td_wchan == wchan);
+               td->td_sq_state = TD_SQ_PANIC;
+               remove = true;
+               break;
+       default:
+               BSD_ASSERT(td->td_wchan == NULL);
+               BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
+               break;
+       }
+
+       if (block) {
+               Per_CPU_Control *cpu_self;
+               bool unblock;
+
+               cpu_self = _Objects_Release_and_thread_dispatch_disable(
+                   &executing->Object, &lock_context);
+
+               _Giant_Acquire(cpu_self);
+               _Thread_Set_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
+               _Giant_Release(cpu_self);
+
+               _Objects_ISR_disable_and_acquire(&executing->Object,
+                   &lock_context);
+
+               unblock = false;
+               switch (td->td_sq_state) {
+               case TD_SQ_NIGHTMARE:
+                       BSD_ASSERT(td->td_wchan == wchan);
+                       td->td_sq_state = TD_SQ_PANIC;
+                       unblock = true;
+                       remove = true;
+                       break;
+               case TD_SQ_WAKEUP:
+                       BSD_ASSERT(td->td_wchan == NULL);
+                       unblock = true;
+                       break;
+               default:
+                       BSD_ASSERT(td->td_wchan == wchan);
+                       BSD_ASSERT(td->td_sq_state == TD_SQ_SLEEPY);
+                       td->td_sq_state = TD_SQ_SLEEPING;
+                       break;
+               }
+
+               _Objects_Release_and_ISR_enable(&executing->Object,
+                   &lock_context);
+
+               if (unblock) {
+                       _Giant_Acquire(cpu_self);
+                       _Watchdog_Remove(&executing->Timer);
+                       _Thread_Clear_state(executing, STATES_WAITING_FOR_BSD_WAKEUP);
+                       _Giant_Release(cpu_self);
+               }
+
+               _Thread_Dispatch_enable(cpu_self);
+
+               _Objects_ISR_disable_and_acquire(&executing->Object,
+                   &lock_context);
+
+               switch (td->td_sq_state) {
+               case TD_SQ_NIGHTMARE:
+                       BSD_ASSERT(td->td_wchan == wchan);
+                       td->td_sq_state = TD_SQ_PANIC;
+                       remove = true;
+                       break;
+               default:
+                       BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP ||
+                           td->td_sq_state == TD_SQ_PANIC);
+                       break;
+               }
+       }
+
+       _Objects_Release_and_ISR_enable(&executing->Object,
+           &lock_context);
+
+       if (remove) {
+               sleepq_remove(td, wchan);
+       }
+#endif /* __rtems__ */
 }
 
 /*
@@ -553,6 +709,7 @@ sleepq_check_timeout(void)
        struct thread *td;
 
        td = curthread;
+#ifndef __rtems__
        THREAD_LOCK_ASSERT(td, MA_OWNED);
 
        /*
@@ -581,8 +738,12 @@ sleepq_check_timeout(void)
                mi_switch(SW_INVOL | SWT_SLEEPQTIMO, NULL);
        }
        return (0);
+#else /* __rtems__ */
+       return (td->td_sq_state);
+#endif /* __rtems__ */
 }
 
+#ifndef __rtems__
 /*
  * Check to see if we were awoken by a signal.
  */
@@ -605,6 +766,7 @@ sleepq_check_signals(void)
 
        return (0);
 }
+#endif /* __rtems__ */
 
 /*
  * Block the current thread until it is awakened from its sleep queue.
@@ -612,15 +774,20 @@ sleepq_check_signals(void)
 void
 sleepq_wait(void *wchan, int pri)
 {
+#ifndef __rtems__
        struct thread *td;
 
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
+#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
+#ifndef __rtems__
        thread_unlock(td);
+#endif /* __rtems__ */
 }
 
+#ifndef __rtems__
 /*
  * Block the current thread until it is awakened from its sleep queue
  * or it is interrupted by a signal.
@@ -638,6 +805,7 @@ sleepq_wait_sig(void *wchan, int pri)
                return (rcatch);
        return (rval);
 }
+#endif /* __rtems__ */
 
 /*
  * Block the current thread until it is awakened from its sleep queue
@@ -646,19 +814,26 @@ sleepq_wait_sig(void *wchan, int pri)
 int
 sleepq_timedwait(void *wchan, int pri)
 {
+#ifndef __rtems__
        struct thread *td;
+#endif /* __rtems__ */
        int rval;
 
+#ifndef __rtems__
        td = curthread;
        MPASS(!(td->td_flags & TDF_SINTR));
        thread_lock(td);
+#endif /* __rtems__ */
        sleepq_switch(wchan, pri);
        rval = sleepq_check_timeout();
+#ifndef __rtems__
        thread_unlock(td);
+#endif /* __rtems__ */
 
        return (rval);
 }
 
+#ifndef __rtems__
 /*
  * Block the current thread until it is awakened from its sleep queue,
  * it is interrupted by a signal, or it times out waiting to be awakened.
@@ -678,6 +853,7 @@ sleepq_timedwait_sig(void *wchan, int pri)
                return (rvals);
        return (rvalt);
 }
+#endif /* __rtems__ */
 
 /*
  * Returns the type of sleepqueue given a waitchannel.
@@ -709,6 +885,13 @@ static int
 sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
 {
        struct sleepqueue_chain *sc;
+#ifdef __rtems__
+       Thread_Control *thread;
+       ISR_lock_Context lock_context;
+       bool unblock;
+
+       BSD_ASSERT(sq != NULL);
+#endif /* __rtems__ */
 
        MPASS(td != NULL);
        MPASS(sq->sq_wchan != NULL);
@@ -740,9 +923,15 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
        } else
                td->td_sleepqueue = LIST_FIRST(&sq->sq_free);
        LIST_REMOVE(td->td_sleepqueue, sq_hash);
+#ifdef __rtems__
+       (void)sc;
+       thread = td->td_thread;
+       _Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);
+#endif /* __rtems__ */
 
        td->td_wmesg = NULL;
        td->td_wchan = NULL;
+#ifndef __rtems__
        td->td_flags &= ~TDF_SINTR;
 
        CTR3(KTR_PROC, "sleepq_wakeup: thread %p (pid %ld, %s)",
@@ -764,6 +953,39 @@ sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri)
                TD_CLR_SLEEPING(td);
                return (setrunnable(td));
        }
+#else /* __rtems__ */
+       unblock = _Watchdog_Is_active(&thread->Timer);
+       switch (td->td_sq_state) {
+       case TD_SQ_SLEEPING:
+               unblock = true;
+               /* FALLTHROUGH */
+       case TD_SQ_TIRED:
+       case TD_SQ_SLEEPY:
+       case TD_SQ_NIGHTMARE:
+               td->td_sq_state = TD_SQ_WAKEUP;
+               break;
+       default:
+               BSD_ASSERT(td->td_sq_state == TD_SQ_PANIC);
+               break;
+       }
+
+       if (unblock) {
+               Per_CPU_Control *cpu_self;
+
+               cpu_self = _Objects_Release_and_thread_dispatch_disable(
+                   &thread->Object, &lock_context);
+               _Giant_Acquire(cpu_self);
+
+               _Watchdog_Remove(&thread->Timer);
+               _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
+
+               _Giant_Release(cpu_self);
+               _Thread_Dispatch_enable(cpu_self);
+       } else {
+               _Objects_Release_and_ISR_enable(&thread->Object,
+                   &lock_context);
+       }
+#endif /* __rtems__ */
        return (0);
 }
 
@@ -811,7 +1033,11 @@ int
 sleepq_signal(void *wchan, int flags, int pri, int queue)
 {
        struct sleepqueue *sq;
+#ifndef __rtems__
        struct thread *td, *besttd;
+#else /* __rtems__ */
+       struct thread *besttd;
+#endif /* __rtems__ */
        int wakeup_swapper;
 
        CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags);
@@ -823,6 +1049,7 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
        KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE),
            ("%s: mismatch between sleep/wakeup and cv_*", __func__));
 
+#ifndef __rtems__
        /*
         * Find the highest priority thread on the queue. If there is a
         * tie, use the thread that first appears in the queue as it has
@@ -834,6 +1061,9 @@ sleepq_signal(void *wchan, int flags, int pri, int queue)
                if (besttd == NULL || td->td_priority < besttd->td_priority)
                        besttd = td;
        }
+#else /* __rtems__ */
+       besttd = TAILQ_FIRST(&sq->sq_blocked[queue]);
+#endif /* __rtems__ */
        MPASS(besttd != NULL);
        thread_lock(besttd);
        wakeup_swapper = sleepq_resume_thread(sq, besttd, pri);
@@ -871,6 +1101,7 @@ sleepq_broadcast(void *wchan, int flags, int pri, int queue)
        return (wakeup_swapper);
 }
 
+#ifndef __rtems__
 /*
  * Time sleeping threads out. When the timeout expires, the thread is
  * removed from the sleep queue and made runnable if it is still asleep.
@@ -940,6 +1171,52 @@ sleepq_timeout(void *arg)
        if (wakeup_swapper)
                kick_proc0();
 }
+#else /* __rtems__ */
+static void
+sleepq_timeout(Objects_Id id, void *arg)
+{
+       Thread_Control *thread;
+       struct thread *td;
+       ISR_lock_Context lock_context;
+       bool unblock;
+
+       thread = arg;
+       td = rtems_bsd_get_thread(thread);
+       BSD_ASSERT(td != NULL);
+
+       _Objects_ISR_disable_and_acquire(&thread->Object, &lock_context);
+
+       unblock = false;
+       switch (td->td_sq_state) {
+       case TD_SQ_SLEEPING:
+               unblock = true;
+               /* Fall through */
+       case TD_SQ_TIRED:
+       case TD_SQ_SLEEPY:
+               td->td_sq_state = TD_SQ_NIGHTMARE;
+               break;
+       default:
+               BSD_ASSERT(td->td_sq_state == TD_SQ_WAKEUP);
+               break;
+       }
+
+       if (unblock) {
+               Per_CPU_Control *cpu_self;
+
+               cpu_self = _Objects_Release_and_thread_dispatch_disable(
+                   &thread->Object, &lock_context);
+               _Giant_Acquire(cpu_self);
+
+               _Thread_Clear_state(thread, STATES_WAITING_FOR_BSD_WAKEUP);
+
+               _Giant_Release(cpu_self);
+               _Thread_Dispatch_enable(cpu_self);
+       } else {
+               _Objects_Release_and_ISR_enable(&thread->Object,
+                   &lock_context);
+       }
+}
+#endif /* __rtems__ */
 
 /*
  * Resumes a specific thread from the sleep queue associated with a specific
@@ -980,6 +1257,7 @@ sleepq_remove(struct thread *td, void *wchan)
                kick_proc0();
 }
 
+#ifndef __rtems__
 /*
  * Abort a thread as if an interrupt had occurred. Only abort
  * interruptible waits (unfortunately it isn't safe to abort others).
@@ -1021,6 +1299,7 @@ sleepq_abort(struct thread *td, int intrval)
        /* Thread is asleep on sleep queue sq, so wake it up. */
        return (sleepq_resume_thread(sq, td, 0));
 }
+#endif /* __rtems__ */
 
 #ifdef SLEEPQUEUE_PROFILING
 #define SLEEPQ_PROF_LOCATIONS   1024
@@ -195,6 +195,16 @@ struct rusage_ext {
        uint64_t rux_su;        /* (c) Previous sys time in usec. */
        uint64_t rux_tu;        /* (c) Previous total time in usec. */
 };
+#ifdef __rtems__
+enum thread_sq_states {
+       TD_SQ_WAKEUP,
+       TD_SQ_PANIC = EWOULDBLOCK,
+       TD_SQ_TIRED,
+       TD_SQ_SLEEPY,
+       TD_SQ_SLEEPING,
+       TD_SQ_NIGHTMARE
+};
+#endif /* __rtems__ */
 
 /*
  * Kernel runnable context (thread).
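As a reader's aid (inferred from the subr_sleepqueue.c hunks above, not text from the commit itself), the thread_sq_states values introduced here are driven roughly as follows:

/*
 * td_sq_state transitions in the RTEMS port, as implemented above:
 *
 *   sleepq_add()            any state -> TD_SQ_TIRED      (thread registered on queue)
 *   sleepq_switch()         TIRED -> SLEEPY -> SLEEPING   (thread actually blocks)
 *   sleepq_timeout()        TIRED/SLEEPY/SLEEPING -> NIGHTMARE   (watchdog fired)
 *   sleepq_resume_thread()  TIRED/SLEEPY/SLEEPING/NIGHTMARE -> WAKEUP
 *   sleepq_switch()         NIGHTMARE -> PANIC            (timeout won the race)
 *
 * sleepq_check_timeout() simply returns td_sq_state, so TD_SQ_WAKEUP (0)
 * reads as a normal wakeup and TD_SQ_PANIC (== EWOULDBLOCK) as a timeout.
 */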
@@ -212,7 +222,9 @@ struct thread {
        struct proc *td_proc;   /* (*) Associated process. */
        TAILQ_ENTRY(thread) td_plist;   /* (*) All threads in this proc. */
        TAILQ_ENTRY(thread) td_runq;    /* (t) Run queue. */
+#endif /* __rtems__ */
        TAILQ_ENTRY(thread) td_slpq;    /* (t) Sleep queue. */
+#ifndef __rtems__
        TAILQ_ENTRY(thread) td_lockq;   /* (t) Lock queue. */
        LIST_ENTRY(thread) td_hash;     /* (d) Hash chain. */
        struct cpuset *td_cpuset;       /* (t) CPU affinity mask. */
@@ -233,11 +245,14 @@ struct thread {
        int td_inhibitors;      /* (t) Why can not run. */
        int td_pflags;          /* (k) Private thread (TDP_*) flags. */
        int td_dupfd;           /* (k) Ret value from fdopen. XXX */
-       int td_sqqueue;         /* (t) Sleepqueue queue blocked on. */
 #endif /* __rtems__ */
+#ifdef __rtems__
+       enum thread_sq_states td_sq_state;
+#endif /* __rtems__ */
+       int td_sqqueue;         /* (t) Sleepqueue queue blocked on. */
        void *td_wchan;         /* (t) Sleep address. */
-#ifndef __rtems__
        const char *td_wmesg;   /* (t) Reason for sleep. */
+#ifndef __rtems__
        u_char td_lastcpu;      /* (t) Last cpu we were on. */
        u_char td_oncpu;        /* (t) Which cpu we are on. */
        volatile u_char td_owepreempt;  /* (k*) Preempt on last critical_exit */
@@ -337,12 +352,16 @@ struct thread {
 struct mtx *thread_lock_block(struct thread *);
 void thread_lock_unblock(struct thread *, struct mtx *);
 void thread_lock_set(struct thread *, struct mtx *);
+#ifndef __rtems__
 #define THREAD_LOCK_ASSERT(td, type) \
 do { \
        struct mtx *__m = (td)->td_lock; \
        if (__m != &blocked_lock) \
                mtx_assert(__m, (type)); \
 } while (0)
+#else /* __rtems__ */
+#define THREAD_LOCK_ASSERT(td, type)
+#endif /* __rtems__ */
 
 #ifdef INVARIANTS
 #define THREAD_LOCKPTR_ASSERT(td, lock) \
@@ -889,7 +908,11 @@ void fork_exit(void (*)(void *, struct trapframe *), void *,
 void fork_return(struct thread *, struct trapframe *);
 int inferior(struct proc *p);
 void kern_yield(int);
+#ifndef __rtems__
 void kick_proc0(void);
+#else /* __rtems__ */
+#define kick_proc0()
+#endif /* __rtems__ */
 int leavepgrp(struct proc *p);
 int maybe_preempt(struct thread *td);
 void maybe_yield(void);
@@ -316,6 +316,8 @@ rtems_mdns_gethostname().
 
 == Issues and TODO
 
+* Priority queues are broken with clustered scheduling.
+
 * Per-CPU data should be enabled once the new stack is ready for SMP.
 
 * Per-CPU NETISR(9) should be enabled onece the new stack is ready for SMP.
@@ -43,20 +43,11 @@
 #include <rtems/bsd/sys/param.h>
 #include <rtems/bsd/sys/types.h>
 #include <sys/proc.h>
-#include <sys/queue.h>
 
-#include <rtems/score/threadq.h>
 #include <rtems.h>
 
 #define BSD_TASK_NAME rtems_build_name('_', 'B', 'S', 'D')
 
-struct sleepqueue {
-       Thread_queue_Control sq_blocked;
-       LIST_ENTRY(sleepqueue) sq_hash;
-       LIST_HEAD(, sleepqueue) sq_free;
-       void *sq_wchan;
-};
-
 struct thread *
 rtems_bsd_get_thread(const Thread_Control *thread);
 
@@ -1 +0,0 @@
-/* EMPTY */
@@ -49,6 +49,7 @@
 #include <sys/kthread.h>
 #include <sys/malloc.h>
 #include <sys/selinfo.h>
+#include <sys/sleepqueue.h>
 
 #include <rtems/bsd/bsd.h>
 
@@ -100,23 +101,14 @@ struct thread *
 rtems_bsd_thread_create(Thread_Control *thread, int wait)
 {
        struct thread *td = malloc(sizeof(*td), M_TEMP, M_ZERO | wait);
-       struct sleepqueue *sq = malloc(sizeof(*sq), M_TEMP, wait);
+       struct sleepqueue *sq = sleepq_alloc();
 
        if (td != NULL && sq != NULL) {
                td->td_thread = thread;
                td->td_sleepqueue = sq;
-
-               LIST_INIT(&sq->sq_free);
-
-               _Thread_queue_Initialize(
-                       &sq->sq_blocked,
-                       THREAD_QUEUE_DISCIPLINE_PRIORITY,
-                       STATES_WAITING_FOR_BSD_WAKEUP,
-                       EWOULDBLOCK
-               );
        } else {
                free(td, M_TEMP);
-               free(sq, M_TEMP);
+               sleepq_free(sq);
                td = NULL;
        }
 
@@ -185,7 +177,7 @@ rtems_bsd_extension_thread_delete(
 
        if (td != NULL) {
                seltdfini(td);
-               free(td->td_sleepqueue, M_TEMP);
+               sleepq_free(td->td_sleepqueue);
                free(td, M_TEMP);
        }
 }
|
Loading…
x
Reference in New Issue
Block a user