TASKQUEUE(9): Use FreeBSD implementation

Sebastian Huber 2013-10-28 10:45:22 +01:00
parent 795c5e6610
commit ea87228a26
5 changed files with 514 additions and 353 deletions
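For orientation before the diff: a minimal sketch of how kernel code typically drives the TASKQUEUE(9) interface this commit adopts. All example_* names are hypothetical; the calls themselves (TASK_INIT(), taskqueue_create(), taskqueue_start_threads(), taskqueue_enqueue(), taskqueue_drain(), taskqueue_free()) are the ones implemented by the new subr_taskqueue.c below.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>

static struct taskqueue *example_tq;	/* hypothetical queue */
static struct task example_task;	/* hypothetical task */

/* Runs in the queue's service thread; "pending" is the number of
 * taskqueue_enqueue() calls coalesced since the task last ran. */
static void
example_task_fn(void *context, int pending)
{
	/* deferred work goes here */
}

static void
example_start(void)
{
	TASK_INIT(&example_task, 0, example_task_fn, NULL);
	/* Pass &example_tq so taskqueue_thread_enqueue() can find the queue. */
	example_tq = taskqueue_create("example", M_WAITOK,
	    taskqueue_thread_enqueue, &example_tq);
	taskqueue_start_threads(&example_tq, 1, PWAIT, "example taskq");
}

static void
example_stop(void)
{
	taskqueue_drain(example_tq, &example_task);	/* wait until idle */
	taskqueue_free(example_tq);
}

A producer, e.g. an interrupt handler, then calls taskqueue_enqueue(example_tq, &example_task); repeated enqueues before the task runs only increment ta_pending, as taskqueue_enqueue() below shows.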

View File

@@ -83,7 +83,6 @@ LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-syscall-api.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-sysctlbyname.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-sysctl.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-sysctlnametomib.c
-LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-taskqueue.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-thread.c
LIB_C_FILES += rtemsbsd/rtems/rtems-bsd-timesupport.c
LIB_C_FILES += rtemsbsd/rtems/rtems-kvm.c
@@ -144,6 +143,7 @@ LIB_C_FILES += freebsd/sys/kern/subr_kobj.c
LIB_C_FILES += freebsd/sys/kern/subr_module.c
LIB_C_FILES += freebsd/sys/kern/subr_rman.c
LIB_C_FILES += freebsd/sys/kern/subr_sbuf.c
+LIB_C_FILES += freebsd/sys/kern/subr_taskqueue.c
LIB_C_FILES += freebsd/sys/kern/subr_unit.c
LIB_C_FILES += freebsd/sys/kern/sys_generic.c
LIB_C_FILES += freebsd/sys/kern/uipc_accf.c

View File

@@ -640,7 +640,6 @@ rtems.addRTEMSSourceFiles(
'rtems/rtems-bsd-sysctlbyname.c',
'rtems/rtems-bsd-sysctl.c',
'rtems/rtems-bsd-sysctlnametomib.c',
-'rtems/rtems-bsd-taskqueue.c',
'rtems/rtems-bsd-thread.c',
'rtems/rtems-bsd-timesupport.c',
'rtems/rtems-kvm.c',
@@ -804,6 +803,7 @@ base.addSourceFiles(
'sys/kern/subr_module.c',
'sys/kern/subr_rman.c',
'sys/kern/subr_sbuf.c',
+'sys/kern/subr_taskqueue.c',
'sys/kern/subr_unit.c',
'sys/kern/sys_generic.c',
'sys/kern/uipc_accf.c',
@@ -1175,7 +1175,6 @@ devNet.addSourceFiles(
devNic = Module('dev_nic')
devNic.addHeaderFiles(
[
-# 'sys/taskqueue.h',
'sys/sys/pciio.h',
'sys/dev/random/randomdev_soft.h',
'sys/sys/eventvar.h',

View File

@@ -0,0 +1,509 @@
#include <machine/rtems-bsd-config.h>
/*-
* Copyright (c) 2000 Doug Rabson
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/interrupt.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/taskqueue.h>
#include <rtems/bsd/sys/unistd.h>
#include <machine/stdarg.h>
static MALLOC_DEFINE(M_TASKQUEUE, "taskqueue", "Task Queues");
#ifndef __rtems__
static void *taskqueue_giant_ih;
#endif /* __rtems__ */
static void *taskqueue_ih;
struct taskqueue_busy {
struct task *tb_running;
TAILQ_ENTRY(taskqueue_busy) tb_link;
};
struct taskqueue {
STAILQ_HEAD(, task) tq_queue;
const char *tq_name;
taskqueue_enqueue_fn tq_enqueue;
void *tq_context;
TAILQ_HEAD(, taskqueue_busy) tq_active;
struct mtx tq_mutex;
struct thread **tq_threads;
int tq_tcount;
#ifndef __rtems__
int tq_spin;
#endif /* __rtems__ */
int tq_flags;
};
#define TQ_FLAGS_ACTIVE (1 << 0)
#define TQ_FLAGS_BLOCKED (1 << 1)
#define TQ_FLAGS_PENDING (1 << 2)
static void taskqueue_run_locked(struct taskqueue *);
static __inline void
TQ_LOCK(struct taskqueue *tq)
{
#ifndef __rtems__
if (tq->tq_spin)
mtx_lock_spin(&tq->tq_mutex);
else
#endif /* __rtems__ */
mtx_lock(&tq->tq_mutex);
}
static __inline void
TQ_UNLOCK(struct taskqueue *tq)
{
#ifndef __rtems__
if (tq->tq_spin)
mtx_unlock_spin(&tq->tq_mutex);
else
#endif /* __rtems__ */
mtx_unlock(&tq->tq_mutex);
}
static __inline int
TQ_SLEEP(struct taskqueue *tq, void *p, struct mtx *m, int pri, const char *wm,
int t)
{
#ifndef __rtems__
if (tq->tq_spin)
return (msleep_spin(p, m, wm, t));
#endif /* __rtems__ */
return (msleep(p, m, pri, wm, t));
}
static struct taskqueue *
_taskqueue_create(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context,
int mtxflags, const char *mtxname)
{
struct taskqueue *queue;
queue = malloc(sizeof(struct taskqueue), M_TASKQUEUE, mflags | M_ZERO);
if (!queue)
return NULL;
STAILQ_INIT(&queue->tq_queue);
TAILQ_INIT(&queue->tq_active);
queue->tq_name = name;
queue->tq_enqueue = enqueue;
queue->tq_context = context;
#ifndef __rtems__
queue->tq_spin = (mtxflags & MTX_SPIN) != 0;
#else /* __rtems__ */
/*
* FIXME: The MTX_SPIN flag is ignored here; honouring it is a potential
* performance optimization, and maybe also an issue for correctness.
*/
#endif /* __rtems__ */
queue->tq_flags |= TQ_FLAGS_ACTIVE;
mtx_init(&queue->tq_mutex, mtxname, NULL, mtxflags);
return queue;
}
struct taskqueue *
taskqueue_create(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context)
{
return _taskqueue_create(name, mflags, enqueue, context,
MTX_DEF, "taskqueue");
}
/*
* Signal a taskqueue thread to terminate.
*/
static void
taskqueue_terminate(struct thread **pp, struct taskqueue *tq)
{
while (tq->tq_tcount > 0) {
wakeup(tq);
TQ_SLEEP(tq, pp, &tq->tq_mutex, PWAIT, "taskqueue_destroy", 0);
}
}
void
taskqueue_free(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags &= ~TQ_FLAGS_ACTIVE;
taskqueue_run_locked(queue);
taskqueue_terminate(queue->tq_threads, queue);
KASSERT(TAILQ_EMPTY(&queue->tq_active), ("Tasks still running?"));
mtx_destroy(&queue->tq_mutex);
free(queue->tq_threads, M_TASKQUEUE);
free(queue, M_TASKQUEUE);
}
int
taskqueue_enqueue(struct taskqueue *queue, struct task *task)
{
struct task *ins;
struct task *prev;
TQ_LOCK(queue);
/*
* Count multiple enqueues.
*/
if (task->ta_pending) {
task->ta_pending++;
TQ_UNLOCK(queue);
return 0;
}
/*
* Optimise the case when all tasks have the same priority.
*/
prev = STAILQ_LAST(&queue->tq_queue, task, ta_link);
if (!prev || prev->ta_priority >= task->ta_priority) {
STAILQ_INSERT_TAIL(&queue->tq_queue, task, ta_link);
} else {
prev = NULL;
for (ins = STAILQ_FIRST(&queue->tq_queue); ins;
prev = ins, ins = STAILQ_NEXT(ins, ta_link))
if (ins->ta_priority < task->ta_priority)
break;
if (prev)
STAILQ_INSERT_AFTER(&queue->tq_queue, prev, task, ta_link);
else
STAILQ_INSERT_HEAD(&queue->tq_queue, task, ta_link);
}
task->ta_pending = 1;
if ((queue->tq_flags & TQ_FLAGS_BLOCKED) == 0)
queue->tq_enqueue(queue->tq_context);
else
queue->tq_flags |= TQ_FLAGS_PENDING;
TQ_UNLOCK(queue);
return 0;
}
void
taskqueue_block(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags |= TQ_FLAGS_BLOCKED;
TQ_UNLOCK(queue);
}
void
taskqueue_unblock(struct taskqueue *queue)
{
TQ_LOCK(queue);
queue->tq_flags &= ~TQ_FLAGS_BLOCKED;
if (queue->tq_flags & TQ_FLAGS_PENDING) {
queue->tq_flags &= ~TQ_FLAGS_PENDING;
queue->tq_enqueue(queue->tq_context);
}
TQ_UNLOCK(queue);
}
static void
taskqueue_run_locked(struct taskqueue *queue)
{
struct taskqueue_busy tb;
struct task *task;
int pending;
mtx_assert(&queue->tq_mutex, MA_OWNED);
tb.tb_running = NULL;
TAILQ_INSERT_TAIL(&queue->tq_active, &tb, tb_link);
while (STAILQ_FIRST(&queue->tq_queue)) {
/*
* Carefully remove the first task from the queue and
* zero its pending count.
*/
task = STAILQ_FIRST(&queue->tq_queue);
STAILQ_REMOVE_HEAD(&queue->tq_queue, ta_link);
pending = task->ta_pending;
task->ta_pending = 0;
tb.tb_running = task;
TQ_UNLOCK(queue);
task->ta_func(task->ta_context, pending);
TQ_LOCK(queue);
tb.tb_running = NULL;
wakeup(task);
}
TAILQ_REMOVE(&queue->tq_active, &tb, tb_link);
}
void
taskqueue_run(struct taskqueue *queue)
{
TQ_LOCK(queue);
taskqueue_run_locked(queue);
TQ_UNLOCK(queue);
}
static int
task_is_running(struct taskqueue *queue, struct task *task)
{
struct taskqueue_busy *tb;
mtx_assert(&queue->tq_mutex, MA_OWNED);
TAILQ_FOREACH(tb, &queue->tq_active, tb_link) {
if (tb->tb_running == task)
return (1);
}
return (0);
}
void
taskqueue_drain(struct taskqueue *queue, struct task *task)
{
#ifndef __rtems__
if (queue->tq_spin) { /* XXX */
mtx_lock_spin(&queue->tq_mutex);
while (task->ta_pending != 0 || task_is_running(queue, task))
msleep_spin(task, &queue->tq_mutex, "-", 0);
mtx_unlock_spin(&queue->tq_mutex);
} else {
#endif /* __rtems__ */
WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, __func__);
mtx_lock(&queue->tq_mutex);
while (task->ta_pending != 0 || task_is_running(queue, task))
msleep(task, &queue->tq_mutex, PWAIT, "-", 0);
mtx_unlock(&queue->tq_mutex);
#ifndef __rtems__
}
#endif /* __rtems__ */
}
static void
taskqueue_swi_enqueue(void *context)
{
swi_sched(taskqueue_ih, 0);
}
static void
taskqueue_swi_run(void *dummy)
{
taskqueue_run(taskqueue_swi);
}
#ifndef __rtems__
static void
taskqueue_swi_giant_enqueue(void *context)
{
swi_sched(taskqueue_giant_ih, 0);
}
static void
taskqueue_swi_giant_run(void *dummy)
{
taskqueue_run(taskqueue_swi_giant);
}
#endif /* __rtems__ */
int
taskqueue_start_threads(struct taskqueue **tqp, int count, int pri,
const char *name, ...)
{
va_list ap;
struct thread *td;
struct taskqueue *tq;
int i, error;
char ktname[MAXCOMLEN + 1];
if (count <= 0)
return (EINVAL);
tq = *tqp;
va_start(ap, name);
vsnprintf(ktname, sizeof(ktname), name, ap);
va_end(ap);
tq->tq_threads = malloc(sizeof(struct thread *) * count, M_TASKQUEUE,
M_NOWAIT | M_ZERO);
if (tq->tq_threads == NULL) {
printf("%s: no memory for %s threads\n", __func__, ktname);
return (ENOMEM);
}
for (i = 0; i < count; i++) {
if (count == 1)
error = kthread_add(taskqueue_thread_loop, tqp, NULL,
&tq->tq_threads[i], RFSTOPPED, 0, "%s", ktname);
else
error = kthread_add(taskqueue_thread_loop, tqp, NULL,
&tq->tq_threads[i], RFSTOPPED, 0,
"%s_%d", ktname, i);
if (error) {
/* should be ok to continue, taskqueue_free will dtrt */
printf("%s: kthread_add(%s): error %d", __func__,
ktname, error);
tq->tq_threads[i] = NULL; /* paranoid */
} else
tq->tq_tcount++;
}
#ifndef __rtems__
for (i = 0; i < count; i++) {
if (tq->tq_threads[i] == NULL)
continue;
td = tq->tq_threads[i];
thread_lock(td);
sched_prio(td, pri);
sched_add(td, SRQ_BORING);
thread_unlock(td);
}
#else /* __rtems__ */
(void) td;
#endif /* __rtems__ */
return (0);
}
void
taskqueue_thread_loop(void *arg)
{
struct taskqueue **tqp, *tq;
tqp = arg;
tq = *tqp;
TQ_LOCK(tq);
while ((tq->tq_flags & TQ_FLAGS_ACTIVE) != 0) {
taskqueue_run_locked(tq);
/*
* Because taskqueue_run_locked() can drop tq_mutex, we need to
* check whether the TQ_FLAGS_ACTIVE flag was cleared in the
* meantime, which would mean we missed a wakeup.
*/
if ((tq->tq_flags & TQ_FLAGS_ACTIVE) == 0)
break;
TQ_SLEEP(tq, tq, &tq->tq_mutex, 0, "-", 0);
}
/* rendezvous with thread that asked us to terminate */
tq->tq_tcount--;
wakeup_one(tq->tq_threads);
TQ_UNLOCK(tq);
kthread_exit();
}
void
taskqueue_thread_enqueue(void *context)
{
struct taskqueue **tqp, *tq;
tqp = context;
tq = *tqp;
mtx_assert(&tq->tq_mutex, MA_OWNED);
wakeup_one(tq);
}
TASKQUEUE_DEFINE(swi, taskqueue_swi_enqueue, NULL,
swi_add(NULL, "task queue", taskqueue_swi_run, NULL, SWI_TQ,
INTR_MPSAFE, &taskqueue_ih));
#ifndef __rtems__
TASKQUEUE_DEFINE(swi_giant, taskqueue_swi_giant_enqueue, NULL,
swi_add(NULL, "Giant taskq", taskqueue_swi_giant_run,
NULL, SWI_TQ_GIANT, 0, &taskqueue_giant_ih));
#endif /* __rtems__ */
TASKQUEUE_DEFINE_THREAD(thread);
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags,
taskqueue_enqueue_fn enqueue, void *context)
{
return _taskqueue_create(name, mflags, enqueue, context,
MTX_SPIN, "fast_taskqueue");
}
/* NB: for backwards compatibility */
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
return taskqueue_enqueue(queue, task);
}
static void *taskqueue_fast_ih;
static void
taskqueue_fast_enqueue(void *context)
{
swi_sched(taskqueue_fast_ih, 0);
}
static void
taskqueue_fast_run(void *dummy)
{
taskqueue_run(taskqueue_fast);
}
TASKQUEUE_FAST_DEFINE(fast, taskqueue_fast_enqueue, NULL,
swi_add(NULL, "Fast task queue", taskqueue_fast_run, NULL,
SWI_TQ_FAST, INTR_MPSAFE, &taskqueue_fast_ih));
int
taskqueue_member(struct taskqueue *queue, struct thread *td)
{
int i, j, ret = 0;
TQ_LOCK(queue);
for (i = 0, j = 0; ; i++) {
if (queue->tq_threads[i] == NULL)
continue;
if (queue->tq_threads[i] == td) {
ret = 1;
break;
}
if (++j >= queue->tq_tcount)
break;
}
TQ_UNLOCK(queue);
return (ret);
}
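Not part of the diff, but context for the TASKQUEUE_DEFINE() uses above: the macro lives in <sys/taskqueue.h> and, roughly (from memory, modulo the exact FreeBSD version), expands along these lines, creating the queue from a SYSINIT and then evaluating the init expression, which is where the swi_add() calls above come in:

/* Approximate expansion of TASKQUEUE_DEFINE(name, enqueue, context, init) */
struct taskqueue *taskqueue_name;

static void
taskqueue_define_name(void *arg)
{
	taskqueue_name = taskqueue_create("name", M_WAITOK,
	    (enqueue), (context));
	init;	/* e.g. swi_add(..., &taskqueue_ih) */
}

SYSINIT(taskqueue_name, SI_SUB_CONFIGURE, SI_ORDER_SECOND,
    taskqueue_define_name, NULL);

TASKQUEUE_DEFINE_THREAD(name) is the same idea, with a thread-based enqueue hook and a taskqueue_start_threads() call as the init expression.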

View File

@@ -92,7 +92,9 @@ the current Git submodule commit is this
* get_cyclecount(): The implementation is a security problem.
* What to do with the priority parameter present in the FreeBSD synchronization
- primitives?
+ primitives and the thread creation functions?
+* TASKQUEUE(9): Support spin mutexes.
* ZONE(9): Review allocator lock usage in rtems-bsd-chunk.c.
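For concreteness, the "priority parameter" in that item is e.g. the PWAIT argument that the new taskqueue code passes to msleep() in taskqueue_drain() and to TQ_SLEEP() in taskqueue_terminate(); the open question is how such FreeBSD priority values should map onto RTEMS thread priorities.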

View File

@@ -1,349 +0,0 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
* @brief TODO.
*/
/*
* COPYRIGHT (c) 2012.
* On-Line Applications Research Corporation (OAR).
* All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <rtems.h>
#include <rtems/error.h>
#include <machine/rtems-bsd-taskqueue.h>
/*
#define STATIC static
*/
#undef DEBUG
#ifdef DEBUG
#include <stdio.h>
#ifndef STATIC
#define STATIC
#endif
#else
#ifndef STATIC
#define STATIC static
#endif
#endif
#define TQ_WAKE_EVENT RTEMS_EVENT_0
/* This implementation is extremely simple; we assume
* that all taskqueues (and as a matter of fact there is
* only a single one) are manipulated with the rtems
* bsdnet semaphore held. I.e.,
* taskqueue_enqueue()
* taskqueue_drain()
* etc.
* are called from an environment that holds the
* bsdnet semaphore.
* Likewise, the thread that works the taskqueue
* holds the semaphore while doing so.
*
*/
/* Use a singly-linked list; 'drain', which would benefit from a
* doubly-linked list, is seldom used and performance doesn't
* matter much there. OTOH, the frequent case of working
* the list + enqueueing is more efficient with a singly-linked
* list.
struct task {
struct task *ta_next;
int ta_pending;
int ta_priority;
task_fn ta_fn;
void *ta_fn_arg;
};
*/
struct taskqueue {
struct task anchor;
struct task *tail;
tq_enq_fn enq_fn;
void *enq_fn_arg;
rtems_id tid;
};
STATIC struct taskqueue the_taskqueue = {
{ 0, 0, 0, 0, 0 },
&the_taskqueue.anchor,
taskqueue_thread_enqueue,
&taskqueue_fast,
0
};
struct taskqueue *taskqueue_fast = &the_taskqueue;
struct taskqueue *taskqueue_swi = NULL;
struct taskqueue *
taskqueue_create(const char *name, int mflags, tq_enq_fn enq_fn, void *arg)
{
if ( enq_fn != taskqueue_thread_enqueue )
rtems_panic("rtems_taskqueue: attempt to create non-standard TQ; implementation needs to be modified\n");
return &the_taskqueue;
}
struct taskqueue *
taskqueue_create_fast(const char *name, int mflags, tq_enq_fn enq_fn, void *arg)
{
return taskqueue_create(name, mflags, enq_fn, arg);
}
/* taskqueue_enqueue must be allowed from an ISR;
* hence, all critical list manipulation must lock out
* interrupts...
*/
int
taskqueue_enqueue(struct taskqueue *tq, struct task *ta)
{
rtems_interrupt_level l;
rtems_interrupt_disable(l);
if ( 0 == ta->ta_pending ++ ) {
/* hook into list */
ta->ta_next = 0;
tq->tail->ta_next = ta;
tq->tail = ta;
}
tq->enq_fn(tq->enq_fn_arg);
rtems_interrupt_enable(l);
return 0;
}
int
taskqueue_enqueue_fast(struct taskqueue *queue, struct task *task)
{
return taskqueue_enqueue(queue, task);
}
void
taskqueue_thread_enqueue(void *ctxt)
{
int dopost;
/* pointer-to-pointer is what bsd provides; we currently
* follow the scheme even though we don't directly use the argument
* passed to taskqueue_create...
*/
struct taskqueue *tq = *(struct taskqueue **)ctxt;
/* If this is the first entry on the list then the
* task needs to be notified...
*/
dopost = ( tq->anchor.ta_next == tq->tail && 1 == tq->tail->ta_pending );
if ( dopost )
rtems_event_send(tq->tid, TQ_WAKE_EVENT);
}
/* Returns 0 on success */
int
taskqueue_start_threads(struct taskqueue **ptq, int count, int prio, const char *fmt, ...)
{
if ( count != 1 )
rtems_panic("rtems_taskqueue: taskqueue_start_threads cannot currently deal with count != 1\n");
/* Do (non thread-safe) lazy init as a fallback */
if ( ! the_taskqueue.tid )
rtems_taskqueue_initialize();
return 0;
}
void
taskqueue_drain(struct taskqueue *tq, struct task *ta)
{
rtems_interrupt_level l;
struct task *p, *q;
int i;
/* find predecessor; searching the list should be
* safe; an ISR might append a new record to the tail
* while we are working but that should be OK.
*/
for ( p = &tq->anchor; (q = p->ta_next); p=q ) {
if ( q == ta ) {
rtems_interrupt_disable(l);
/* found; do work */
/* remember 'pending' count and extract */
i = ta->ta_pending;
ta->ta_pending = 0;
p->ta_next = ta->ta_next;
ta->ta_next = 0;
/* adjust tail */
if ( tq->tail == q )
tq->tail = p;
rtems_interrupt_enable(l);
for ( ; i>0; i-- ) {
ta->ta_fn(ta->ta_fn_arg, i);
}
return;
}
}
}
/* work the task queue and return
* nonzero if the list is not empty
* (which means that some callback has
* rescheduled itself)
*/
static void *
taskqueue_work(struct taskqueue *tq)
{
rtems_interrupt_level l;
struct task *p, *q;
task_fn f;
void *arg;
int i;
/* work off a temporary list in case any callback reschedules
* itself or if new tasks are queued from an ISR.
*/
rtems_interrupt_disable(l);
p = tq->anchor.ta_next;
tq->anchor.ta_next = 0;
tq->tail = &tq->anchor;
rtems_interrupt_enable(l);
while ( (q=p) ) {
rtems_interrupt_disable(l);
i = q->ta_pending;
q->ta_pending = 0;
/* extract */
p = q->ta_next;
q->ta_next = 0;
f = q->ta_fn;
arg = q->ta_fn_arg;
rtems_interrupt_enable(l);
for ( ; i>0; i-- ) {
f(arg, i);
}
}
return tq->anchor.ta_next;
}
void
taskqueue_free(struct taskqueue *tq)
{
taskqueue_work(tq);
}
static void
taskqueueDoWork(void *arg)
{
struct taskqueue *tq = arg;
rtems_event_set evs;
rtems_status_code sc;
while ( 1 ) {
sc = rtems_event_receive(TQ_WAKE_EVENT, RTEMS_EVENT_ANY | RTEMS_WAIT, RTEMS_NO_TIMEOUT, &evs);
if ( RTEMS_SUCCESSFUL != sc ) {
rtems_error(sc, "rtems_taskqueue: taskqueueDoWork() unable to receive wakeup event\n");
rtems_panic("Can't proceed\n");
}
if ( taskqueue_work(tq) ) {
#if 0
/* chance to reschedule */
rtems_bsdnet_semaphore_release();
rtems_task_wake_after(0);
rtems_bsdnet_semaphore_obtain();
#else
/* hopefully, releasing the semaphore (as part of bsdnet_event_receive)
* and obtaining the event (which has been posted already)
* yields the CPU if necessary...
*/
#endif
}
}
}
#ifdef DEBUG
struct task_dbg {
struct task t;
char *nm;
};
struct task_dbg taskA = {
{0},
"taskA"
};
struct task_dbg taskB = {
{0},
"taskB"
};
struct task_dbg taskC = {
{0},
"taskC"
};
static void the_task_fn(void *arg, int pending)
{
struct task_dbg *td = arg;
printf("%s (pending: %i)\n", td->nm, pending);
/* Test rescheduling */
if ( pending > 3 )
taskqueue_enqueue(&the_taskqueue,&td->t);
}
void taskqueue_dump()
{
struct task *p;
printf("Anchor %p, Tail %p\n", &the_taskqueue.anchor, the_taskqueue.tail);
for ( p = the_taskqueue.anchor.ta_next; p; p=p->ta_next ) {
printf("%p: (pending %2i, next %p)\n",
p, p->ta_pending, p->ta_next);
}
}
#endif
rtems_id
rtems_taskqueue_initialize()
{
#ifdef DEBUG
TASK_INIT( &taskA.t, 0, the_task_fn, &taskA );
TASK_INIT( &taskB.t, 0, the_task_fn, &taskB );
TASK_INIT( &taskC.t, 0, the_task_fn, &taskC );
#endif
if ( ! the_taskqueue.tid )
the_taskqueue.tid = rtems_bsdnet_newproc("tskq", 10000, taskqueueDoWork, &the_taskqueue);
return the_taskqueue.tid;
}
#ifdef DEBUG
void
_cexpModuleInitialize(void *u)
{
rtems_bsdnet_initialize_network();
the_taskqueue.tid = rtems_taskqueue_initialize();
}
#endif