Implemented a version of rmlock using rwlock.

Jennifer Averett
2012-04-16 09:10:35 -05:00
parent 362782eb25
commit 459afb1c76
8 changed files with 1183 additions and 93 deletions
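
The rmlock-on-rwlock shim itself sits in one of the changed files not shown in this excerpt. Conceptually, the change forwards each read-mostly lock operation onto an ordinary rwlock. A minimal sketch of that mapping follows; the sketch_ names and the embedded rwlock member are chosen for illustration and are not the committed code:

/*
 * Illustrative only -- a toy "rmlock" that is just a wrapper around
 * an rwlock.  All names here are invented for the sketch.
 */
#include <freebsd/sys/param.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/rwlock.h>

struct sketch_rmlock {
    struct rwlock srm_rw;    /* backing rwlock */
};

static void
sketch_rm_init(struct sketch_rmlock *rm, const char *name)
{
    rw_init(&rm->srm_rw, name);
}

static void
sketch_rm_rlock(struct sketch_rmlock *rm)
{
    /* A native rmlock would record the reader in a per-CPU priority
     * tracker here; the rwlock needs no tracker. */
    rw_rlock(&rm->srm_rw);
}

static void
sketch_rm_runlock(struct sketch_rmlock *rm)
{
    rw_runlock(&rm->srm_rw);
}

static void
sketch_rm_wlock(struct sketch_rmlock *rm)
{
    rw_wlock(&rm->srm_rw);
}

static void
sketch_rm_wunlock(struct sketch_rmlock *rm)
{
    rw_wunlock(&rm->srm_rw);
}

A native rm lock keeps the reader path free of atomic operations by tracking readers per CPU; backing it with an rwlock gives that up in exchange for a much simpler port. That trade is also why the first hunk below swaps &lock_class_rm for a second &lock_class_rw entry in lock_classes[].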


@@ -39,7 +39,7 @@ struct lock_class *lock_classes[LOCK_CLASS_MAX + 1] = {
     &lock_class_mtx_spin,
     &lock_class_mtx_sleep,
     &lock_class_sx,
-    &lock_class_rm,
+    &lock_class_rw,
     &lock_class_rw,
 };


@@ -1,92 +0,0 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
* @brief TODO.
*/
/*
* COPYRIGHT (c) 2012.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/

#include <freebsd/machine/rtems-bsd-config.h>

#include <sys/types.h>
#include <freebsd/sys/param.h>
#include <freebsd/sys/types.h>
#include <freebsd/sys/systm.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/rmlock.h>

#include <pthread.h>

#define RMPF_ONQUEUE    1
#define RMPF_SIGNAL     2

/*
 * To support usage of rmlock in CVs and msleep yet another list for the
 * priority tracker would be needed.  Using this lock for cv and msleep
 * also does not seem very useful.
 */

static __inline void
compiler_memory_barrier(void)
{
    __asm __volatile("" ::: "memory");
}

static void assert_rm(struct lock_object *lock, int what);
static void lock_rm(struct lock_object *lock, int how);
#ifdef KDTRACE_HOOKS
static int owner_rm(struct lock_object *lock, struct thread **owner);
#endif
static int unlock_rm(struct lock_object *lock);

struct lock_class lock_class_rm = {
    .lc_name = "rm",
    .lc_flags = LC_SLEEPLOCK | LC_RECURSABLE,
    .lc_assert = assert_rm,
#if 0
#ifdef DDB
    .lc_ddb_show = db_show_rwlock,
#endif
#endif
    .lc_lock = lock_rm,
    .lc_unlock = unlock_rm,
#ifdef KDTRACE_HOOKS
    .lc_owner = owner_rm,
#endif
};

static void
assert_rm(struct lock_object *lock, int what)
{
    panic("assert_rm called");
}

static void
lock_rm(struct lock_object *lock, int how)
{
    panic("lock_rm called");
}

static int
unlock_rm(struct lock_object *lock)
{
    panic("unlock_rm called");
}

#ifdef KDTRACE_HOOKS
static int
owner_rm(struct lock_object *lock, struct thread **owner)
{
    panic("owner_rm called");
}
#endif


@@ -0,0 +1,111 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
* @brief TODO.
*/
/*
* COPYRIGHT (c) 1989-2012.
* On-Line Applications Research Corporation (OAR).
*
* The license and distribution terms for this file may be
* found in the file LICENSE in this distribution or at
* http://www.rtems.com/license/LICENSE.
*/

#include <freebsd/machine/rtems-bsd-config.h>

#include <freebsd/sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <freebsd/sys/param.h>
#include <freebsd/sys/systm.h>
#include <freebsd/sys/bus.h>
#include <freebsd/sys/callout.h>
#include <freebsd/sys/condvar.h>
#include <freebsd/sys/interrupt.h>
#include <freebsd/sys/kernel.h>
#include <freebsd/sys/ktr.h>
#include <freebsd/sys/lock.h>
#include <freebsd/sys/malloc.h>
#include <freebsd/sys/mutex.h>
#include <freebsd/sys/proc.h>
#include <freebsd/sys/sdt.h>

static int timeout_cpu;

/*
 * There is one struct callout_cpu per cpu, holding all relevant
 * state for the callout processing thread on the individual CPU.
 * In particular:
 *    cc_ticks is incremented once per tick in callout_cpu().
 *    It tracks the global 'ticks' but in a way that the individual
 *    threads should not worry about races in the order in which
 *    hardclock() and hardclock_cpu() run on the various CPUs.
 *    cc_softticks is advanced in callout_cpu() to point to the
 *    first entry in cc_callwheel that may need handling.  In turn,
 *    a softclock() is scheduled so it can serve the various entries i
 *    such that cc_softticks <= i <= cc_ticks.
 *    XXX maybe cc_softticks and cc_ticks should be volatile?
 *
 *    cc_ticks is also used in callout_reset_cpu() to determine
 *    when the callout should be served.
 */

struct callout_cpu {
    struct mtx            cc_lock;
    struct callout        *cc_callout;
    struct callout_tailq  *cc_callwheel;
    struct callout_list   cc_callfree;
    struct callout        *cc_next;
    struct callout        *cc_curr;
    void                  *cc_cookie;
    int                   cc_ticks;
    int                   cc_softticks;
    int                   cc_cancel;
    int                   cc_waiting;
};
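
The invariant described above (cc_softticks trails cc_ticks, with pending buckets in between) is what the softclock scan consumes. A condensed sketch of that scan follows; it is not the committed code, and callwheelmask, the c_links.tqe linkage, and c_time come from the full kern_timeout.c rather than this excerpt:

static void
softclock_sketch(struct callout_cpu *cc)
{
    struct callout *c;

    CC_LOCK(cc);
    while (cc->cc_softticks != cc->cc_ticks) {
        /* Catch the soft clock up one tick and hash to its bucket. */
        cc->cc_softticks++;
        TAILQ_FOREACH(c, &cc->cc_callwheel[cc->cc_softticks &
            callwheelmask], c_links.tqe) {
            if (c->c_time != cc->cc_softticks)
                continue;    /* belongs to a later lap of the wheel */
            /* Here the real code unlinks c, drops cc_lock, and
             * invokes c->c_func(c->c_arg). */
        }
    }
    CC_UNLOCK(cc);
}
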
/*
 * timeout --
 *    Execute a function after a specified length of time.
 *
 * untimeout --
 *    Cancel previous timeout function call.
 *
 * callout_handle_init --
 *    Initialize a handle so that using it with untimeout is benign.
 *
 *    See AT&T BCI Driver Reference Manual for specification.  This
 *    implementation differs from that one in that although an
 *    identification value is returned from timeout, the original
 *    arguments to timeout as well as the identifier are used to
 *    identify entries for untimeout.
 */

struct callout_handle
timeout(ftn, arg, to_ticks)
    timeout_t *ftn;
    void *arg;
    int to_ticks;
{
    struct callout_cpu *cc;
    struct callout *new;
    struct callout_handle handle;

    /* Keep the handle benign while the body below is disabled. */
    handle.callout = NULL;
#if 0
    cc = CC_CPU(timeout_cpu);
    CC_LOCK(cc);

    /* Fill in the next free callout structure. */
    new = SLIST_FIRST(&cc->cc_callfree);
    if (new == NULL)
        /* XXX Attempt to malloc first */
        panic("timeout table full");
    SLIST_REMOVE_HEAD(&cc->cc_callfree, c_links.sle);
    callout_reset(new, to_ticks, ftn, arg);

    handle.callout = new;
    CC_UNLOCK(cc);
#endif
    return (handle);
}
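
For context, the handle protocol the comment above describes is used roughly as in the following example; foo_softc, foo_expire, and the fs_timer member are hypothetical names, and hz is the kernel tick rate:

/*
 * Hedged usage example -- not part of the committed code.
 */
#include <freebsd/sys/param.h>
#include <freebsd/sys/kernel.h>
#include <freebsd/sys/callout.h>

struct foo_softc {
    struct callout_handle fs_timer;
};

static void
foo_expire(void *arg)
{
    /* Runs from softclock context once the delay has elapsed. */
}

static void
foo_attach(struct foo_softc *sc)
{
    /* Make a later untimeout() benign even if timeout() is never called. */
    callout_handle_init(&sc->fs_timer);
}

static void
foo_start(struct foo_softc *sc)
{
    sc->fs_timer = timeout(foo_expire, sc, hz / 10);    /* roughly 100 ms */
}

static void
foo_stop(struct foo_softc *sc)
{
    /* Per the comment above, both the (ftn, arg) pair and the handle
     * identify the entry to cancel. */
    untimeout(foo_expire, sc, sc->fs_timer);
}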