RWLOCK(9): Add reader/writer lock implementation

Sebastian Huber 2017-11-14 12:57:01 +01:00
parent 344b8e6fb4
commit 97a98f6cd7
8 changed files with 1027 additions and 64 deletions

View File

@@ -29,7 +29,7 @@
#ifndef _SYS__RWLOCK_H_
#define _SYS__RWLOCK_H_
#ifdef __rtems__
#include <machine/rtems-bsd-mutex.h>
#include <machine/rtems-bsd-rwlock.h>
#endif /* __rtems__ */
#include <machine/param.h>
@@ -48,7 +48,7 @@ struct rwlock {
#ifndef __rtems__
volatile uintptr_t rw_lock;
#else /* __rtems__ */
rtems_bsd_mutex mutex;
rtems_bsd_rwlock rwlock;
#endif /* __rtems__ */
};
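
For context, the change above swaps the rtems_bsd_mutex member for a
dedicated rtems_bsd_rwlock, so the FreeBSD rwlock(9) API gains genuine
reader/writer semantics on RTEMS. A minimal usage sketch of that API
(illustrative only, not part of this commit; map_lock and map_value are
hypothetical):

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/rwlock.h>

static struct rwlock map_lock;
static int map_value;	/* hypothetical shared data */

static void
map_setup(void)
{

	rw_init(&map_lock, "map");
}

static int
map_read(void)
{
	int v;

	rw_rlock(&map_lock);	/* several readers may hold the lock at once */
	v = map_value;
	rw_runlock(&map_lock);
	return (v);
}

static void
map_write(int v)
{

	rw_wlock(&map_lock);	/* exclusive; the owner is tracked for PI */
	map_value = v;
	rw_wunlock(&map_lock);
}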

View File

@@ -110,6 +110,7 @@ def rtems(mm):
'rtems/rtems-kernel-pci_cfgreg.c',
'rtems/rtems-kernel-program.c',
'rtems/rtems-kernel-rwlock.c',
'rtems/rtems-kernel-rwlockimpl.c',
'rtems/rtems-kernel-signal.c',
'rtems/rtems-kernel-sx.c',
'rtems/rtems-kernel-sysctlbyname.c',

View File

@@ -2358,6 +2358,7 @@ def build(bld):
'rtemsbsd/rtems/rtems-kernel-pci_cfgreg.c',
'rtemsbsd/rtems/rtems-kernel-program.c',
'rtemsbsd/rtems/rtems-kernel-rwlock.c',
'rtemsbsd/rtems/rtems-kernel-rwlockimpl.c',
'rtemsbsd/rtems/rtems-kernel-signal.c',
'rtemsbsd/rtems/rtems-kernel-sx.c',
'rtemsbsd/rtems/rtems-kernel-sysctl.c',

View File

@@ -0,0 +1,60 @@
/**
* @file
*
* @ingroup rtems_bsd_machine
*
* @brief Reader/writer lock data structure.
*/
/*
* Copyright (c) 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_
#define _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_
#include <rtems/score/threadq.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef struct {
Thread_queue_Control writer_queue;
Thread_queue_Control reader_queue;
int readers;
int nest_level;
} rtems_bsd_rwlock;
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCK_H_ */
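
How the fields above encode the lock state (an editor's summary inferred from
the implementation files that follow, not text from the commit):

/*
 * writer_queue.Queue.owner != NULL => write-locked by that thread;
 *                                     nest_level counts write recursion.
 * readers > 0                      => read-locked by that many threads.
 *
 * The two states are mutually exclusive. Blocked writers wait on
 * writer_queue in priority order with priority inheritance; blocked
 * readers wait on reader_queue in FIFO order and are readied as a group.
 */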

View File

@@ -0,0 +1,429 @@
/**
* @file
*
* @ingroup rtems_bsd_machine
*
* @brief Implementation of a reader/writer lock with priority inheritance for
* exclusive owners (writers).
*/
/*
* Copyright (c) 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_
#define _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_
#include <machine/rtems-bsd-rwlock.h>
#include <machine/rtems-bsd-support.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <rtems/score/threadimpl.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef struct {
Thread_queue_Context writer;
Thread_queue_Context reader;
} rtems_bsd_rwlock_context;
static inline void
rtems_bsd_rwlock_context_init(rtems_bsd_rwlock_context *context)
{
_Thread_queue_Context_initialize(&context->writer);
}
static inline void
rtems_bsd_rwlock_init(struct lock_object *lock, rtems_bsd_rwlock *rw,
struct lock_class *class, const char *name, const char *type, int flags)
{
_Thread_queue_Initialize(&rw->writer_queue, name);
_Thread_queue_Initialize(&rw->reader_queue, name);
rw->readers = 0;
rw->nest_level = 0;
lock_init(lock, class, name, type, flags);
}
void rtems_bsd_rwlock_wlock_more(const struct lock_object *lock,
rtems_bsd_rwlock *rw, Thread_Control *executing,
rtems_bsd_rwlock_context *context);
void rtems_bsd_rwlock_wunlock_more(rtems_bsd_rwlock *rw,
Thread_Control *wowner, rtems_bsd_rwlock_context *context);
void rtems_bsd_rwlock_rlock_more(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context);
void rtems_bsd_rwlock_runlock_more(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context);
void rtems_bsd_rwlock_ready_waiting_readers(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context);
#define rtems_bsd_rwlock_isr_disable(isr_level, context) \
do { \
_ISR_Local_disable(isr_level); \
_ISR_lock_ISR_disable_profile( \
&(context)->writer.Lock_context.Lock_context) \
} while (0)
static inline void
rtems_bsd_rwlock_acquire_critical(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context)
{
_Thread_queue_Queue_acquire_critical(&rw->writer_queue.Queue,
&rw->writer_queue.Lock_stats,
&context->writer.Lock_context.Lock_context);
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
rw->writer_queue.owner = _SMP_lock_Who_am_I();
#endif
}
static inline void
rtems_bsd_rwlock_release(rtems_bsd_rwlock *rw, ISR_Level isr_level,
rtems_bsd_rwlock_context *context)
{
#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
_Assert( _Thread_queue_Is_lock_owner( &rw->writer_queue ) );
rw->writer_queue.owner = SMP_LOCK_NO_OWNER;
#endif
_Thread_queue_Queue_release_critical(&rw->writer_queue.Queue,
&context->writer.Lock_context.Lock_context);
_ISR_Local_enable(isr_level);
}
static inline void
rtems_bsd_rwlock_set_isr_level(rtems_bsd_rwlock_context *context,
ISR_Level isr_level)
{
_ISR_lock_Context_set_level(&context->writer.Lock_context.Lock_context,
isr_level);
}
static inline Thread_Control *
rtems_bsd_rwlock_wowner(const rtems_bsd_rwlock *rw)
{
return (rw->writer_queue.Queue.owner);
}
static inline void
rtems_bsd_rwlock_set_wowner(rtems_bsd_rwlock *rw, Thread_Control *wowner)
{
rw->writer_queue.Queue.owner = wowner;
}
static inline void
rtems_bsd_rwlock_wlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
{
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
Thread_Control *executing;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
executing = _Thread_Executing;
rtems_bsd_rwlock_acquire_critical(rw, &context);
if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
rw->readers == 0)) {
rtems_bsd_rwlock_set_wowner(rw, executing);
_Thread_Resource_count_increment(executing);
rtems_bsd_rwlock_release(rw, isr_level, &context);
} else {
rtems_bsd_rwlock_set_isr_level(&context, isr_level);
rtems_bsd_rwlock_wlock_more(lock, rw, executing,
&context);
}
}
static inline int
rtems_bsd_rwlock_try_wlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
{
int success;
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
Thread_Control *executing;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
executing = _Thread_Executing;
rtems_bsd_rwlock_acquire_critical(rw, &context);
if (rw->readers == 0) {
Thread_Control *wowner;
wowner = rtems_bsd_rwlock_wowner(rw);
if (wowner == NULL) {
rtems_bsd_rwlock_set_wowner(rw, executing);
_Thread_Resource_count_increment(executing);
success = 1;
} else if (wowner == executing) {
BSD_ASSERT(lock->lo_flags & LO_RECURSABLE);
++rw->nest_level;
success = 1;
} else {
success = 0;
}
} else {
success = 0;
}
rtems_bsd_rwlock_release(rw, isr_level, &context);
return (success);
}
static inline void
rtems_bsd_rwlock_wunlock(rtems_bsd_rwlock *rw)
{
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
Thread_Control *wowner;
int nest_level;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
rtems_bsd_rwlock_acquire_critical(rw, &context);
nest_level = rw->nest_level;
wowner = rtems_bsd_rwlock_wowner(rw);
BSD_ASSERT(wowner == _Thread_Executing);
if (__predict_true(nest_level == 0)) {
rtems_bsd_rwlock_set_wowner(rw, NULL);
_Thread_Resource_count_decrement(wowner);
if (__predict_true(
_Thread_queue_Is_empty(&rw->writer_queue.Queue) &&
_Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
rtems_bsd_rwlock_release(rw, isr_level, &context);
} else {
rtems_bsd_rwlock_set_isr_level(&context,
isr_level);
rtems_bsd_rwlock_wunlock_more(rw, wowner,
&context);
}
} else {
rw->nest_level = nest_level - 1;
rtems_bsd_rwlock_release(rw, isr_level, &context);
}
}
static inline int
rtems_bsd_rwlock_wowned(const rtems_bsd_rwlock *rw)
{
return (rtems_bsd_rwlock_wowner(rw) == _Thread_Get_executing());
}
static inline int
rtems_bsd_rwlock_recursed(const rtems_bsd_rwlock *rw)
{
return (rw->nest_level != 0);
}
static inline void
rtems_bsd_rwlock_rlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
{
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
rtems_bsd_rwlock_acquire_critical(rw, &context);
if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
_Thread_queue_Is_empty(&rw->writer_queue.Queue))) {
++rw->readers;
_Thread_Resource_count_increment(_Thread_Executing);
rtems_bsd_rwlock_release(rw, isr_level, &context);
} else {
rtems_bsd_rwlock_set_isr_level(&context, isr_level);
rtems_bsd_rwlock_rlock_more(rw, &context);
}
}
static inline int
rtems_bsd_rwlock_try_rlock(struct lock_object *lock, rtems_bsd_rwlock *rw)
{
int success;
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
rtems_bsd_rwlock_acquire_critical(rw, &context);
if (__predict_true(rtems_bsd_rwlock_wowner(rw) == NULL &&
_Thread_queue_Is_empty(&rw->writer_queue.Queue))) {
++rw->readers;
_Thread_Resource_count_increment(_Thread_Executing);
success = 1;
} else {
success = 0;
}
rtems_bsd_rwlock_release(rw, isr_level, &context);
return (success);
}
static inline void
rtems_bsd_rwlock_runlock(rtems_bsd_rwlock *rw)
{
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
int readers;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
rtems_bsd_rwlock_acquire_critical(rw, &context);
readers = rw->readers;
_Thread_Resource_count_decrement(_Thread_Executing);
if (__predict_true(readers == 1)) {
rw->readers = 0;
if (__predict_true(
_Thread_queue_Is_empty(&rw->writer_queue.Queue) &&
_Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
rtems_bsd_rwlock_release(rw, isr_level,
&context);
} else {
rtems_bsd_rwlock_set_isr_level(&context,
isr_level);
rtems_bsd_rwlock_runlock_more(rw, &context);
}
} else {
rw->readers = readers - 1;
rtems_bsd_rwlock_release(rw, isr_level, &context);
}
}
static inline int
rtems_bsd_rwlock_try_upgrade(rtems_bsd_rwlock *rw)
{
int success;
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
Thread_Control *executing;
Thread_Control *wowner;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
executing = _Thread_Executing;
rtems_bsd_rwlock_acquire_critical(rw, &context);
wowner = rtems_bsd_rwlock_wowner(rw);
BSD_ASSERT(wowner == NULL);
if (rw->readers == 1) {
rw->readers = 0;
rtems_bsd_rwlock_set_wowner(rw, executing);
/* FIXME: priority inheritance */
success = 1;
} else {
success = 0;
}
rtems_bsd_rwlock_release(rw, isr_level, &context);
return (success);
}
static inline void
rtems_bsd_rwlock_downgrade(rtems_bsd_rwlock *rw)
{
ISR_Level isr_level;
rtems_bsd_rwlock_context context;
Thread_Control *wowner;
rtems_bsd_rwlock_context_init(&context);
rtems_bsd_rwlock_isr_disable(isr_level, &context);
rtems_bsd_rwlock_acquire_critical(rw, &context);
wowner = rtems_bsd_rwlock_wowner(rw);
BSD_ASSERT(wowner == _Thread_Executing);
BSD_ASSERT(rw->nest_level == 0);
rtems_bsd_rwlock_set_wowner(rw, NULL);
rw->readers = 1;
if (__predict_true(_Thread_queue_Is_empty(&rw->reader_queue.Queue))) {
rtems_bsd_rwlock_release(rw, isr_level, &context);
} else {
rtems_bsd_rwlock_set_isr_level(&context, isr_level);
rtems_bsd_rwlock_ready_waiting_readers(rw, &context);
}
}
static inline const char *
rtems_bsd_rwlock_name(const rtems_bsd_rwlock *rw)
{
return (rw->writer_queue.Queue.name);
}
static inline void
rtems_bsd_rwlock_destroy(struct lock_object *lock, rtems_bsd_rwlock *rw)
{
BSD_ASSERT(_Thread_queue_Is_empty(&rw->writer_queue.Queue));
BSD_ASSERT(_Thread_queue_Is_empty(&rw->reader_queue.Queue));
if (rtems_bsd_rwlock_wowned(rw)) {
rw->nest_level = 0;
rtems_bsd_rwlock_wunlock(rw);
}
_Thread_queue_Destroy(&rw->writer_queue);
_Thread_queue_Destroy(&rw->reader_queue);
lock_destroy(lock);
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* _RTEMS_BSD_MACHINE_RTEMS_BSD_RWLOCKIMPL_H_ */
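
The try-upgrade and downgrade operations above support the classic
optimistic-read pattern from rwlock(9); note that this implementation upgrades
only when the caller is the sole reader. A hedged sketch (illustrative;
needs_update() and do_update() are hypothetical helpers):

	rw_rlock(&lock);
	if (needs_update()) {
		if (rw_try_upgrade(&lock) == 0) {
			/*
			 * Upgrade failed because other readers hold the
			 * lock; fall back to drop and reacquire. The state
			 * may change in between, so real code would
			 * re-check needs_update() here.
			 */
			rw_runlock(&lock);
			rw_wlock(&lock);
		}
		do_update();
		rw_downgrade(&lock);	/* waiting readers are readied */
	}
	rw_runlock(&lock);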

View File

@@ -15,7 +15,7 @@
* USA
* <kevin.kirspel@optimedical.com>
*
* Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
* Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -23,6 +23,9 @@
* Germany
* <rtems@embedded-brains.de>
*
* Copyright (c) 2006 John Baldwin <jhb@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
@@ -46,7 +49,7 @@
*/
#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-muteximpl.h>
#include <machine/rtems-bsd-rwlockimpl.h>
#include <sys/param.h>
#include <sys/types.h>
@@ -70,9 +73,9 @@ struct lock_class lock_class_rw = {
.lc_unlock = unlock_rw,
};
#define rw_wowner(rw) rtems_bsd_mutex_owner(&(rw)->mutex)
#define rw_wowner(rw) rtems_bsd_rwlock_wowner(&(rw)->rwlock)
#define rw_recursed(rw) rtems_bsd_mutex_recursed(&(rw)->mutex)
#define rw_recursed(rw) rtems_bsd_rwlock_recursed(&(rw)->rwlock)
void
assert_rw(const struct lock_object *lock, int what)
@@ -84,17 +87,30 @@ assert_rw(const struct lock_object *lock, int what)
void
lock_rw(struct lock_object *lock, uintptr_t how)
{
struct rwlock *rw;
rw_wlock((struct rwlock *)lock);
rw = (struct rwlock *)lock;
if (how)
rw_rlock(rw);
else
rw_wlock(rw);
}
uintptr_t
unlock_rw(struct lock_object *lock)
{
struct rwlock *rw;
rw_unlock((struct rwlock *)lock);
rw = (struct rwlock *)lock;
rw_assert(rw, RA_LOCKED | LA_NOTRECURSED);
if (rw->rwlock.readers > 0) {
rw_runlock(rw);
return (1);
} else {
rw_wunlock(rw);
return (0);
}
}
void
rw_init_flags(struct rwlock *rw, const char *name, int opts)
@@ -105,7 +121,7 @@ rw_init_flags(struct rwlock *rw, const char *name, int opts)
if (opts & RW_RECURSE)
flags |= LO_RECURSABLE;
rtems_bsd_mutex_init(&rw->lock_object, &rw->mutex, &lock_class_rw,
rtems_bsd_rwlock_init(&rw->lock_object, &rw->rwlock, &lock_class_rw,
name, NULL, flags);
}
@@ -113,7 +129,7 @@ void
rw_destroy(struct rwlock *rw)
{
rtems_bsd_mutex_destroy(&rw->lock_object, &rw->mutex);
rtems_bsd_rwlock_destroy(&rw->lock_object, &rw->rwlock);
}
void
@@ -121,7 +137,7 @@ rw_sysinit(void *arg)
{
struct rw_args *args = arg;
rw_init(args->ra_rw, args->ra_desc);
rw_init((struct rwlock *)args->ra_rw, args->ra_desc);
}
void
@@ -129,61 +145,71 @@ rw_sysinit_flags(void *arg)
{
struct rw_args_flags *args = arg;
rw_init_flags(args->ra_rw, args->ra_desc, args->ra_flags);
rw_init_flags((struct rwlock *)args->ra_rw, args->ra_desc,
args->ra_flags);
}
int
rw_wowned(struct rwlock *rw)
{
return (rtems_bsd_mutex_owned(&rw->mutex));
return (rtems_bsd_rwlock_wowned(&rw->rwlock));
}
void
_rw_wlock(struct rwlock *rw, const char *file, int line)
{
rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
rtems_bsd_rwlock_wlock(&rw->lock_object, &rw->rwlock);
}
int
_rw_try_wlock(struct rwlock *rw, const char *file, int line)
{
return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
return (rtems_bsd_rwlock_try_wlock(&rw->lock_object, &rw->rwlock));
}
void
_rw_wunlock(struct rwlock *rw, const char *file, int line)
{
rtems_bsd_mutex_unlock(&rw->mutex);
rtems_bsd_rwlock_wunlock(&rw->rwlock);
}
void
_rw_rlock(struct rwlock *rw, const char *file, int line)
{
rtems_bsd_mutex_lock(&rw->lock_object, &rw->mutex);
rtems_bsd_rwlock_rlock(&rw->lock_object, &rw->rwlock);
}
int
_rw_try_rlock(struct rwlock *rw, const char *file, int line)
{
return (rtems_bsd_mutex_trylock(&rw->lock_object, &rw->mutex));
return (rtems_bsd_rwlock_try_rlock(&rw->lock_object, &rw->rwlock));
}
void
_rw_runlock(struct rwlock *rw, const char *file, int line)
{
rtems_bsd_mutex_unlock(&rw->mutex);
rtems_bsd_rwlock_runlock(&rw->rwlock);
}
int
_rw_try_upgrade(struct rwlock *rw, const char *file, int line)
{
return (1);
return (rtems_bsd_rwlock_try_upgrade(&rw->rwlock));
}
void
_rw_downgrade(struct rwlock *rw, const char *file, int line)
{
/* Nothing to do */
rtems_bsd_rwlock_downgrade(&rw->rwlock);
}
#ifdef INVARIANT_SUPPORT
@@ -195,7 +221,7 @@ _rw_downgrade(struct rwlock *rw, const char *file, int line)
void
_rw_assert(const struct rwlock *rw, int what, const char *file, int line)
{
const char *name = rtems_bsd_mutex_name(&rw->mutex);
const char *name = rtems_bsd_rwlock_name(&rw->rwlock);
switch (what) {
case RA_LOCKED:
@@ -204,6 +230,33 @@ _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
case RA_RLOCKED:
case RA_RLOCKED | RA_RECURSED:
case RA_RLOCKED | RA_NOTRECURSED:
#ifdef WITNESS
witness_assert(&rw->lock_object, what, file, line);
#else
/*
* If some other thread has a write lock or we have one
* and are asserting a read lock, fail. Also, if no one
* has a lock at all, fail.
*/
if ((rw->rwlock.readers == 0 && rw_wowner(rw) == NULL) ||
(rw->rwlock.readers == 0 && (what & RA_RLOCKED ||
rw_wowner(rw) != _Thread_Get_executing())))
panic("Lock %s not %slocked @ %s:%d\n",
name, (what & RA_RLOCKED) ?
"read " : "", file, line);
if (rw->rwlock.readers == 0 && !(what & RA_RLOCKED)) {
if (rw_recursed(rw)) {
if (what & RA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n",
name, file,
line);
} else if (what & RA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n",
name, file, line);
}
#endif
break;
case RA_WLOCKED:
case RA_WLOCKED | RA_RECURSED:
case RA_WLOCKED | RA_NOTRECURSED:
@@ -212,11 +265,11 @@ _rw_assert(const struct rwlock *rw, int what, const char *file, int line)
name, file, line);
if (rw_recursed(rw)) {
if (what & RA_NOTRECURSED)
panic("Lock %s recursed @ %s:%d\n", name, file,
line);
panic("Lock %s recursed @ %s:%d\n",
name, file, line);
} else if (what & RA_RECURSED)
panic("Lock %s not recursed @ %s:%d\n", name, file,
line);
panic("Lock %s not recursed @ %s:%d\n",
name, file, line);
break;
case RA_UNLOCKED:
#ifdef WITNESS
@@ -227,8 +280,8 @@
* to see if we hold a read lock or not.
*/
if (rw_wowner(rw) == _Thread_Get_executing())
panic("Lock %s exclusively locked @ %s:%d\n", name,
file, line);
panic("Lock %s exclusively locked @ %s:%d\n",
name, file, line);
#endif
break;
default:
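
The reworked lock_rw()/unlock_rw() pair above preserves the lock mode across a
drop-and-reacquire cycle, as used by the sleep machinery: unlock_rw() returns
1 when the lock was read-locked and 0 when it was write-locked, and lock_rw()
consumes that value. A sketch of the round trip (illustrative):

	uintptr_t how;

	how = unlock_rw(&rw->lock_object);	/* 1 = read mode, 0 = write */
	/* ... the thread sleeps here, e.g. inside rw_sleep() ... */
	lock_rw(&rw->lock_object, how);		/* reacquire in the same mode */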

View File

@@ -0,0 +1,177 @@
/**
* @file
*
* @ingroup rtems_bsd_rtems
*
* @brief Reader/writer lock slow-path implementation.
*/
/*
* Copyright (c) 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
* 82178 Puchheim
* Germany
* <rtems@embedded-brains.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-rwlockimpl.h>
#include <rtems/score/schedulerimpl.h>
void
rtems_bsd_rwlock_wlock_more(const struct lock_object *lock,
rtems_bsd_rwlock *rw, Thread_Control *executing,
rtems_bsd_rwlock_context *context)
{
Thread_Control *wowner;
wowner = rtems_bsd_rwlock_wowner(rw);
if (wowner == executing) {
BSD_ASSERT(lock->lo_flags & LO_RECURSABLE);
++rw->nest_level;
_Thread_queue_Release(&rw->writer_queue, &context->writer);
} else {
_Thread_queue_Context_set_thread_state(&context->writer,
STATES_WAITING_FOR_RWLOCK);
_Thread_queue_Context_set_enqueue_do_nothing_extra(
&context->writer);
_Thread_queue_Context_set_deadlock_callout(&context->writer,
_Thread_queue_Deadlock_fatal);
_Thread_queue_Enqueue(&rw->writer_queue.Queue,
&_Thread_queue_Operations_priority, executing,
&context->writer);
}
}
static Thread_Control *
rtems_bsd_rwlock_flush_reader_filter(Thread_Control *reader,
Thread_queue_Queue *queue, Thread_queue_Context *queue_context)
{
rtems_bsd_rwlock *rw;
rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
++rw->readers;
_Thread_Resource_count_increment(reader);
return (reader);
}
static void
rtems_bsd_rwlock_flush_reader_post_release(Thread_queue_Queue *queue,
Thread_queue_Context *queue_context)
{
rtems_bsd_rwlock *rw;
rtems_bsd_rwlock_context *context;
rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
context = RTEMS_CONTAINER_OF(queue_context, rtems_bsd_rwlock_context,
reader);
_Thread_queue_Release(&rw->writer_queue, &context->writer);
}
void
rtems_bsd_rwlock_wunlock_more(rtems_bsd_rwlock *rw, Thread_Control *wowner,
rtems_bsd_rwlock_context *context)
{
if (!_Thread_queue_Is_empty(&rw->reader_queue.Queue)) {
BSD_ASSERT(rw->readers == 0);
rtems_bsd_rwlock_ready_waiting_readers(rw, context);
} else {
BSD_ASSERT(!_Thread_queue_Is_empty(&rw->writer_queue.Queue));
_Thread_queue_Surrender(&rw->writer_queue.Queue,
rw->writer_queue.Queue.heads, wowner, &context->writer,
&_Thread_queue_Operations_priority);
}
}
static void
rtems_bsd_rwlock_reader_enqueue(Thread_queue_Queue *queue,
Thread_Control *executing, Per_CPU_Control *cpu_self,
Thread_queue_Context *queue_context
)
{
rtems_bsd_rwlock *rw;
rtems_bsd_rwlock_context *context;
rw = RTEMS_CONTAINER_OF(queue, rtems_bsd_rwlock, reader_queue.Queue);
context = RTEMS_CONTAINER_OF(queue_context, rtems_bsd_rwlock_context,
reader);
_Thread_queue_Release(&rw->writer_queue, &context->writer);
}
void
rtems_bsd_rwlock_rlock_more(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context)
{
Thread_Control *executing;
executing = _Thread_Executing;
_Thread_queue_Context_initialize(&context->reader);
_Thread_queue_Context_set_thread_state(&context->reader,
STATES_WAITING_FOR_RWLOCK);
_Thread_queue_Context_set_enqueue_callout(
&context->reader, rtems_bsd_rwlock_reader_enqueue);
_Thread_queue_Context_set_deadlock_callout(&context->reader,
_Thread_queue_Deadlock_fatal);
_Thread_queue_Acquire(&rw->reader_queue, &context->reader);
_Thread_queue_Enqueue(&rw->reader_queue.Queue,
&_Thread_queue_Operations_FIFO, executing, &context->reader);
}
void
rtems_bsd_rwlock_runlock_more(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context)
{
if (!_Thread_queue_Is_empty(&rw->writer_queue.Queue)) {
BSD_ASSERT(rw->readers == 0);
_Thread_queue_Surrender(&rw->writer_queue.Queue,
rw->writer_queue.Queue.heads, NULL, &context->writer,
&_Thread_queue_Operations_priority);
} else {
BSD_ASSERT(!_Thread_queue_Is_empty(&rw->reader_queue.Queue));
rtems_bsd_rwlock_ready_waiting_readers(rw, context);
}
}
void
rtems_bsd_rwlock_ready_waiting_readers(rtems_bsd_rwlock *rw,
rtems_bsd_rwlock_context *context)
{
_Thread_queue_Context_initialize(&context->reader);
_Thread_queue_Acquire(&rw->reader_queue, &context->reader);
_Thread_queue_Flush_critical(&rw->reader_queue.Queue,
&_Thread_queue_Operations_FIFO,
rtems_bsd_rwlock_flush_reader_filter,
rtems_bsd_rwlock_flush_reader_post_release,
&context->reader);
}
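
Taken together, the slow paths above give the lock a phase-fair flavor:
writers queue in priority order with priority inheritance, readers queue FIFO
behind any waiting writer, and releasing the write lock readies the whole
batch of waiting readers at once. An illustrative timeline matching the new
phase-fair tests below (A, B, C are threads):

/*
 * T0: A rw_rlock()   -> readers = 1
 * T1: B rw_wlock()   -> B blocks on writer_queue (priority, with PI)
 * T2: C rw_rlock()   -> C blocks on reader_queue (FIFO) because a
 *                       writer is already waiting
 * T3: A rw_unlock()  -> the write lock is surrendered to B (writer phase)
 * T4: B rw_unlock()  -> all waiting readers (here C) are flushed and
 *                       readied together (reader phase)
 */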

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2013 embedded brains GmbH. All rights reserved.
* Copyright (c) 2013, 2017 embedded brains GmbH. All rights reserved.
*
* embedded brains GmbH
* Dornierstr. 4
@@ -67,8 +67,11 @@ typedef struct {
struct rwlock rw;
bool done;
int rv;
bool done2;
int rv2;
int timo;
rtems_id worker_task;
rtems_id worker2_task;
} test_context;
static test_context test_instance;
@@ -83,9 +86,8 @@
}
static void
worker_task(rtems_task_argument arg)
worker(test_context *ctx, int *rv, bool *done)
{
test_context *ctx = (test_context *) arg;
struct rwlock *rw = &ctx->rw;
while (true) {
@@ -102,36 +104,52 @@ worker_task(rtems_task_argument arg)
if ((events & EVENT_RLOCK) != 0) {
rw_rlock(rw);
ctx->done = true;
*done = true;
}
if ((events & EVENT_WLOCK) != 0) {
rw_wlock(rw);
ctx->done = true;
*done = true;
}
if ((events & EVENT_TRY_RLOCK) != 0) {
ctx->rv = rw_try_rlock(rw);
ctx->done = true;
*rv = rw_try_rlock(rw);
*done = true;
}
if ((events & EVENT_TRY_WLOCK) != 0) {
ctx->rv = rw_try_wlock(rw);
ctx->done = true;
*rv = rw_try_wlock(rw);
*done = true;
}
if ((events & EVENT_UNLOCK) != 0) {
rw_unlock(rw);
ctx->done = true;
*done = true;
}
if ((events & EVENT_SLEEP) != 0) {
ctx->rv = rw_sleep(ctx, rw, 0, "worker", ctx->timo);
ctx->done = true;
*rv = rw_sleep(ctx, rw, 0, "worker", ctx->timo);
*done = true;
}
}
}
static void
worker_task(rtems_task_argument arg)
{
test_context *ctx = (test_context *) arg;
worker(ctx, &ctx->rv, &ctx->done);
}
static void
worker2_task(rtems_task_argument arg)
{
test_context *ctx = (test_context *) arg;
worker(ctx, &ctx->rv2, &ctx->done2);
}
static void
send_events(test_context *ctx, rtems_event_set events)
{
@@ -141,13 +159,22 @@ send_events(test_context *ctx, rtems_event_set events)
assert(sc == RTEMS_SUCCESSFUL);
}
static void
send_events2(test_context *ctx, rtems_event_set events)
{
rtems_status_code sc;
sc = rtems_event_send(ctx->worker2_task, events);
assert(sc == RTEMS_SUCCESSFUL);
}
static void
start_worker(test_context *ctx)
{
rtems_status_code sc;
sc = rtems_task_create(
rtems_build_name('W', 'O', 'R', 'K'),
rtems_build_name('W', 'R', 'K', '1'),
PRIO_WORKER,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
@@ -162,6 +189,23 @@ start_worker(test_context *ctx)
(rtems_task_argument) ctx
);
assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_create(
rtems_build_name('W', 'R', 'K', '2'),
PRIO_WORKER,
RTEMS_MINIMUM_STACK_SIZE,
RTEMS_DEFAULT_MODES,
RTEMS_FLOATING_POINT,
&ctx->worker2_task
);
assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_start(
ctx->worker2_task,
worker2_task,
(rtems_task_argument) ctx
);
assert(sc == RTEMS_SUCCESSFUL);
}
static void
@@ -171,6 +215,9 @@ delete_worker(test_context *ctx)
sc = rtems_task_delete(ctx->worker_task);
assert(sc == RTEMS_SUCCESSFUL);
sc = rtems_task_delete(ctx->worker2_task);
assert(sc == RTEMS_SUCCESSFUL);
}
static void
@@ -186,13 +233,17 @@ test_rw_non_recursive(test_context *ctx)
assert(rw_initialized(rw));
rw_rlock(rw);
/* FIXME: We use a mutex implementation */
assert(rw_wowned(rw));
assert(!rw_wowned(rw));
rw_runlock(rw);
rw_rlock(rw);
rw_unlock(rw);
rw_rlock(rw);
rw_rlock(rw);
rw_runlock(rw);
rw_runlock(rw);
rw_rlock(rw);
ok = rw_try_upgrade(rw);
assert(ok != 0);
@@ -210,8 +261,7 @@
assert(ok != 0);
assert(rw_wowned(rw));
rw_downgrade(rw);
/* FIXME: We use a mutex implementation */
assert(rw_wowned(rw));
assert(!rw_wowned(rw));
rw_runlock(rw);
rw_rlock(rw);
@@ -219,8 +269,7 @@
assert(ok != 0);
assert(rw_wowned(rw));
rw_downgrade(rw);
/* FIXME: We use a mutex implementation */
assert(rw_wowned(rw));
assert(!rw_wowned(rw));
rw_unlock(rw);
rw_wlock(rw);
@@ -252,11 +301,6 @@ test_rw_recursive(test_context *ctx)
rw_init_flags(rw, "test", RW_RECURSE);
assert(rw_initialized(rw));
rw_rlock(rw);
rw_rlock(rw);
rw_runlock(rw);
rw_runlock(rw);
rw_wlock(rw);
rw_wlock(rw);
rw_wunlock(rw);
@@ -275,13 +319,15 @@ test_rw_try_rlock(test_context *ctx)
rw_init(rw, "test");
rw_rlock(rw);
/* FIXME: We use a mutex implementation */
ctx->done = false;
ctx->rv = 1;
ctx->rv = 0;
send_events(ctx, EVENT_TRY_RLOCK);
assert(ctx->done);
assert(ctx->rv == 0);
assert(ctx->rv == 1);
rw_unlock(rw);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
rw_wlock(rw);
ctx->done = false;
@@ -332,10 +378,9 @@ test_rw_rlock(test_context *ctx)
rw_init(rw, "test");
rw_rlock(rw);
/* FIXME: We use a mutex implementation */
ctx->done = false;
send_events(ctx, EVENT_RLOCK);
assert(!ctx->done);
assert(ctx->done);
rw_unlock(rw);
assert(ctx->done);
ctx->done = false;
@@ -387,6 +432,202 @@ test_rw_wlock(test_context *ctx)
rw_destroy(rw);
}
static void
test_rw_rlock_phase_fair(test_context *ctx)
{
struct rwlock *rw = &ctx->rw;
puts("test rw rlock phase fair");
rw_init(rw, "test");
rw_rlock(rw);
ctx->done = false;
send_events(ctx, EVENT_WLOCK);
assert(!ctx->done);
ctx->done2 = false;
send_events2(ctx, EVENT_RLOCK);
assert(!ctx->done2);
rw_unlock(rw);
assert(ctx->done);
assert(!ctx->done2);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
assert(ctx->done2);
ctx->done2 = false;
send_events2(ctx, EVENT_UNLOCK);
assert(ctx->done2);
rw_destroy(rw);
}
static void
test_rw_wlock_phase_fair(test_context *ctx)
{
struct rwlock *rw = &ctx->rw;
puts("test rw wlock phase fair");
rw_init(rw, "test");
rw_wlock(rw);
ctx->done = false;
send_events(ctx, EVENT_WLOCK);
assert(!ctx->done);
ctx->done2 = false;
send_events2(ctx, EVENT_RLOCK);
assert(!ctx->done2);
rw_unlock(rw);
assert(!ctx->done);
assert(ctx->done2);
ctx->done2 = false;
send_events2(ctx, EVENT_UNLOCK);
assert(ctx->done2);
assert(ctx->done);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
rw_destroy(rw);
}
static void
test_rw_try_upgrade(test_context *ctx)
{
struct rwlock *rw = &ctx->rw;
int ok;
puts("test rw try upgrade");
rw_init(rw, "test");
rw_rlock(rw);
ctx->done = false;
send_events(ctx, EVENT_WLOCK);
assert(!ctx->done);
assert(!rw_wowned(rw));
ok = rw_try_upgrade(rw);
assert(ok != 0);
assert(rw_wowned(rw));
assert(!ctx->done);
rw_unlock(rw);
assert(!rw_wowned(rw));
assert(ctx->done);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
rw_rlock(rw);
ctx->done = false;
send_events(ctx, EVENT_WLOCK);
assert(!ctx->done);
ctx->done2 = false;
send_events2(ctx, EVENT_RLOCK);
assert(!ctx->done2);
assert(!rw_wowned(rw));
ok = rw_try_upgrade(rw);
assert(ok != 0);
assert(rw_wowned(rw));
assert(!ctx->done);
assert(!ctx->done2);
rw_unlock(rw);
assert(!rw_wowned(rw));
assert(!ctx->done);
assert(ctx->done2);
ctx->done2 = false;
send_events2(ctx, EVENT_UNLOCK);
assert(ctx->done2);
assert(ctx->done);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
rw_rlock(rw);
ctx->done = false;
send_events(ctx, EVENT_RLOCK);
assert(ctx->done);
assert(!rw_wowned(rw));
ok = rw_try_upgrade(rw);
assert(ok == 0);
assert(!rw_wowned(rw));
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
assert(!rw_wowned(rw));
ok = rw_try_upgrade(rw);
assert(ok != 0);
assert(rw_wowned(rw));
rw_unlock(rw);
assert(!rw_wowned(rw));
rw_destroy(rw);
}
static void
test_rw_downgrade(test_context *ctx)
{
struct rwlock *rw = &ctx->rw;
puts("test rw downgrade");
rw_init(rw, "test");
rw_wlock(rw);
assert(rw_wowned(rw));
rw_downgrade(rw);
assert(!rw_wowned(rw));
rw_unlock(rw);
assert(!rw_wowned(rw));
rw_wlock(rw);
assert(rw_wowned(rw));
ctx->done = false;
send_events(ctx, EVENT_RLOCK);
assert(!ctx->done);
rw_downgrade(rw);
assert(!rw_wowned(rw));
assert(ctx->done);
rw_unlock(rw);
assert(!rw_wowned(rw));
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
assert(ctx->done);
rw_destroy(rw);
}
static void
test_rw_sleep_with_rlock(test_context *ctx)
{
@@ -407,11 +648,8 @@ test_rw_sleep_with_rlock(test_context *ctx)
rw_rlock(rw);
wakeup(ctx);
/* FIXME: We use a mutex implementation */
assert(!ctx->done);
rw_unlock(rw);
/* FIXME: We use a mutex implementation */
assert(ctx->done);
rw_unlock(rw);
ctx->done = false;
send_events(ctx, EVENT_UNLOCK);
@@ -511,6 +749,10 @@ test_main(void)
test_rw_try_wlock(ctx);
test_rw_rlock(ctx);
test_rw_wlock(ctx);
test_rw_rlock_phase_fair(ctx);
test_rw_wlock_phase_fair(ctx);
test_rw_try_upgrade(ctx);
test_rw_downgrade(ctx);
assert(rtems_resource_snapshot_check(&snapshot_1));