rtems-bsd-mutex: Use standard thread queues

This commit is contained in:
Sebastian Huber 2015-04-30 10:41:34 +02:00
parent 165dd8ea12
commit b3ff71e003
3 changed files with 33 additions and 92 deletions

View File

@ -40,19 +40,17 @@
#ifndef _RTEMS_BSD_MACHINE_RTEMS_BSD_MUTEX_H_
#define _RTEMS_BSD_MACHINE_RTEMS_BSD_MUTEX_H_
#include <rtems/score/isrlock.h>
#include <rtems/score/rbtree.h>
#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
typedef struct {
/*
 * NOTE(review): this span is a rendered diff without +/- markers; both
 * the pre-change members (dedicated ISR lock, `rivals` RB-tree of
 * waiters) and the post-change member (standard score thread queue)
 * appear together.  Confirm against the repository which set is
 * current — per the commit title, the post-change layout should be
 * `queue`, `owner`, `nest_level` only.
 */
ISR_LOCK_MEMBER(lock)
Thread_queue_Control queue;
Thread_Control *owner; /* owning thread; NULL while the mutex is free */
int nest_level; /* recursive acquisitions beyond the first (LO_RECURSABLE) */
RBTree_Control rivals;
} rtems_bsd_mutex;
#ifdef __cplusplus

View File

@ -57,10 +57,9 @@ static inline void
/*
 * Initialize a FreeBSD-compatibility mutex and register it with the
 * lock_object witness machinery via lock_init().
 *
 * NOTE(review): rendered diff without +/- markers — the ISR-lock and
 * RB-tree initializations are the pre-change variant, the thread-queue
 * initialization is the post-change variant; only one set belongs in
 * the real source.  TODO confirm against the repository.
 */
rtems_bsd_mutex_init(struct lock_object *lock, rtems_bsd_mutex *m,
struct lock_class *class, const char *name, const char *type, int flags)
{
_ISR_lock_Initialize(&m->lock, name);
/* Post-change: the thread queue supplies both the lock and the
 * priority-ordered wait queue in one object. */
_Thread_queue_Initialize(&m->queue, THREAD_QUEUE_DISCIPLINE_PRIORITY);
m->owner = NULL; /* starts unowned */
m->nest_level = 0; /* no recursive acquisitions yet */
_RBTree_Initialize_empty(&m->rivals);
lock_init(lock, class, name, type, flags);
}
@ -76,16 +75,16 @@ rtems_bsd_mutex_lock(struct lock_object *lock, rtems_bsd_mutex *m)
Thread_Control *executing;
Thread_Control *owner;
_ISR_lock_ISR_disable_and_acquire(&m->lock, &lock_context);
_Thread_queue_Acquire(&m->queue, &lock_context);
owner = m->owner;
executing = _Thread_Executing;
++executing->resource_count;
if (__predict_true(owner == NULL)) {
m->owner = executing;
++executing->resource_count;
_ISR_lock_Release_and_ISR_enable(&m->lock, &lock_context);
_Thread_queue_Release(&m->queue, &lock_context);
} else {
rtems_bsd_mutex_lock_more(lock, m, owner, executing,
&lock_context);
@ -100,7 +99,7 @@ rtems_bsd_mutex_trylock(struct lock_object *lock, rtems_bsd_mutex *m)
Thread_Control *executing;
Thread_Control *owner;
_ISR_lock_ISR_disable_and_acquire(&m->lock, &lock_context);
_Thread_queue_Acquire(&m->queue, &lock_context);
owner = m->owner;
executing = _Thread_Executing;
@ -117,7 +116,7 @@ rtems_bsd_mutex_trylock(struct lock_object *lock, rtems_bsd_mutex *m)
success = 0;
}
_ISR_lock_Release_and_ISR_enable(&m->lock, &lock_context);
_Thread_queue_Release(&m->queue, &lock_context);
return (success);
}
@ -132,7 +131,7 @@ rtems_bsd_mutex_unlock(rtems_bsd_mutex *m)
Thread_Control *owner;
int nest_level;
_ISR_lock_ISR_disable_and_acquire(&m->lock, &lock_context);
_Thread_queue_Acquire(&m->queue, &lock_context);
nest_level = m->nest_level;
owner = m->owner;
@ -140,17 +139,26 @@ rtems_bsd_mutex_unlock(rtems_bsd_mutex *m)
BSD_ASSERT(owner == _Thread_Executing);
if (__predict_true(nest_level == 0)) {
RBTree_Node *first = _RBTree_First(&m->rivals, RBT_LEFT);
RBTree_Node *first;
int keep_priority;
--owner->resource_count;
/*
* Ensure that the owner resource count is visible to all other
* processors and that we read the latest priority restore
* hint.
*/
_Atomic_Fence( ATOMIC_ORDER_ACQ_REL );
first = _RBTree_First(&m->queue.Queues.Priority, RBT_LEFT);
keep_priority = _Thread_Owns_resources(owner)
|| owner->real_priority == owner->current_priority;
|| !owner->priority_restore_hint;
m->owner = NULL;
if (__predict_true(first == NULL && keep_priority)) {
_ISR_lock_Release_and_ISR_enable(&m->lock, &lock_context);
_Thread_queue_Release(&m->queue, &lock_context);
} else {
rtems_bsd_mutex_unlock_more(m, owner, keep_priority,
first, &lock_context);
@ -159,7 +167,7 @@ rtems_bsd_mutex_unlock(rtems_bsd_mutex *m)
} else {
m->nest_level = nest_level - 1;
_ISR_lock_Release_and_ISR_enable(&m->lock, &lock_context);
_Thread_queue_Release(&m->queue, &lock_context);
}
}
@ -180,14 +188,14 @@ rtems_bsd_mutex_recursed(rtems_bsd_mutex *m)
/*
 * Tear down a mutex: assert no waiters remain, release it if the caller
 * still owns it (collapsing any recursion first), destroy the
 * underlying synchronization object, and unregister the lock_object.
 *
 * NOTE(review): rendered diff without +/- markers — the two
 * BSD_ASSERT lines are the pre-/post-change pair, as are the
 * _ISR_lock_Destroy / _Thread_queue_Dequeue lines; only one of each
 * belongs in the real source.  TODO confirm against the repository.
 */
static inline void
rtems_bsd_mutex_destroy(struct lock_object *lock, rtems_bsd_mutex *m)
{
BSD_ASSERT(_RBTree_Is_empty(&m->rivals));
BSD_ASSERT(_RBTree_Is_empty(&m->queue.Queues.Priority));
if (rtems_bsd_mutex_owned(m)) {
/* Force a single unlock even if recursively held. */
m->nest_level = 0;
rtems_bsd_mutex_unlock(m);
}
_ISR_lock_Destroy(&m->lock);
_Thread_queue_Dequeue(&m->queue);
lock_destroy(lock);
}

View File

@ -41,29 +41,8 @@
#include <machine/rtems-bsd-muteximpl.h>
#include <rtems/score/schedulerimpl.h>
#include <rtems/score/threaddispatch.h>
#include <rtems/score/threadqimpl.h>
#define INTEND_TO_BLOCK \
(THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTEND_TO_BLOCK)
#define BLOCKED \
(THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_BLOCKED)
#define INTERRUPT_SATISFIED \
(THREAD_WAIT_CLASS_OBJECT | THREAD_WAIT_STATE_INTERRUPT_SATISFIED)
/*
 * Priority-change handler for a thread blocked on the mutex: re-insert
 * the thread into the priority-ordered `rivals` tree so its queue
 * position reflects the new priority.
 *
 * NOTE(review): this function is DELETED by this commit — the standard
 * thread queue handles waiter re-ordering itself, so the hand-rolled
 * RB-tree bookkeeping becomes unnecessary.
 */
static void
rtems_bsd_mutex_priority_change(Thread_Control *thread,
Priority_Control new_priority, void *context)
{
rtems_bsd_mutex *m = context;
/* Extract and re-insert to re-sort the waiter by its new priority. */
_RBTree_Extract(&m->rivals, &thread->RBNode);
_RBTree_Insert(&m->rivals, &thread->RBNode,
_Thread_queue_Compare_priority, false);
}
void
rtems_bsd_mutex_lock_more(struct lock_object *lock, rtems_bsd_mutex *m,
Thread_Control *owner, Thread_Control *executing,
@ -73,38 +52,14 @@ rtems_bsd_mutex_lock_more(struct lock_object *lock, rtems_bsd_mutex *m,
BSD_ASSERT(lock->lo_flags & LO_RECURSABLE);
++m->nest_level;
_ISR_lock_Release_and_ISR_enable(&m->lock, lock_context);
_Thread_queue_Release(&m->queue, lock_context);
} else {
Per_CPU_Control *cpu_self;
bool success;
_Thread_Lock_set(executing, &m->lock);
_Thread_Priority_set_change_handler(executing,
rtems_bsd_mutex_priority_change, m);
++executing->resource_count;
_RBTree_Insert(&m->rivals, &executing->RBNode,
_Thread_queue_Compare_priority, false);
cpu_self = _Thread_Dispatch_disable_critical();
/* Priority inheritance */
_Scheduler_Change_priority_if_higher(_Scheduler_Get(owner),
owner, executing->current_priority, false);
_Thread_Raise_priority(owner, executing->current_priority);
_Thread_Wait_flags_set(executing, INTEND_TO_BLOCK);
_ISR_lock_Release_and_ISR_enable(&m->lock, lock_context);
_Thread_Set_state(executing, STATES_WAITING_FOR_MUTEX);
success = _Thread_Wait_flags_try_change(executing,
INTEND_TO_BLOCK, BLOCKED);
if (!success) {
_Thread_Clear_state(executing,
STATES_WAITING_FOR_MUTEX);
}
_Thread_Dispatch_enable(cpu_self);
_Thread_queue_Enqueue_critical(&m->queue, executing,
STATES_WAITING_FOR_MUTEX, WATCHDOG_NO_TIMEOUT, 0,
lock_context);
}
}
@ -114,40 +69,20 @@ rtems_bsd_mutex_unlock_more(rtems_bsd_mutex *m, Thread_Control *owner,
{
/*
 * Slow path of mutex release: hand the mutex to the highest-priority
 * waiter (if any) and, when the releasing owner had inherited a higher
 * priority, restore its normal priority.
 *
 * NOTE(review): rendered diff without +/- markers — the wait-flags
 * try-change sequence is the pre-change variant; the single
 * _Thread_queue_Extract_critical / _Thread_queue_Release calls are the
 * post-change variant.  Only one variant belongs in the real source;
 * TODO confirm against the repository.
 */
if (first != NULL) {
Thread_Control *new_owner;
bool success;
new_owner = THREAD_RBTREE_NODE_TO_THREAD(first);
/* Ownership transfers directly to the first waiter. */
m->owner = new_owner;
_RBTree_Extract(&m->rivals, &new_owner->RBNode);
_Thread_Priority_restore_default_change_handler(new_owner);
_Thread_Lock_restore_default(new_owner);
/* Pre-change: try to satisfy the waiter before it fully blocks. */
success = _Thread_Wait_flags_try_change_critical(new_owner,
INTEND_TO_BLOCK, INTERRUPT_SATISFIED);
if (success) {
_ISR_lock_Release_and_ISR_enable(&m->lock,
_Thread_queue_Extract_critical(&m->queue, new_owner,
lock_context);
} else {
/* Waiter already blocked: unblock it with dispatch disabled. */
Per_CPU_Control *cpu_self;
cpu_self = _Thread_Dispatch_disable_critical();
_ISR_lock_Release_and_ISR_enable(&m->lock,
lock_context);
_Thread_Clear_state(new_owner,
STATES_WAITING_FOR_MUTEX);
_Thread_Dispatch_enable(cpu_self);
}
} else {
/* No waiters: just drop the lock. */
_ISR_lock_Release_and_ISR_enable(&m->lock, lock_context);
_Thread_queue_Release(&m->queue, lock_context);
}
if (!keep_priority) {
/* Owner held an inherited priority: restore its real priority. */
Per_CPU_Control *cpu_self;
cpu_self = _Thread_Dispatch_disable();
_Thread_Change_priority(owner, owner->real_priority, true);
_Thread_Restore_priority(owner);
_Thread_Dispatch_enable(cpu_self);
}
}