Add INVARIANTS support

Sebastian Huber 2015-05-20 13:49:05 +02:00
parent 7d44707145
commit 595b333ad2
17 changed files with 378 additions and 6 deletions
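
Background note (not part of the commit message): FreeBSD's INVARIANTS kernel
option turns consistency checks such as KASSERT() and MPASS() into real runtime
assertions; without it they compile to nothing. A minimal sketch of the
mechanism, along the lines of FreeBSD's <sys/systm.h> (exact expansion varies
by version):

    #ifdef INVARIANTS
    #define KASSERT(exp, msg) do {			\
    	if (__predict_false(!(exp)))		\
    		panic msg;			\
    } while (0)
    #else
    #define KASSERT(exp, msg) do { } while (0)
    #endif

The hunks below wrap KASSERTs that reference FreeBSD-only state (processes,
scheduler state, file descriptor tables) in #ifndef __rtems__ so the remaining
checks can be enabled under RTEMS.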


@@ -103,6 +103,7 @@ LIB_C_FILES += freebsd/sys/libkern/fls.c
 LIB_C_FILES += freebsd/sys/libkern/inet_ntoa.c
 LIB_C_FILES += freebsd/sys/libkern/random.c
 LIB_C_FILES += freebsd/sys/vm/uma_core.c
+LIB_C_FILES += freebsd/sys/vm/uma_dbg.c
 LIB_C_FILES += freebsd/sys/cam/cam.c
 LIB_C_FILES += freebsd/sys/cam/scsi/scsi_all.c
 LIB_C_FILES += freebsd/sys/crypto/sha1.c


@@ -2275,7 +2275,9 @@ knote_fdclose(struct thread *td, int fd)
 	struct knote *kn;
 	int influx;

+#ifndef __rtems__
 	FILEDESC_XLOCK_ASSERT(fdp);
+#endif /* __rtems__ */

 	/*
 	 * We shouldn't have to worry about new kevents appearing on fd


@@ -947,7 +947,9 @@ intr_event_schedule_thread(struct intr_event *ie)
 		    RANDOM_INTERRUPT);
 	}

+#ifndef __rtems__
 	KASSERT(p != NULL, ("ithread %s has no process", ie->ie_name));
+#endif /* __rtems__ */

 	/*
 	 * Set it_need to tell the thread to keep running if it is already


@@ -179,7 +179,9 @@ _sleep(void *ident, struct lock_object *lock, int priority,
 	    "Sleeping on \"%s\"", wmesg);
 	KASSERT(timo != 0 || mtx_owned(&Giant) || lock != NULL,
 	    ("sleeping without a lock"));
+#ifndef __rtems__
 	KASSERT(p != NULL, ("msleep1"));
+#endif /* __rtems__ */
 	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
 #ifndef __rtems__
 	if (priority & PDROP)
@@ -415,8 +417,10 @@ wakeup(void *ident)
 	wakeup_swapper = sleepq_broadcast(ident, SLEEPQ_SLEEP, 0, 0);
 	sleepq_release(ident);
 	if (wakeup_swapper) {
+#ifndef __rtems__
 		KASSERT(ident != &proc0,
 		    ("wakeup and wakeup_swapper and proc0"));
+#endif /* __rtems__ */
 		kick_proc0();
 	}
 }


@@ -1095,7 +1095,9 @@ again:
 			KASSERT(!cc_cme_migrating(cc),
 			    ("callout wrongly scheduled for migration"));
 			CC_UNLOCK(cc);
+#ifndef __rtems__
 			KASSERT(!sq_locked, ("sleepqueue chain locked"));
+#endif /* __rtems__ */
 			return (1);
 		} else if ((c->c_flags & CALLOUT_DFRMIGRATION) != 0) {
 			c->c_flags &= ~CALLOUT_DFRMIGRATION;
@@ -1107,7 +1109,9 @@ again:
 		CTR3(KTR_CALLOUT, "failed to stop %p func %p arg %p",
 		    c, c->c_func, c->c_arg);
 		CC_UNLOCK(cc);
+#ifndef __rtems__
 		KASSERT(!sq_locked, ("sleepqueue chain still locked"));
+#endif /* __rtems__ */
 		return (0);
 	}
 #ifndef __rtems__


@@ -314,8 +314,10 @@ sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags,
 	MPASS((queue >= 0) && (queue < NR_SLEEPQS));

 	/* If this thread is not allowed to sleep, die a horrible death. */
+#ifndef __rtems__
 	KASSERT(!(td->td_pflags & TDP_NOSLEEPING),
 	    ("Trying sleep, but thread marked as sleeping prohibited"));
+#endif /* __rtems__ */

 	/* Look up the sleep queue associated with the wait channel 'wchan'. */
 	sq = sleepq_lookup(wchan);


@@ -268,7 +268,9 @@ taskqueue_enqueue_timeout(struct taskqueue *queue,
 	TQ_LOCK(queue);
 	KASSERT(timeout_task->q == NULL || timeout_task->q == queue,
 	    ("Migrated queue"));
+#ifndef __rtems__
 	KASSERT(!queue->tq_spin, ("Timeout for spin-queue"));
+#endif /* __rtems__ */
 	timeout_task->q = queue;
 	res = timeout_task->t.ta_pending;
 	if (ticks == 0) {


@@ -235,8 +235,10 @@ uiomove_faultflag(void *cp, int n, struct uio *uio, int nofault)
 	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
 	    ("uiomove: mode"));
+#ifndef __rtems__
 	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == td,
 	    ("uiomove proc"));
+#endif /* __rtems__ */
 	if (!nofault)
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 		    "Calling uiomove()");


@@ -352,7 +352,11 @@ int set_dumper(struct dumperinfo *);
 int dump_write(struct dumperinfo *, void *, vm_offset_t, off_t, size_t);
 void dumpsys(struct dumperinfo *);
 int doadump(boolean_t);
+#ifndef __rtems__
 extern int dumping;			/* system is dumping */
+#else /* __rtems__ */
+#define dumping 0
+#endif /* __rtems__ */

 #endif /* _KERNEL */


@@ -478,7 +478,11 @@ do {									\
 #define	TD_IS_SWAPPED(td)	((td)->td_inhibitors & TDI_SWAPPED)
 #define	TD_ON_LOCK(td)		((td)->td_inhibitors & TDI_LOCK)
 #define	TD_AWAITING_INTR(td)	((td)->td_inhibitors & TDI_IWAIT)
+#ifndef __rtems__
 #define	TD_IS_RUNNING(td)	((td)->td_state == TDS_RUNNING)
+#else /* __rtems__ */
+#define	TD_IS_RUNNING(td)	(1)
+#endif /* __rtems__ */
 #define	TD_ON_RUNQ(td)		((td)->td_state == TDS_RUNQ)
 #define	TD_CAN_RUN(td)		((td)->td_state == TDS_CAN_RUN)
 #define	TD_IS_INHIBITED(td)	((td)->td_state == TDS_INHIBITED)
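
Note on the hunk above (interpretation, not from the commit): RTEMS threads
have no FreeBSD td_state, and any thread that reaches an assertion is by
definition executing, so defining TD_IS_RUNNING(td) to 1 keeps assertions such
as KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep")) in _sleep()
compiling and passing under RTEMS.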

freebsd/sys/vm/uma_dbg.c (new file, 315 lines)

@@ -0,0 +1,315 @@
#include <machine/rtems-bsd-kernel-space.h>

/*-
 * Copyright (c) 2002, 2003, 2004, 2005 Jeffrey Roberson <jeff@FreeBSD.org>
 * Copyright (c) 2004, 2005 Bosko Milekic <bmilekic@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uma_dbg.c	Debugging features for UMA users
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <rtems/bsd/sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <rtems/bsd/sys/types.h>
#include <sys/queue.h>
#include <rtems/bsd/sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

static const u_int32_t uma_junk = 0xdeadc0de;

/*
 * Checks an item to make sure it hasn't been overwritten since it was freed,
 * prior to subsequent reallocation.
 *
 * Complies with standard ctor arg/return
 *
 */
int
trash_ctor(void *mem, int size, void *arg, int flags)
{
	int cnt;
	u_int32_t *p;

	cnt = size / sizeof(uma_junk);

	for (p = mem; cnt > 0; cnt--, p++)
		if (*p != uma_junk) {
			printf("Memory modified after free %p(%d) val=%x @ %p\n",
			    mem, size, *p, p);
			return (0);
		}
	return (0);
}

/*
 * Fills an item with predictable garbage
 *
 * Complies with standard dtor arg/return
 *
 */
void
trash_dtor(void *mem, int size, void *arg)
{
	int cnt;
	u_int32_t *p;

	cnt = size / sizeof(uma_junk);

	for (p = mem; cnt > 0; cnt--, p++)
		*p = uma_junk;
}

/*
 * Fills an item with predictable garbage
 *
 * Complies with standard init arg/return
 *
 */
int
trash_init(void *mem, int size, int flags)
{
	trash_dtor(mem, size, NULL);
	return (0);
}

/*
 * Checks an item to make sure it hasn't been overwritten since it was freed.
 *
 * Complies with standard fini arg/return
 *
 */
void
trash_fini(void *mem, int size)
{
	(void)trash_ctor(mem, size, NULL, 0);
}

int
mtrash_ctor(void *mem, int size, void *arg, int flags)
{
	struct malloc_type **ksp;
	u_int32_t *p = mem;
	int cnt;

	size -= sizeof(struct malloc_type *);
	ksp = (struct malloc_type **)mem;
	ksp += size / sizeof(struct malloc_type *);
	cnt = size / sizeof(uma_junk);

	for (p = mem; cnt > 0; cnt--, p++)
		if (*p != uma_junk) {
			printf("Memory modified after free %p(%d) val=%x @ %p\n",
			    mem, size, *p, p);
			panic("Most recently used by %s\n", (*ksp == NULL)?
			    "none" : (*ksp)->ks_shortdesc);
		}
	return (0);
}

/*
 * Fills an item with predictable garbage
 *
 * Complies with standard dtor arg/return
 *
 */
void
mtrash_dtor(void *mem, int size, void *arg)
{
	int cnt;
	u_int32_t *p;

	size -= sizeof(struct malloc_type *);
	cnt = size / sizeof(uma_junk);

	for (p = mem; cnt > 0; cnt--, p++)
		*p = uma_junk;
}

/*
 * Fills an item with predictable garbage
 *
 * Complies with standard init arg/return
 *
 */
int
mtrash_init(void *mem, int size, int flags)
{
	struct malloc_type **ksp;

	mtrash_dtor(mem, size, NULL);

	ksp = (struct malloc_type **)mem;
	ksp += (size / sizeof(struct malloc_type *)) - 1;
	*ksp = NULL;
	return (0);
}

/*
 * Checks an item to make sure it hasn't been overwritten since it was freed,
 * prior to freeing it back to available memory.
 *
 * Complies with standard fini arg/return
 *
 */
void
mtrash_fini(void *mem, int size)
{
	(void)mtrash_ctor(mem, size, NULL, 0);
}

static uma_slab_t
uma_dbg_getslab(uma_zone_t zone, void *item)
{
	uma_slab_t slab;
	uma_keg_t keg;
	u_int8_t *mem;

	mem = (u_int8_t *)((unsigned long)item & (~UMA_SLAB_MASK));
	if (zone->uz_flags & UMA_ZONE_VTOSLAB) {
		slab = vtoslab((vm_offset_t)mem);
	} else {
		keg = LIST_FIRST(&zone->uz_kegs)->kl_keg;
		if (keg->uk_flags & UMA_ZONE_HASH)
			slab = hash_sfind(&keg->uk_hash, mem);
		else
			slab = (uma_slab_t)(mem + keg->uk_pgoff);
	}

	return (slab);
}

/*
 * Set up the slab's freei data such that uma_dbg_free can function.
 *
 */
void
uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	uma_slabrefcnt_t slabref;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;

	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / keg->uk_rsize;

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		slabref->us_freelist[freei].us_item = 255;
	} else {
		slab->us_freelist[freei].us_item = 255;
	}

	return;
}

/*
 * Verifies freed addresses.  Checks for alignment, valid slab membership
 * and duplicate frees.
 *
 */
void
uma_dbg_free(uma_zone_t zone, uma_slab_t slab, void *item)
{
	uma_keg_t keg;
	uma_slabrefcnt_t slabref;
	int freei;

	if (slab == NULL) {
		slab = uma_dbg_getslab(zone, item);
		if (slab == NULL)
			panic("uma: Freed item %p did not belong to zone %s\n",
			    item, zone->uz_name);
	}
	keg = slab->us_keg;

	freei = ((unsigned long)item - (unsigned long)slab->us_data)
	    / keg->uk_rsize;

	if (freei >= keg->uk_ipers)
		panic("zone: %s(%p) slab %p freelist %d out of range 0-%d\n",
		    zone->uz_name, zone, slab, freei, keg->uk_ipers-1);

	if (((freei * keg->uk_rsize) + slab->us_data) != item) {
		printf("zone: %s(%p) slab %p freed address %p unaligned.\n",
		    zone->uz_name, zone, slab, item);
		panic("should be %p\n",
		    (freei * keg->uk_rsize) + slab->us_data);
	}

	if (keg->uk_flags & UMA_ZONE_REFCNT) {
		slabref = (uma_slabrefcnt_t)slab;
		if (slabref->us_freelist[freei].us_item != 255) {
			printf("Slab at %p, freei %d = %d.\n",
			    slab, freei, slabref->us_freelist[freei].us_item);
			panic("Duplicate free of item %p from zone %p(%s)\n",
			    item, zone, zone->uz_name);
		}

		/*
		 * When this is actually linked into the slab this will change.
		 * Until then the count of valid slabs will make sure we don't
		 * accidentally follow this and assume it's a valid index.
		 */
		slabref->us_freelist[freei].us_item = 0;
	} else {
		if (slab->us_freelist[freei].us_item != 255) {
			printf("Slab at %p, freei %d = %d.\n",
			    slab, freei, slab->us_freelist[freei].us_item);
			panic("Duplicate free of item %p from zone %p(%s)\n",
			    item, zone, zone->uz_name);
		}

		/*
		 * When this is actually linked into the slab this will change.
		 * Until then the count of valid slabs will make sure we don't
		 * accidentally follow this and assume it's a valid index.
		 */
		slab->us_freelist[freei].us_item = 0;
	}
}
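
For illustration (not part of the commit): the trash routines above are the
standard UMA debug hooks, and a zone can wire them in explicitly when
INVARIANTS is enabled. A sketch with a hypothetical zone name and item size:

    #include <vm/uma.h>
    #include <vm/uma_dbg.h>

    static uma_zone_t foo_zone;	/* hypothetical example zone */

    void
    foo_zone_setup(void)
    {
    #ifdef INVARIANTS
    	/* Poison items on free, verify the poison on reallocation. */
    	foo_zone = uma_zcreate("foo items", 128, trash_ctor, trash_dtor,
    	    trash_init, trash_fini, UMA_ALIGN_PTR, 0);
    #else
    	foo_zone = uma_zcreate("foo items", 128, NULL, NULL, NULL, NULL,
    	    UMA_ALIGN_PTR, 0);
    #endif
    }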


@@ -328,6 +328,7 @@ def base(mm):
             'sys/libkern/inet_ntoa.c',
             'sys/libkern/random.c',
             'sys/vm/uma_core.c',
+            'sys/vm/uma_dbg.c',
         ],
         mm.generator['source']()
     )


@@ -46,6 +46,7 @@
 #include <rtems/bsd/sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/conf.h>

 static void	assert_mtx(struct lock_object *lock, int what);
 static void	lock_mtx(struct lock_object *lock, int how);


@@ -93,6 +93,12 @@ rtems_bsd_page_alloc(uintptr_t size_in_bytes, int wait)
 	mtx_unlock(&page_heap_mtx);

+#ifdef INVARIANTS
+	if (addr != NULL) {
+		memset(addr, 0, size_in_bytes);
+	}
+#endif
+
 	return (addr);
 }
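
Note on the hunk above (interpretation, not from the commit):
rtems_bsd_page_alloc() sits underneath UMA, so clearing each freshly allocated
page under INVARIANTS gives callers deterministic page contents; code that
wrongly relies on leftover data then fails reproducibly instead of
intermittently.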


@@ -15,7 +15,7 @@
  * USA
  * <kevin.kirspel@optimedical.com>
  *
- * Copyright (c) 2013 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2013-2015 embedded brains GmbH. All rights reserved.
  *
  * embedded brains GmbH
  * Dornierstr. 4
@@ -79,6 +79,10 @@ struct lock_class lock_class_rw = {
 #endif
 };

+#define	rw_wowner(rw)	((rw)->mutex.owner)
+#define	rw_recursed(rw)	((rw)->mutex.nest_level != 0)
+
 void
 assert_rw(struct lock_object *lock, int what)
 {
@@ -223,6 +227,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
 	case RA_LOCKED | RA_RECURSED:
 	case RA_LOCKED | RA_NOTRECURSED:
 	case RA_RLOCKED:
+#ifndef __rtems__
 #ifdef WITNESS
 		witness_assert(&rw->lock_object, what, file, line);
 #else
@@ -250,10 +255,13 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
 		}
 #endif
 		break;
+#else /* __rtems__ */
+		/* FALLTHROUGH */
+#endif /* __rtems__ */
 	case RA_WLOCKED:
 	case RA_WLOCKED | RA_RECURSED:
 	case RA_WLOCKED | RA_NOTRECURSED:
-		if (rw_wowner(rw) != curthread)
+		if (rw_wowner(rw) != _Thread_Get_executing())
 			panic("Lock %s not exclusively locked @ %s:%d\n",
 			    rw->lock_object.lo_name, file, line);
 		if (rw_recursed(rw)) {
@@ -272,7 +280,7 @@ _rw_assert(struct rwlock *rw, int what, const char *file, int line)
 	 * If we hold a write lock fail. We can't reliably check
 	 * to see if we hold a read lock or not.
 	 */
-	if (rw_wowner(rw) == curthread)
+	if (rw_wowner(rw) == _Thread_Get_executing())
 		panic("Lock %s exclusively locked @ %s:%d\n",
 		    rw->lock_object.lo_name, file, line);
 #endif
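
For illustration (not part of the commit): with rw_wowner() now mapped to the
underlying RTEMS mutex owner, INVARIANTS-enabled assertions in rwlock
consumers check against the executing RTEMS thread rather than FreeBSD's
curthread. A hypothetical caller:

    static struct rwlock foo_lock;	/* hypothetical lock */

    static void
    foo_modify(void)
    {
    	/* Panics unless the executing thread write-holds foo_lock. */
    	rw_assert(&foo_lock, RA_WLOCKED);
    	/* ... modify state protected by foo_lock ... */
    }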


@@ -7,7 +7,7 @@
  */

 /*
- * Copyright (c) 2009-2014 embedded brains GmbH. All rights reserved.
+ * Copyright (c) 2009-2015 embedded brains GmbH. All rights reserved.
  *
  * embedded brains GmbH
  * Dornierstr. 4
@@ -39,6 +39,7 @@
 #include <machine/rtems-bsd-kernel-space.h>
 #include <machine/rtems-bsd-muteximpl.h>
+#include <machine/rtems-bsd-thread.h>

 #include <rtems/bsd/sys/param.h>
 #include <rtems/bsd/sys/types.h>
@@ -71,6 +72,10 @@ struct lock_class lock_class_sx = {
 #endif
 };

+#define	sx_xholder(sx)	((sx)->mutex.owner)
+#define	sx_recursed(sx)	((sx)->mutex.nest_level != 0)
+
 void
 assert_sx(struct lock_object *lock, int what)
 {
@@ -177,9 +182,11 @@ _sx_downgrade(struct sx *sx, const char *file, int line)
 void
 _sx_assert(struct sx *sx, int what, const char *file, int line)
 {
+#ifndef __rtems__
 #ifndef WITNESS
 	int slocked = 0;
 #endif
+#endif /* __rtems__ */

 	if (panicstr != NULL)
 		return;
@@ -187,13 +194,16 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
 	case SA_SLOCKED:
 	case SA_SLOCKED | SA_NOTRECURSED:
 	case SA_SLOCKED | SA_RECURSED:
+#ifndef __rtems__
 #ifndef WITNESS
 		slocked = 1;
 		/* FALLTHROUGH */
 #endif
+#endif /* __rtems__ */
 	case SA_LOCKED:
 	case SA_LOCKED | SA_NOTRECURSED:
 	case SA_LOCKED | SA_RECURSED:
+#ifndef __rtems__
 #ifdef WITNESS
 		witness_assert(&sx->lock_object, what, file, line);
 #else
@@ -221,10 +231,13 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
 		}
 #endif
 		break;
+#else /* __rtems__ */
+		/* FALLTHROUGH */
+#endif /* __rtems__ */
 	case SA_XLOCKED:
 	case SA_XLOCKED | SA_NOTRECURSED:
 	case SA_XLOCKED | SA_RECURSED:
-		if (sx_xholder(sx) != curthread)
+		if (sx_xholder(sx) != _Thread_Get_executing())
 			panic("Lock %s not exclusively locked @ %s:%d\n",
 			    sx->lock_object.lo_name, file, line);
 		if (sx_recursed(sx)) {
@@ -244,7 +257,7 @@ _sx_assert(struct sx *sx, int what, const char *file, int line)
 	 * reliably check to see if we hold a shared lock or
 	 * not.
 	 */
-	if (sx_xholder(sx) == curthread)
+	if (sx_xholder(sx) == _Thread_Get_executing())
 		panic("Lock %s exclusively locked @ %s:%d\n",
 		    sx->lock_object.lo_name, file, line);
 #endif


@@ -698,6 +698,7 @@ def build(bld):
         'freebsd/sys/opencrypto/skipjack.c',
         'freebsd/sys/opencrypto/xform.c',
         'freebsd/sys/vm/uma_core.c',
+        'freebsd/sys/vm/uma_dbg.c',
         'mDNSResponder/mDNSCore/CryptoAlg.c',
         'mDNSResponder/mDNSCore/DNSCommon.c',
         'mDNSResponder/mDNSCore/DNSDigest.c',