mirror of https://git.rtems.org/rtems-libbsd/
Fix INVARIANTS support
parent 5aa6ee55fc
commit 0389b30dd9
@@ -253,7 +253,7 @@ struct e1000_osdep
 	((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value)
 
 
-#if defined(INVARIANTS)
+#if defined(INVARIANTS) && !defined(__rtems__)
 #include <sys/proc.h>
 
 #define ASSERT_NO_LOCKS() \
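Illustrative note, not part of the patch: the guard above now also excludes RTEMS builds, so the debug-only lock assertion is compiled in only when INVARIANTS is defined and the target is not RTEMS. A minimal, self-contained sketch of that shape (hypothetical names, plain C, nothing from libbsd):

/* Sketch only: MY_ASSERT_NO_LOCKS is a stand-in, not the libbsd macro. */
#include <assert.h>
#include <stdio.h>

#if defined(INVARIANTS) && !defined(__rtems__)
#define MY_ASSERT_NO_LOCKS(held)	assert((held) == 0)
#else
#define MY_ASSERT_NO_LOCKS(held)	do { } while (0)	/* compiled out */
#endif

int
main(void)
{
	int locks_held = 0;

	MY_ASSERT_NO_LOCKS(locks_held);
	puts("no locks held at this point");
	return (0);
}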
@@ -235,8 +235,13 @@ __sx_xunlock(struct sx *sx, struct thread *td, const char *file, int line)
 	__sx_xunlock((sx), curthread, (file), (line))
 #endif	/* LOCK_DEBUG > 0 || SX_NOINLINE */
 #if	(LOCK_DEBUG > 0)
+#ifndef __rtems__
 #define	sx_slock_(sx, file, line) \
 	(void)_sx_slock((sx), 0, (file), (line))
+#else /* __rtems__ */
+#define	sx_slock_(sx, file, line) \
+	(void)_sx_xlock((sx), 0, (file), (line))
+#endif /* __rtems__ */
 #define	sx_slock_sig_(sx, file, line) \
 	_sx_slock((sx), SX_INTERRUPTIBLE, (file), (line))
 #define	sx_sunlock_(sx, file, line) \
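Illustrative note, not part of the patch: with lock debugging enabled, the RTEMS side has no separate shared-acquire path for sx locks, so a shared ("read") request is redirected to the exclusive _sx_xlock(). That is conservative but correct: readers simply serialize like writers. A small user-space model of the same mapping (POSIX thread names only, nothing from libbsd):

#include <pthread.h>
#include <stdio.h>

#ifndef __rtems__
/* Host-style model: a real shared/exclusive lock, readers may run concurrently. */
static pthread_rwlock_t obj_lock = PTHREAD_RWLOCK_INITIALIZER;
#define obj_rlock()	pthread_rwlock_rdlock(&obj_lock)
#define obj_runlock()	pthread_rwlock_unlock(&obj_lock)
#else
/* RTEMS-style model: the "shared" request takes an exclusive lock instead. */
static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
#define obj_rlock()	pthread_mutex_lock(&obj_lock)
#define obj_runlock()	pthread_mutex_unlock(&obj_lock)
#endif

int
main(void)
{
	obj_rlock();
	puts("reading shared state");
	obj_runlock();
	return (0);
}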
@@ -338,6 +338,7 @@ static void uma_dbg_alloc(uma_zone_t zone, uma_slab_t slab, void *item);
 static SYSCTL_NODE(_vm, OID_AUTO, debug, CTLFLAG_RD, 0,
     "Memory allocation debugging");
 
+#ifndef __rtems__
 static u_int dbg_divisor = 1;
 SYSCTL_UINT(_vm_debug, OID_AUTO, divisor,
     CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &dbg_divisor, 0,
@@ -349,6 +350,9 @@ SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, trashed, CTLFLAG_RD,
     &uma_dbg_cnt, "memory items debugged");
 SYSCTL_COUNTER_U64(_vm_debug, OID_AUTO, skipped, CTLFLAG_RD,
     &uma_skip_cnt, "memory items skipped, not debugged");
+#else /* __rtems__ */
+#define dbg_divisor 1
+#endif /* __rtems__ */
 #endif
 
 SYSINIT(uma_startup3, SI_SUB_VM_CONF, SI_ORDER_SECOND, uma_startup3, NULL);
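Illustrative note, not part of the patch: on RTEMS the vm.debug sysctl knobs and counters are not built, and dbg_divisor becomes a plain compile-time constant of 1. Since uma_dbg_kskip() (later hunk) returns false immediately when the divisor is 1, no item is ever skipped and the counters are never touched. A trimmed sketch of that declaration pattern (all names are stand-ins):

#include <stdbool.h>
#include <stdio.h>

#ifdef __rtems__
#define dbg_divisor 1			/* constant: sampling disabled */
#else
static unsigned int dbg_divisor = 1;	/* run-time tunable on the host */
#endif

static bool
skip_this_item(void)
{
	if (dbg_divisor == 1)
		return (false);		/* debug every item */
	return (true);			/* (sampling logic would go here) */
}

int
main(void)
{
	printf("skip? %s\n", skip_this_item() ? "yes" : "no");
	return (0);
}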
@@ -2245,9 +2249,11 @@ uma_startup3(void)
 {
 
 #ifdef INVARIANTS
+#ifndef __rtems__
 	TUNABLE_INT_FETCH("vm.debug.divisor", &dbg_divisor);
 	uma_dbg_cnt = counter_u64_alloc(M_WAITOK);
 	uma_skip_cnt = counter_u64_alloc(M_WAITOK);
+#endif /* __rtems__ */
 #endif
 	callout_init(&uma_callout, 1);
 	callout_reset(&uma_callout, UMA_TIMEOUT * hz, uma_timeout, NULL);
@@ -2792,8 +2798,10 @@ uma_zalloc_domain(uma_zone_t zone, void *udata, int domain, int flags)
 		WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
 		    "uma_zalloc_domain: zone \"%s\"", zone->uz_name);
 	}
+#ifndef __rtems__
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("uma_zalloc_domain: called with spinlock or critical section held"));
+#endif /* __rtems__ */
 
 	return (zone_alloc_item(zone, udata, domain, flags));
 }
@@ -3474,8 +3482,10 @@ uma_zfree_domain(uma_zone_t zone, void *item, void *udata)
 	CTR2(KTR_UMA, "uma_zfree_domain thread %x zone %s", curthread,
 	    zone->uz_name);
 
+#ifndef __rtems__
 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
 	    ("uma_zfree_domain: called with spinlock or critical section held"));
+#endif /* __rtems__ */
 
 	/* uma_zfree(..., NULL) does nothing, to match free(9). */
 	if (item == NULL)
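Illustrative note, not part of the patch: the assertion excluded in both hunks above relies on FreeBSD's per-thread td_critnest counter, which the RTEMS thread structure does not carry, so uma_zalloc_domain() and uma_zfree_domain() lose the check on RTEMS. A user-space model of what the assertion expresses, namely that a potentially sleeping allocation must not be called from a critical section (all names below are illustrative):

#include <assert.h>
#include <stdio.h>

static _Thread_local int critnest;	/* model of td_critnest */

static void critical_enter_model(void) { critnest++; }
static void critical_exit_model(void)  { critnest--; }

static void
might_sleep_alloc(void)
{
	/* Sleeping while in a critical section would be a bug. */
	assert(critnest == 0 && "called with critical section held");
	puts("allocation may sleep here");
}

int
main(void)
{
	might_sleep_alloc();		/* fine: not in a critical section */
	critical_enter_model();
	/* calling might_sleep_alloc() here would trip the assertion */
	critical_exit_model();
	return (0);
}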
@@ -4357,6 +4367,7 @@ uma_dbg_kskip(uma_keg_t keg, void *mem)
 	if (dbg_divisor == 1)
 		return (false);
 
+#ifndef __rtems__
 	idx = (uintptr_t)mem >> PAGE_SHIFT;
 	if (keg->uk_ipers > 1) {
 		idx *= keg->uk_ipers;
@@ -4368,6 +4379,7 @@ uma_dbg_kskip(uma_keg_t keg, void *mem)
 		return (true);
 	}
 	counter_u64_add(uma_dbg_cnt, 1);
+#endif /* __rtems__ */
 
 	return (false);
 }
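Illustrative note, not part of the patch: the block excluded above is the host-only sampling that debugs only every dbg_divisor-th item, identified by a stable index derived from the item's page number and its slot within the slab. On RTEMS the function has already returned false (dbg_divisor is fixed at 1), so the index computation and the counter updates are never reached. A small model of that index-based sampling (the page shift and items-per-slab values are assumptions for the sketch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT	12	/* assume 4 KiB pages */
#define SKETCH_ITEMS_PER_SLAB	8	/* assume 8 items of 512 bytes per page */
static unsigned int dbg_divisor = 4;	/* debug one item in four */

static bool
skip_item(uintptr_t item_addr)
{
	uintptr_t idx;

	idx = item_addr >> SKETCH_PAGE_SHIFT;			/* per-page index */
	idx = idx * SKETCH_ITEMS_PER_SLAB +			/* per-item index */
	    (item_addr & ((1u << SKETCH_PAGE_SHIFT) - 1)) / 512;
	return ((idx / dbg_divisor) * dbg_divisor != idx);	/* keep 1 in dbg_divisor */
}

int
main(void)
{
	for (uintptr_t a = 0x20000000; a < 0x20001000; a += 512)
		printf("item at %#jx: %s\n", (uintmax_t)a,
		    skip_item(a) ? "skipped" : "debugged");
	return (0);
}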
@@ -109,8 +109,9 @@ void epoch_wait_preempt(epoch_t epoch);
 void	epoch_call(epoch_t epoch, epoch_context_t ctx,
     void (*callback) (epoch_context_t));
 
-int	in_epoch(epoch_t epoch);
-int	in_epoch_verbose(epoch_t epoch, int dump_onfail);
+int	_bsd_in_epoch(epoch_t epoch);
+#define	in_epoch(epoch) _bsd_in_epoch(epoch)
+#define	in_epoch_verbose(epoch, dump_onfail) _bsd_in_epoch(epoch)
 
 #define	EPOCH_GET_RECORD(cpu_self, epoch) PER_CPU_DATA_GET_BY_OFFSET( \
     cpu_self, struct epoch_record, epoch->e_pcpu_record_offset)
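Illustrative note, not part of the patch: instead of two separate functions, the RTEMS header declares one backing function and maps both query names onto it with macros; the dump_onfail argument of in_epoch_verbose() is simply discarded. The redirection pattern as a trivial stand-alone sketch (names are stand-ins, not the libbsd identifiers):

#include <assert.h>
#include <stdio.h>

/* One backing implementation serves both query names. */
static int
backing_in_section(int id)
{
	return (id == 1);
}

#define in_section(id)			backing_in_section(id)
#define in_section_verbose(id, dump)	backing_in_section(id)	/* dump ignored */

int
main(void)
{
	/* Typical consumer: an invariants-style assertion on membership. */
	assert(in_section(1));
	printf("verbose variant: %d\n", in_section_verbose(1, 0));
	return (0);
}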
@@ -33,6 +33,9 @@
 #include <sys/types.h>
 #include <sys/kernel.h>
 #include <sys/epoch.h>
+#ifdef INVARIANTS
+#include <sys/systm.h>
+#endif
 
 #include <machine/cpu.h>
 
@@ -322,3 +325,32 @@ epoch_call(epoch_t epoch, epoch_context_t ctx,
 	ck_epoch_call(&er->er_record, ctx, callback);
 	_Thread_Dispatch_enable(cpu_self);
 }
+
+#ifdef INVARIANTS
+int
+_bsd_in_epoch(epoch_t epoch)
+{
+	Per_CPU_Control *cpu_self;
+	Thread_Control *executing;
+	struct epoch_record *er;
+	struct epoch_pcpu *epcpu;
+	struct epoch_tracker *tdwait;
+	int in;
+
+	in = 0;
+	cpu_self = _Thread_Dispatch_disable();
+	executing = _Per_CPU_Get_executing(cpu_self);
+	epcpu = PER_CPU_DATA_GET(cpu_self, struct epoch_pcpu, epoch);
+	er = EPOCH_GET_RECORD(cpu_self, epoch);
+
+	TAILQ_FOREACH(tdwait, &er->er_tdlist, et_link) {
+		if (tdwait->et_td == executing) {
+			in = 1;
+			break;
+		}
+	}
+
+	_Thread_Dispatch_enable(cpu_self);
+	return (in);
+}
+#endif
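Illustrative note, not part of the patch: the membership test above walks the per-CPU epoch record's tracker list looking for the executing thread, with thread dispatching disabled so the list cannot change underneath it. A self-contained user-space model of the same idea (POSIX threads and <sys/queue.h>; none of these names are libbsd's, and the model omits the dispatch-disable step):

#include <sys/queue.h>
#include <pthread.h>
#include <stdio.h>

struct tracker {
	pthread_t td;
	TAILQ_ENTRY(tracker) link;
};

/* Stand-in for one epoch record's list of threads currently in the section. */
static TAILQ_HEAD(, tracker) record = TAILQ_HEAD_INITIALIZER(record);

static void
section_enter(struct tracker *t)
{
	t->td = pthread_self();
	TAILQ_INSERT_TAIL(&record, t, link);
}

static void
section_exit(struct tracker *t)
{
	TAILQ_REMOVE(&record, t, link);
}

static int
in_section(void)
{
	struct tracker *t;

	TAILQ_FOREACH(t, &record, link)
		if (pthread_equal(t->td, pthread_self()))
			return (1);
	return (0);
}

int
main(void)
{
	struct tracker t;

	printf("before enter: %d\n", in_section());
	section_enter(&t);
	printf("inside:       %d\n", in_section());
	section_exit(&t);
	printf("after exit:   %d\n", in_section());
	return (0);
}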