EPOCH(9): Add epoch_drain_callbacks()

This commit is contained in:
Sebastian Huber 2019-07-09 13:17:18 +02:00
parent 9ed693d723
commit 312f705d4f
3 changed files with 138 additions and 2 deletions

View File

@ -34,6 +34,8 @@
#include <sys/cdefs.h>
#ifdef _KERNEL
#include <sys/lock.h>
#include <sys/_mutex.h>
#include <sys/_sx.h>
#include <sys/pcpu.h>
#include <rtems/score/percpudata.h>
#endif
@ -66,6 +68,8 @@ struct epoch_record {
ck_epoch_record_t er_record;
struct epoch_tdlist er_tdlist;
uint32_t er_cpuid;
struct epoch_context er_drain_ctx;
struct epoch *er_parent;
} __aligned(EPOCH_ALIGN);
typedef struct epoch {
@ -73,6 +77,9 @@ typedef struct epoch {
uintptr_t e_pcpu_record_offset;
int e_flags;
SLIST_ENTRY(epoch) e_link; /* List of all epochs */
struct sx e_drain_sx;
struct mtx e_drain_mtx;
volatile int e_drain_count;
} *epoch_t;
extern struct epoch _bsd_global_epoch;
@ -110,6 +117,7 @@ void epoch_wait_preempt(epoch_t epoch);
void epoch_call(epoch_t epoch, epoch_context_t ctx,
void (*callback) (epoch_context_t));
void epoch_drain_callbacks(epoch_t epoch);
int _bsd_in_epoch(epoch_t epoch);
#define in_epoch(epoch) _bsd_in_epoch(epoch)

View File

@ -33,10 +33,11 @@
#include <sys/types.h>
#include <sys/kernel.h>
#include <sys/epoch.h>
#ifdef INVARIANTS
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/systm.h>
#endif
#include <machine/atomic.h>
#include <machine/cpu.h>
#include <rtems.h>
@ -75,6 +76,8 @@ _bsd_epoch_init(epoch_t epoch, uintptr_t pcpu_record_offset, int flags)
ck_epoch_init(&epoch->e_epoch);
epoch->e_flags = flags;
epoch->e_pcpu_record_offset = pcpu_record_offset;
sx_init(&epoch->e_drain_sx, "epoch-drain-sx");
mtx_init(&epoch->e_drain_mtx, "epoch-drain-mtx", NULL, MTX_DEF);
cpu_count = rtems_scheduler_get_processor_maximum();
@ -89,6 +92,7 @@ _bsd_epoch_init(epoch_t epoch, uintptr_t pcpu_record_offset, int flags)
TAILQ_INIT(__DEVOLATILE(struct epoch_tdlist *,
&er->er_tdlist));
er->er_cpuid = cpu_index;
er->er_parent = epoch;
}
SLIST_INSERT_HEAD(&epoch_list, epoch, e_link);
@ -380,3 +384,77 @@ _bsd_in_epoch(epoch_t epoch)
return (in);
}
#endif
static void
epoch_drain_cb(struct epoch_context *ctx)
{
struct epoch *epoch =
__containerof(ctx, struct epoch_record, er_drain_ctx)->er_parent;
if (atomic_fetchadd_int(&epoch->e_drain_count, -1) == 1) {
mtx_lock(&epoch->e_drain_mtx);
wakeup(epoch);
mtx_unlock(&epoch->e_drain_mtx);
}
}
#ifdef RTEMS_SMP
/*
 * SMP unicast action: runs on each target processor and queues the drain
 * callback on that processor's epoch record.
 */
static void
epoch_call_drain_cb(void *arg)
{
	epoch_t e;
	struct epoch_record *er;

	e = arg;
	/* Use the record of the processor this action executes on. */
	er = EPOCH_GET_RECORD(_Per_CPU_Get(), e);
	epoch_call(e, &er->er_drain_ctx, epoch_drain_cb);
}
#endif
/*
 * Wait until all callbacks currently pending in the epoch have executed.
 *
 * One drain callback is queued on every processor owned by a scheduler;
 * the caller then sleeps until the last of them has run.  e_drain_sx
 * serializes concurrent drains of the same epoch; e_drain_mtx protects
 * e_drain_count and pairs with the wakeup() in epoch_drain_cb().
 *
 * May block; must not be called from within an epoch section.
 */
void
epoch_drain_callbacks(epoch_t epoch)
{
#ifdef RTEMS_SMP
	uint32_t cpu_index;
	uint32_t cpu_max;
	rtems_id id;
	rtems_status_code sc;
#else
	struct epoch_record *er;
#endif

	sx_xlock(&epoch->e_drain_sx);
	mtx_lock(&epoch->e_drain_mtx);

#ifdef RTEMS_SMP
	/*
	 * rtems_scheduler_get_processor_maximum() returns a count; valid
	 * processor indices are 0 to cpu_max - 1 (consistent with the loop
	 * in _bsd_epoch_init()), so iterate with "<" rather than "<=".
	 */
	cpu_max = rtems_scheduler_get_processor_maximum();

	/*
	 * Establish the full drain count before dispatching any callback.
	 * If counting and dispatching were interleaved, an early callback
	 * could decrement the count to zero and issue a premature wakeup.
	 */
	for (cpu_index = 0; cpu_index < cpu_max; ++cpu_index) {
		sc = rtems_scheduler_ident_by_processor(cpu_index, &id);
		if (sc == RTEMS_SUCCESSFUL) {
			/* Processor is owned by a scheduler. */
			epoch->e_drain_count++;
		}
	}

	for (cpu_index = 0; cpu_index < cpu_max; ++cpu_index) {
		sc = rtems_scheduler_ident_by_processor(cpu_index, &id);
		if (sc == RTEMS_SUCCESSFUL) {
			_SMP_Unicast_action(cpu_index, epoch_call_drain_cb,
			    epoch);
		}
	}
#else
	/* Uniprocessor: a single record, a single drain callback. */
	epoch->e_drain_count = 1;
	er = EPOCH_GET_RECORD(0, epoch);
	epoch_call(epoch, &er->er_drain_ctx, epoch_drain_cb);
#endif

	/* Sleep until the last drain callback wakes us up. */
	while (epoch->e_drain_count != 0) {
		msleep(epoch, &epoch->e_drain_mtx, PZERO, "EDRAIN", 0);
	}

	mtx_unlock(&epoch->e_drain_mtx);
	sx_xunlock(&epoch->e_drain_sx);
}

View File

@ -356,6 +356,51 @@ test_enter_list_op_exit_preempt_fini(rtems_test_parallel_context *base,
test_fini(base, "EnterListOpExitPreempt", active_workers);
}
/*
 * Parallel test job body: repeatedly enter the epoch, try to remove a list
 * item, exit, and — when an item was removed — defer its reclamation via
 * epoch_call() and immediately wait for the callback with
 * epoch_drain_callbacks().  Per-worker statistics are stored on completion.
 */
static void
test_enter_list_op_exit_drain_body(rtems_test_parallel_context *base,
    void *arg, size_t active_workers, size_t worker_index)
{
	test_context *ctx = (test_context *)base;
	epoch_t e = global_epoch;
	uint32_t enters = 0;
	uint32_t removed = 0;
	uint32_t item_counter[CPU_COUNT];

	memset(item_counter, 0, sizeof(item_counter));

	while (!rtems_test_parallel_stop_job(&ctx->base)) {
		test_item *victim;

		epoch_enter(e);
		++enters;
		victim = test_remove_item(ctx, item_counter, &removed,
		    worker_index);
		epoch_exit(e);

		if (victim == NULL)
			continue;

		/* Reclaim outside the epoch section and wait for it. */
		epoch_call(e, &victim->ec, test_list_callback);
		epoch_drain_callbacks(e);
	}

	ctx->stats.counter[worker_index] = enters;
	ctx->stats.removals[worker_index] = removed;
	memcpy(ctx->stats.item_counter[worker_index], item_counter,
	    sizeof(ctx->stats.item_counter[worker_index]));
}
/* Job finalizer: report the results under the "EnterListOpExitDrain" label. */
static void
test_enter_list_op_exit_drain_fini(rtems_test_parallel_context *base,
    void *arg, size_t active_workers)
{
	(void)arg;

	test_fini(base, "EnterListOpExitDrain", active_workers);
}
static void
test_thread_local_mutex_body(rtems_test_parallel_context *base, void *arg,
size_t active_workers, size_t worker_index)
@ -442,6 +487,11 @@ static const rtems_test_parallel_job test_jobs[] = {
.body = test_enter_list_op_exit_preempt_body,
.fini = test_enter_list_op_exit_preempt_fini,
.cascade = true
}, {
.init = test_list_init,
.body = test_enter_list_op_exit_drain_body,
.fini = test_enter_list_op_exit_drain_fini,
.cascade = true
}, {
.init = test_init,
.body = test_thread_local_mutex_body,