Remove struct callout::c_cpu

This is an optimization of the callout handling.  In libbsd all callouts
are handled by the one and only timer server.
This commit is contained in:
Sebastian Huber 2018-09-14 13:46:50 +02:00
parent 08fbf18141
commit cf447b951a
2 changed files with 25 additions and 1 deletion

View File

@@ -223,7 +223,11 @@ struct callout_cpu cc_cpu;
 #define	CC_UNLOCK(cc)	mtx_unlock_spin(&(cc)->cc_lock)
 #define	CC_LOCK_ASSERT(cc)	mtx_assert(&(cc)->cc_lock, MA_OWNED)
+#ifndef __rtems__
 static int timeout_cpu;
+#else /* __rtems__ */
+#define timeout_cpu 0
+#endif /* __rtems__ */
 static void	callout_cpu_init(struct callout_cpu *cc, int cpu);
 static void	softclock_call_cc(struct callout *c, struct callout_cpu *cc,
@@ -372,7 +376,9 @@ callout_callwheel_init(void *dummy)
 	 * XXX: Once all timeout(9) consumers are converted this can
 	 * be removed.
 	 */
+#ifndef __rtems__
 	timeout_cpu = PCPU_GET(cpuid);
+#endif /* __rtems__ */
 	cc = CC_CPU(timeout_cpu);
 	cc->cc_callout = malloc(ncallout * sizeof(struct callout),
 	    M_CALLOUT, M_WAITOK);
@@ -653,6 +659,7 @@ static struct callout_cpu *
 callout_lock(struct callout *c)
 {
 	struct callout_cpu *cc;
+#ifndef __rtems__
 	int cpu;

 	for (;;) {
@@ -664,12 +671,15 @@ callout_lock(struct callout *c)
 			continue;
 		}
 #endif
+#endif /* __rtems__ */
 		cc = CC_CPU(cpu);
 		CC_LOCK(cc);
+#ifndef __rtems__
 		if (cpu == c->c_cpu)
 			break;
 		CC_UNLOCK(cc);
 	}
+#endif /* __rtems__ */
 	return (cc);
 }
@@ -1129,12 +1139,13 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 	struct callout_cpu *cc;
 #ifndef __rtems__
 	int cancelled, direct;
-	int ignore_cpu=0;
 #else /* __rtems__ */
 	int cancelled;
 #endif /* __rtems__ */
+	int ignore_cpu=0;

 	cancelled = 0;
+#ifndef __rtems__
 	if (cpu == -1) {
 		ignore_cpu = 1;
 	} else if ((cpu >= MAXCPU) ||
@@ -1142,6 +1153,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 		/* Invalid CPU spec */
 		panic("Invalid CPU in callout %d", cpu);
 	}
+#endif /* __rtems__ */
 	callout_when(sbt, prec, flags, &to_sbt, &precision);

 #ifndef __rtems__
@@ -1159,6 +1171,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 	    ("%s: direct callout %p has lock", __func__, c));
 #endif /* __rtems__ */

 	cc = callout_lock(c);
+#ifndef __rtems__
 	/*
 	 * Don't allow migration of pre-allocated callouts lest they
 	 * become unbalanced or handle the case where the user does
@@ -1168,6 +1181,7 @@ callout_reset_sbt_on(struct callout *c, sbintime_t sbt, sbintime_t prec,
 	    ignore_cpu) {
 		cpu = c->c_cpu;
 	}
+#endif /* __rtems__ */

 	if (cc_exec_curr(cc, direct) == c) {
 		/*
@@ -1288,7 +1302,11 @@ callout_schedule_on(struct callout *c, int to_ticks, int cpu)
 int
 callout_schedule(struct callout *c, int to_ticks)
 {
+#ifndef __rtems__
 	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, c->c_cpu);
+#else /* __rtems__ */
+	return callout_reset_on(c, to_ticks, c->c_func, c->c_arg, 0);
+#endif /* __rtems__ */
 }

 int
@@ -1594,7 +1612,9 @@ callout_init(struct callout *c, int mpsafe)
 		c->c_lock = &Giant.lock_object;
 		c->c_iflags = 0;
 	}
+#ifndef __rtems__
 	c->c_cpu = timeout_cpu;
+#endif /* __rtems__ */
 }

 void
@@ -1610,7 +1630,9 @@ _callout_init_lock(struct callout *c, struct lock_object *lock, int flags)
 	    (LC_SPINLOCK | LC_SLEEPABLE)), ("%s: invalid lock class",
 	    __func__));
 	c->c_iflags = flags & (CALLOUT_RETURNUNLOCKED | CALLOUT_SHAREDLOCK);
+#ifndef __rtems__
 	c->c_cpu = timeout_cpu;
+#endif /* __rtems__ */
 }

 #ifdef APM_FIXUP_CALLTODO

View File

@@ -59,7 +59,9 @@ struct callout {
 	struct lock_object *c_lock;	/* lock to handle */
 	short	c_flags;		/* User State */
 	short	c_iflags;		/* Internal State */
+#ifndef __rtems__
 	volatile int c_cpu;		/* CPU we're scheduled on */
+#endif /* __rtems__ */
 };
 #endif