From 562783d3c51889824cd18e29a3f973dc17bd48dc Mon Sep 17 00:00:00 2001 From: Joel Sherrill Date: Fri, 9 Mar 2012 09:19:06 -0600 Subject: [PATCH] Add time support files from FreeBSD to build to resolve more symbols --- freebsd/kern/kern_tc.c | 968 ++++++++++++++++++++++++++++++++++++++++ freebsd/local/opt_ntp.h | 1 + freebsd/sys/timepps.h | 200 +++++++++ freebsd/sys/timetc.h | 78 ++++ freebsd/sys/timex.h | 238 ++++++++++ 5 files changed, 1485 insertions(+) create mode 100644 freebsd/kern/kern_tc.c create mode 100644 freebsd/local/opt_ntp.h create mode 100644 freebsd/sys/timepps.h create mode 100644 freebsd/sys/timetc.h create mode 100644 freebsd/sys/timex.h diff --git a/freebsd/kern/kern_tc.c b/freebsd/kern/kern_tc.c new file mode 100644 index 00000000..1671145b --- /dev/null +++ b/freebsd/kern/kern_tc.c @@ -0,0 +1,968 @@ +#include + +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + */ + +#include +__FBSDID("$FreeBSD$"); + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * A large step happens on boot. This constant detects such steps. + * It is relatively small so that ntp_update_second gets called enough + * in the typical 'missed a couple of seconds' case, but doesn't loop + * forever when the time step is large. + */ +#define LARGE_STEP 200 + +/* + * Implement a dummy timecounter which we can use until we get a real one + * in the air. This allows the console and other early stuff to use + * time services. + */ + +static u_int +dummy_get_timecount(struct timecounter *tc) +{ + static u_int now; + + return (++now); +} + +static struct timecounter dummy_timecounter = { + dummy_get_timecount, 0, ~0u, 1000000, "dummy", -1000000 +}; + +struct timehands { + /* These fields must be initialized by the driver. */ + struct timecounter *th_counter; + int64_t th_adjustment; + u_int64_t th_scale; + u_int th_offset_count; + struct bintime th_offset; + struct timeval th_microtime; + struct timespec th_nanotime; + /* Fields not to be copied in tc_windup start with th_generation. 
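+ * Readers load th_generation before and after copying a slot; a
+ * mismatch, or a zero value, means tc_windup() was rewriting the
+ * slot concurrently and the copy must be retried.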
*/ + volatile u_int th_generation; + struct timehands *th_next; +}; + +static struct timehands th0; +static struct timehands th9 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th0}; +static struct timehands th8 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th9}; +static struct timehands th7 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th8}; +static struct timehands th6 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th7}; +static struct timehands th5 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th6}; +static struct timehands th4 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th5}; +static struct timehands th3 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th4}; +static struct timehands th2 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th3}; +static struct timehands th1 = { NULL, 0, 0, 0, {0, 0}, {0, 0}, {0, 0}, 0, &th2}; +static struct timehands th0 = { + &dummy_timecounter, + 0, + (uint64_t)-1 / 1000000, + 0, + {1, 0}, + {0, 0}, + {0, 0}, + 1, + &th1 +}; + +static struct timehands *volatile timehands = &th0; +struct timecounter *timecounter = &dummy_timecounter; +static struct timecounter *timecounters = &dummy_timecounter; + +time_t time_second = 1; +time_t time_uptime = 1; + +static struct bintime boottimebin; +struct timeval boottime; +static int sysctl_kern_boottime(SYSCTL_HANDLER_ARGS); +SYSCTL_PROC(_kern, KERN_BOOTTIME, boottime, CTLTYPE_STRUCT|CTLFLAG_RD, + NULL, 0, sysctl_kern_boottime, "S,timeval", "System boottime"); + +SYSCTL_NODE(_kern, OID_AUTO, timecounter, CTLFLAG_RW, 0, ""); +SYSCTL_NODE(_kern_timecounter, OID_AUTO, tc, CTLFLAG_RW, 0, ""); + +static int timestepwarnings; +SYSCTL_INT(_kern_timecounter, OID_AUTO, stepwarnings, CTLFLAG_RW, + ×tepwarnings, 0, "Log time steps"); + +static void tc_windup(void); +static void cpu_tick_calibrate(int); + +static int +sysctl_kern_boottime(SYSCTL_HANDLER_ARGS) +{ +#ifdef SCTL_MASK32 + int tv[2]; + + if (req->flags & SCTL_MASK32) { + tv[0] = boottime.tv_sec; + tv[1] = boottime.tv_usec; + return SYSCTL_OUT(req, tv, sizeof(tv)); + } else +#endif + return SYSCTL_OUT(req, &boottime, sizeof(boottime)); +} + +static int +sysctl_kern_timecounter_get(SYSCTL_HANDLER_ARGS) +{ + u_int ncount; + struct timecounter *tc = arg1; + + ncount = tc->tc_get_timecount(tc); + return sysctl_handle_int(oidp, &ncount, 0, req); +} + +static int +sysctl_kern_timecounter_freq(SYSCTL_HANDLER_ARGS) +{ + u_int64_t freq; + struct timecounter *tc = arg1; + + freq = tc->tc_frequency; + return sysctl_handle_quad(oidp, &freq, 0, req); +} + +/* + * Return the difference between the timehands' counter value now and what + * was when we copied it to the timehands' offset_count. + */ +static __inline u_int +tc_delta(struct timehands *th) +{ + struct timecounter *tc; + + tc = th->th_counter; + return ((tc->tc_get_timecount(tc) - th->th_offset_count) & + tc->tc_counter_mask); +} + +/* + * Functions for reading the time. We have to loop until we are sure that + * the timehands that we operated on was not updated under our feet. See + * the comment in for a description of these 12 functions. 
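+ * The twelve are the bin/nano/micro x uptime/time readers plus their
+ * cheaper get*() variants, which return the value cached at the last
+ * tc_windup() call instead of reading the hardware counter.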
+ */ + +void +binuptime(struct bintime *bt) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + bintime_addx(bt, th->th_scale * tc_delta(th)); + } while (gen == 0 || gen != th->th_generation); +} + +void +nanouptime(struct timespec *tsp) +{ + struct bintime bt; + + binuptime(&bt); + bintime2timespec(&bt, tsp); +} + +void +microuptime(struct timeval *tvp) +{ + struct bintime bt; + + binuptime(&bt); + bintime2timeval(&bt, tvp); +} + +void +bintime(struct bintime *bt) +{ + + binuptime(bt); + bintime_add(bt, &boottimebin); +} + +void +nanotime(struct timespec *tsp) +{ + struct bintime bt; + + bintime(&bt); + bintime2timespec(&bt, tsp); +} + +void +microtime(struct timeval *tvp) +{ + struct bintime bt; + + bintime(&bt); + bintime2timeval(&bt, tvp); +} + +void +getbinuptime(struct bintime *bt) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); +} + +void +getnanouptime(struct timespec *tsp) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timespec(&th->th_offset, tsp); + } while (gen == 0 || gen != th->th_generation); +} + +void +getmicrouptime(struct timeval *tvp) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + bintime2timeval(&th->th_offset, tvp); + } while (gen == 0 || gen != th->th_generation); +} + +void +getbintime(struct bintime *bt) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + *bt = th->th_offset; + } while (gen == 0 || gen != th->th_generation); + bintime_add(bt, &boottimebin); +} + +void +getnanotime(struct timespec *tsp) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + *tsp = th->th_nanotime; + } while (gen == 0 || gen != th->th_generation); +} + +void +getmicrotime(struct timeval *tvp) +{ + struct timehands *th; + u_int gen; + + do { + th = timehands; + gen = th->th_generation; + *tvp = th->th_microtime; + } while (gen == 0 || gen != th->th_generation); +} + +/* + * Initialize a new timecounter and possibly use it. + */ +void +tc_init(struct timecounter *tc) +{ + u_int u; + struct sysctl_oid *tc_root; + + u = tc->tc_frequency / tc->tc_counter_mask; + /* XXX: We need some margin here, 10% is a guess */ + u *= 11; + u /= 10; + if (u > hz && tc->tc_quality >= 0) { + tc->tc_quality = -2000; + if (bootverbose) { + printf("Timecounter \"%s\" frequency %ju Hz", + tc->tc_name, (uintmax_t)tc->tc_frequency); + printf(" -- Insufficient hz, needs at least %u\n", u); + } + } else if (tc->tc_quality >= 0 || bootverbose) { + printf("Timecounter \"%s\" frequency %ju Hz quality %d\n", + tc->tc_name, (uintmax_t)tc->tc_frequency, + tc->tc_quality); + } + + tc->tc_next = timecounters; + timecounters = tc; + /* + * Set up sysctl tree for this counter. 
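+ * The node appears as kern.timecounter.tc.<name> and exposes the
+ * counter's mask, current count, frequency and quality.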
+ */ + tc_root = SYSCTL_ADD_NODE(NULL, + SYSCTL_STATIC_CHILDREN(_kern_timecounter_tc), OID_AUTO, tc->tc_name, + CTLFLAG_RW, 0, "timecounter description"); + SYSCTL_ADD_UINT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "mask", CTLFLAG_RD, &(tc->tc_counter_mask), 0, + "mask for implemented bits"); + SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "counter", CTLTYPE_UINT | CTLFLAG_RD, tc, sizeof(*tc), + sysctl_kern_timecounter_get, "IU", "current timecounter value"); + SYSCTL_ADD_PROC(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "frequency", CTLTYPE_QUAD | CTLFLAG_RD, tc, sizeof(*tc), + sysctl_kern_timecounter_freq, "QU", "timecounter frequency"); + SYSCTL_ADD_INT(NULL, SYSCTL_CHILDREN(tc_root), OID_AUTO, + "quality", CTLFLAG_RD, &(tc->tc_quality), 0, + "goodness of time counter"); + /* + * Never automatically use a timecounter with negative quality. + * Even though we run on the dummy counter, switching here may be + * worse since this timecounter may not be monotonous. + */ + if (tc->tc_quality < 0) + return; + if (tc->tc_quality < timecounter->tc_quality) + return; + if (tc->tc_quality == timecounter->tc_quality && + tc->tc_frequency < timecounter->tc_frequency) + return; + (void)tc->tc_get_timecount(tc); + (void)tc->tc_get_timecount(tc); + timecounter = tc; +} + +/* Report the frequency of the current timecounter. */ +u_int64_t +tc_getfrequency(void) +{ + + return (timehands->th_counter->tc_frequency); +} + +/* + * Step our concept of UTC. This is done by modifying our estimate of + * when we booted. + * XXX: not locked. + */ +void +tc_setclock(struct timespec *ts) +{ + struct timespec tbef, taft; + struct bintime bt, bt2; + + cpu_tick_calibrate(1); + nanotime(&tbef); + timespec2bintime(ts, &bt); + binuptime(&bt2); + bintime_sub(&bt, &bt2); + bintime_add(&bt2, &boottimebin); + boottimebin = bt; + bintime2timeval(&bt, &boottime); + + /* XXX fiddle all the little crinkly bits around the fiords... */ + tc_windup(); + nanotime(&taft); + if (timestepwarnings) { + log(LOG_INFO, + "Time stepped from %jd.%09ld to %jd.%09ld (%jd.%09ld)\n", + (intmax_t)tbef.tv_sec, tbef.tv_nsec, + (intmax_t)taft.tv_sec, taft.tv_nsec, + (intmax_t)ts->tv_sec, ts->tv_nsec); + } + cpu_tick_calibrate(1); +} + +/* + * Initialize the next struct timehands in the ring and make + * it the active timehands. Along the way we might switch to a different + * timecounter and/or do seconds processing in NTP. Slightly magic. + */ +static void +tc_windup(void) +{ + struct bintime bt; + struct timehands *th, *tho; + u_int64_t scale; + u_int delta, ncount, ogen; + int i; + time_t t; + + /* + * Make the next timehands a copy of the current one, but do not + * overwrite the generation or next pointer. While we update + * the contents, the generation must be zero. + */ + tho = timehands; + th = tho->th_next; + ogen = th->th_generation; + th->th_generation = 0; + bcopy(tho, th, offsetof(struct timehands, th_generation)); + + /* + * Capture a timecounter delta on the current timecounter and if + * changing timecounters, a counter value from the new timecounter. + * Update the offset fields accordingly. + */ + delta = tc_delta(th); + if (th->th_counter != timecounter) + ncount = timecounter->tc_get_timecount(timecounter); + else + ncount = 0; + th->th_offset_count += delta; + th->th_offset_count &= th->th_counter->tc_counter_mask; + while (delta > th->th_counter->tc_frequency) { + /* Eat complete unadjusted seconds. 
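+ * Keeping delta below tc_frequency bounds the 64-bit product
+ * th_scale * delta computed below, so whole seconds are not lost
+ * to overflow after a long wind-up interval.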
*/ + delta -= th->th_counter->tc_frequency; + th->th_offset.sec++; + } + if ((delta > th->th_counter->tc_frequency / 2) && + (th->th_scale * delta < ((uint64_t)1 << 63))) { + /* The product th_scale * delta just barely overflows. */ + th->th_offset.sec++; + } + bintime_addx(&th->th_offset, th->th_scale * delta); + + /* + * Hardware latching timecounters may not generate interrupts on + * PPS events, so instead we poll them. There is a finite risk that + * the hardware might capture a count which is later than the one we + * got above, and therefore possibly in the next NTP second which might + * have a different rate than the current NTP second. It doesn't + * matter in practice. + */ + if (tho->th_counter->tc_poll_pps) + tho->th_counter->tc_poll_pps(tho->th_counter); + + /* + * Deal with NTP second processing. The for loop normally + * iterates at most once, but in extreme situations it might + * keep NTP sane if timeouts are not run for several seconds. + * At boot, the time step can be large when the TOD hardware + * has been read, so on really large steps, we call + * ntp_update_second only twice. We need to call it twice in + * case we missed a leap second. + */ + bt = th->th_offset; + bintime_add(&bt, &boottimebin); + i = bt.sec - tho->th_microtime.tv_sec; + if (i > LARGE_STEP) + i = 2; + for (; i > 0; i--) { + t = bt.sec; + ntp_update_second(&th->th_adjustment, &bt.sec); + if (bt.sec != t) + boottimebin.sec += bt.sec - t; + } + /* Update the UTC timestamps used by the get*() functions. */ + /* XXX shouldn't do this here. Should force non-`get' versions. */ + bintime2timeval(&bt, &th->th_microtime); + bintime2timespec(&bt, &th->th_nanotime); + + /* Now is a good time to change timecounters. */ + if (th->th_counter != timecounter) { + th->th_counter = timecounter; + th->th_offset_count = ncount; + } + + /*- + * Recalculate the scaling factor. We want the number of 1/2^64 + * fractions of a second per period of the hardware counter, taking + * into account the th_adjustment factor which the NTP PLL/adjtime(2) + * processing provides us with. + * + * The th_adjustment is nanoseconds per second with 32 bit binary + * fraction and we want 64 bit binary fraction of second: + * + * x = a * 2^32 / 10^9 = a * 4.294967296 + * + * The range of th_adjustment is +/- 5000PPM so inside a 64bit int + * we can only multiply by about 850 without overflowing, that + * leaves no suitably precise fractions for multiply before divide. + * + * Divide before multiply with a fraction of 2199/512 results in a + * systematic undercompensation of 10PPM of th_adjustment. On a + * 5000PPM adjustment this is a 0.05PPM error. This is acceptable. + * + * We happily sacrifice the lowest of the 64 bits of our result + * to the goddess of code clarity. + * + */ + scale = (u_int64_t)1 << 63; + scale += (th->th_adjustment / 1024) * 2199; + scale /= th->th_counter->tc_frequency; + th->th_scale = scale * 2; + + /* + * Now that the struct timehands is again consistent, set the new + * generation number, making sure to not make it zero. + */ + if (++ogen == 0) + ogen = 1; + th->th_generation = ogen; + + /* Go live with the new struct timehands. */ + time_second = th->th_microtime.tv_sec; + time_uptime = th->th_offset.sec; + timehands = th; +} + +/* Report or change the active timecounter hardware. 
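+ * Writing a registered counter's name to kern.timecounter.hardware
+ * switches to that counter after warming it up with two reads.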
*/ +static int +sysctl_kern_timecounter_hardware(SYSCTL_HANDLER_ARGS) +{ + char newname[32]; + struct timecounter *newtc, *tc; + int error; + + tc = timecounter; + strlcpy(newname, tc->tc_name, sizeof(newname)); + + error = sysctl_handle_string(oidp, &newname[0], sizeof(newname), req); + if (error != 0 || req->newptr == NULL || + strcmp(newname, tc->tc_name) == 0) + return (error); + for (newtc = timecounters; newtc != NULL; newtc = newtc->tc_next) { + if (strcmp(newname, newtc->tc_name) != 0) + continue; + + /* Warm up new timecounter. */ + (void)newtc->tc_get_timecount(newtc); + (void)newtc->tc_get_timecount(newtc); + + timecounter = newtc; + return (0); + } + return (EINVAL); +} + +SYSCTL_PROC(_kern_timecounter, OID_AUTO, hardware, CTLTYPE_STRING | CTLFLAG_RW, + 0, 0, sysctl_kern_timecounter_hardware, "A", + "Timecounter hardware selected"); + + +/* Report or change the active timecounter hardware. */ +static int +sysctl_kern_timecounter_choice(SYSCTL_HANDLER_ARGS) +{ + char buf[32], *spc; + struct timecounter *tc; + int error; + + spc = ""; + error = 0; + for (tc = timecounters; error == 0 && tc != NULL; tc = tc->tc_next) { + sprintf(buf, "%s%s(%d)", + spc, tc->tc_name, tc->tc_quality); + error = SYSCTL_OUT(req, buf, strlen(buf)); + spc = " "; + } + return (error); +} + +SYSCTL_PROC(_kern_timecounter, OID_AUTO, choice, CTLTYPE_STRING | CTLFLAG_RD, + 0, 0, sysctl_kern_timecounter_choice, "A", "Timecounter hardware detected"); + +/* + * RFC 2783 PPS-API implementation. + */ + +int +pps_ioctl(u_long cmd, caddr_t data, struct pps_state *pps) +{ + pps_params_t *app; + struct pps_fetch_args *fapi; +#ifdef PPS_SYNC + struct pps_kcbind_args *kapi; +#endif + + KASSERT(pps != NULL, ("NULL pps pointer in pps_ioctl")); + switch (cmd) { + case PPS_IOC_CREATE: + return (0); + case PPS_IOC_DESTROY: + return (0); + case PPS_IOC_SETPARAMS: + app = (pps_params_t *)data; + if (app->mode & ~pps->ppscap) + return (EINVAL); + pps->ppsparam = *app; + return (0); + case PPS_IOC_GETPARAMS: + app = (pps_params_t *)data; + *app = pps->ppsparam; + app->api_version = PPS_API_VERS_1; + return (0); + case PPS_IOC_GETCAP: + *(int*)data = pps->ppscap; + return (0); + case PPS_IOC_FETCH: + fapi = (struct pps_fetch_args *)data; + if (fapi->tsformat && fapi->tsformat != PPS_TSFMT_TSPEC) + return (EINVAL); + if (fapi->timeout.tv_sec || fapi->timeout.tv_nsec) + return (EOPNOTSUPP); + pps->ppsinfo.current_mode = pps->ppsparam.mode; + fapi->pps_info_buf = pps->ppsinfo; + return (0); + case PPS_IOC_KCBIND: +#ifdef PPS_SYNC + kapi = (struct pps_kcbind_args *)data; + /* XXX Only root should be able to do this */ + if (kapi->tsformat && kapi->tsformat != PPS_TSFMT_TSPEC) + return (EINVAL); + if (kapi->kernel_consumer != PPS_KC_HARDPPS) + return (EINVAL); + if (kapi->edge & ~pps->ppscap) + return (EINVAL); + pps->kcmode = kapi->edge; + return (0); +#else + return (EOPNOTSUPP); +#endif + default: + return (ENOIOCTL); + } +} + +void +pps_init(struct pps_state *pps) +{ + pps->ppscap |= PPS_TSFMT_TSPEC; + if (pps->ppscap & PPS_CAPTUREASSERT) + pps->ppscap |= PPS_OFFSETASSERT; + if (pps->ppscap & PPS_CAPTURECLEAR) + pps->ppscap |= PPS_OFFSETCLEAR; +} + +void +pps_capture(struct pps_state *pps) +{ + struct timehands *th; + + KASSERT(pps != NULL, ("NULL pps pointer in pps_capture")); + th = timehands; + pps->capgen = th->th_generation; + pps->capth = th; + pps->capcount = th->th_counter->tc_get_timecount(th->th_counter); + if (pps->capgen != th->th_generation) + pps->capgen = 0; +} + +void +pps_event(struct pps_state *pps, int event) +{ 
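+	/*
+	 * Timestamp a previously captured PPS edge: convert the captured
+	 * count to UTC, apply the configured assert/clear offset, bump the
+	 * sequence number and, under PPS_SYNC, feed hardpps().
+	 */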
+ struct bintime bt; + struct timespec ts, *tsp, *osp; + u_int tcount, *pcount; + int foff, fhard; + pps_seq_t *pseq; + + KASSERT(pps != NULL, ("NULL pps pointer in pps_event")); + /* If the timecounter was wound up underneath us, bail out. */ + if (pps->capgen == 0 || pps->capgen != pps->capth->th_generation) + return; + + /* Things would be easier with arrays. */ + if (event == PPS_CAPTUREASSERT) { + tsp = &pps->ppsinfo.assert_timestamp; + osp = &pps->ppsparam.assert_offset; + foff = pps->ppsparam.mode & PPS_OFFSETASSERT; + fhard = pps->kcmode & PPS_CAPTUREASSERT; + pcount = &pps->ppscount[0]; + pseq = &pps->ppsinfo.assert_sequence; + } else { + tsp = &pps->ppsinfo.clear_timestamp; + osp = &pps->ppsparam.clear_offset; + foff = pps->ppsparam.mode & PPS_OFFSETCLEAR; + fhard = pps->kcmode & PPS_CAPTURECLEAR; + pcount = &pps->ppscount[1]; + pseq = &pps->ppsinfo.clear_sequence; + } + + /* + * If the timecounter changed, we cannot compare the count values, so + * we have to drop the rest of the PPS-stuff until the next event. + */ + if (pps->ppstc != pps->capth->th_counter) { + pps->ppstc = pps->capth->th_counter; + *pcount = pps->capcount; + pps->ppscount[2] = pps->capcount; + return; + } + + /* Convert the count to a timespec. */ + tcount = pps->capcount - pps->capth->th_offset_count; + tcount &= pps->capth->th_counter->tc_counter_mask; + bt = pps->capth->th_offset; + bintime_addx(&bt, pps->capth->th_scale * tcount); + bintime_add(&bt, &boottimebin); + bintime2timespec(&bt, &ts); + + /* If the timecounter was wound up underneath us, bail out. */ + if (pps->capgen != pps->capth->th_generation) + return; + + *pcount = pps->capcount; + (*pseq)++; + *tsp = ts; + + if (foff) { + timespecadd(tsp, osp); + if (tsp->tv_nsec < 0) { + tsp->tv_nsec += 1000000000; + tsp->tv_sec -= 1; + } + } +#ifdef PPS_SYNC + if (fhard) { + u_int64_t scale; + + /* + * Feed the NTP PLL/FLL. + * The FLL wants to know how many (hardware) nanoseconds + * elapsed since the previous event. + */ + tcount = pps->capcount - pps->ppscount[2]; + pps->ppscount[2] = pps->capcount; + tcount &= pps->capth->th_counter->tc_counter_mask; + scale = (u_int64_t)1 << 63; + scale /= pps->capth->th_counter->tc_frequency; + scale *= 2; + bt.sec = 0; + bt.frac = 0; + bintime_addx(&bt, scale * tcount); + bintime2timespec(&bt, &ts); + hardpps(tsp, ts.tv_nsec + 1000000000 * ts.tv_sec); + } +#endif +} + +/* + * Timecounters need to be updated every so often to prevent the hardware + * counter from overflowing. Updating also recalculates the cached values + * used by the get*() family of functions, so their precision depends on + * the update frequency. + */ + +static int tc_tick; +SYSCTL_INT(_kern_timecounter, OID_AUTO, tick, CTLFLAG_RD, &tc_tick, 0, + "Approximate number of hardclock ticks in a millisecond"); + +void +tc_ticktock(void) +{ + static int count; + static time_t last_calib; + + if (++count < tc_tick) + return; + count = 0; + tc_windup(); + if (time_uptime != last_calib && !(time_uptime & 0xf)) { + cpu_tick_calibrate(0); + last_calib = time_uptime; + } +} + +static void +inittimecounter(void *dummy) +{ + u_int p; + + /* + * Set the initial timeout to + * max(1, ). + * People should probably not use the sysctl to set the timeout + * to smaller than its inital value, since that value is the + * smallest reasonable one. If they want better timestamps they + * should use the non-"get"* functions. 
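+ * With hz <= 1000 this works out to tc_tick = 1, i.e. tc_windup()
+ * runs on every hardclock tick.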
+ */ + if (hz > 1000) + tc_tick = (hz + 500) / 1000; + else + tc_tick = 1; + p = (tc_tick * 1000000) / hz; + printf("Timecounters tick every %d.%03u msec\n", p / 1000, p % 1000); + + /* warm up new timecounter (again) and get rolling. */ + (void)timecounter->tc_get_timecount(timecounter); + (void)timecounter->tc_get_timecount(timecounter); +} + +SYSINIT(timecounter, SI_SUB_CLOCKS, SI_ORDER_SECOND, inittimecounter, NULL); + +/* Cpu tick handling -------------------------------------------------*/ + +static int cpu_tick_variable; +static uint64_t cpu_tick_frequency; + +static uint64_t +tc_cpu_ticks(void) +{ + static uint64_t base; + static unsigned last; + unsigned u; + struct timecounter *tc; + + tc = timehands->th_counter; + u = tc->tc_get_timecount(tc) & tc->tc_counter_mask; + if (u < last) + base += (uint64_t)tc->tc_counter_mask + 1; + last = u; + return (u + base); +} + +/* + * This function gets called every 16 seconds on only one designated + * CPU in the system from hardclock() via tc_ticktock(). + * + * Whenever the real time clock is stepped we get called with reset=1 + * to make sure we handle suspend/resume and similar events correctly. + */ + +static void +cpu_tick_calibrate(int reset) +{ + static uint64_t c_last; + uint64_t c_this, c_delta; + static struct bintime t_last; + struct bintime t_this, t_delta; + uint32_t divi; + + if (reset) { + /* The clock was stepped, abort & reset */ + t_last.sec = 0; + return; + } + + /* we don't calibrate fixed rate cputicks */ + if (!cpu_tick_variable) + return; + + getbinuptime(&t_this); + c_this = cpu_ticks(); + if (t_last.sec != 0) { + c_delta = c_this - c_last; + t_delta = t_this; + bintime_sub(&t_delta, &t_last); + /* + * Validate that 16 +/- 1/256 seconds passed. + * After division by 16 this gives us a precision of + * roughly 250PPM which is sufficient + */ + if (t_delta.sec > 16 || ( + t_delta.sec == 16 && t_delta.frac >= (0x01LL << 56))) { + /* too long */ + if (bootverbose) + printf("t_delta %ju.%016jx too long\n", + (uintmax_t)t_delta.sec, + (uintmax_t)t_delta.frac); + } else if (t_delta.sec < 15 || + (t_delta.sec == 15 && t_delta.frac <= (0xffLL << 56))) { + /* too short */ + if (bootverbose) + printf("t_delta %ju.%016jx too short\n", + (uintmax_t)t_delta.sec, + (uintmax_t)t_delta.frac); + } else { + /* just right */ + /* + * Headroom: + * 2^(64-20) / 16[s] = + * 2^(44) / 16[s] = + * 17.592.186.044.416 / 16 = + * 1.099.511.627.776 [Hz] + */ + divi = t_delta.sec << 20; + divi |= t_delta.frac >> (64 - 20); + c_delta <<= 20; + c_delta /= divi; + if (c_delta > cpu_tick_frequency) { + if (0 && bootverbose) + printf("cpu_tick increased to %ju Hz\n", + c_delta); + cpu_tick_frequency = c_delta; + } + } + } + c_last = c_this; + t_last = t_this; +} + +void +set_cputicker(cpu_tick_f *func, uint64_t freq, unsigned var) +{ + + if (func == NULL) { + cpu_ticks = tc_cpu_ticks; + } else { + cpu_tick_frequency = freq; + cpu_tick_variable = var; + cpu_ticks = func; + } +} + +uint64_t +cpu_tickrate(void) +{ + + if (cpu_ticks == tc_cpu_ticks) + return (tc_getfrequency()); + return (cpu_tick_frequency); +} + +/* + * We need to be slightly careful converting cputicks to microseconds. + * There is plenty of margin in 64 bits of microseconds (half a million + * years) and in 64 bits at 4 GHz (146 years), but if we do a multiply + * before divide conversion (to retain precision) we find that the + * margin shrinks to 1.5 hours (one millionth of 146y). 
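+ * Hence the floor(2^64 / 1000) and floor(2^64 / 1000000) cutoffs in
+ * cputick2usec() below, which pick the largest pre-multiplication
+ * that cannot overflow before the divide.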
+ * With a three prong approach we never lose significant bits, no + * matter what the cputick rate and length of timeinterval is. + */ + +uint64_t +cputick2usec(uint64_t tick) +{ + + if (tick > 18446744073709551LL) /* floor(2^64 / 1000) */ + return (tick / (cpu_tickrate() / 1000000LL)); + else if (tick > 18446744073709LL) /* floor(2^64 / 1000000) */ + return ((tick * 1000LL) / (cpu_tickrate() / 1000LL)); + else + return ((tick * 1000000LL) / cpu_tickrate()); +} + +cpu_tick_f *cpu_ticks = tc_cpu_ticks; diff --git a/freebsd/local/opt_ntp.h b/freebsd/local/opt_ntp.h new file mode 100644 index 00000000..936ffd88 --- /dev/null +++ b/freebsd/local/opt_ntp.h @@ -0,0 +1 @@ +/* EMPTY */ diff --git a/freebsd/sys/timepps.h b/freebsd/sys/timepps.h new file mode 100644 index 00000000..1e5e84fc --- /dev/null +++ b/freebsd/sys/timepps.h @@ -0,0 +1,200 @@ +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + * + * $FreeBSD$ + * + * The is a FreeBSD version of the RFC 2783 API for Pulse Per Second + * timing interfaces. + */ + +#ifndef _SYS_TIMEPPS_HH_ +#define _SYS_TIMEPPS_HH_ + +#include +#include + +#define PPS_API_VERS_1 1 + +typedef int pps_handle_t; + +typedef unsigned pps_seq_t; + +typedef struct ntp_fp { + unsigned int integral; + unsigned int fractional; +} ntp_fp_t; + +typedef union pps_timeu { + struct timespec tspec; + ntp_fp_t ntpfp; + unsigned long longpad[3]; +} pps_timeu_t; + +typedef struct { + pps_seq_t assert_sequence; /* assert event seq # */ + pps_seq_t clear_sequence; /* clear event seq # */ + pps_timeu_t assert_tu; + pps_timeu_t clear_tu; + int current_mode; /* current mode bits */ +} pps_info_t; + +#define assert_timestamp assert_tu.tspec +#define clear_timestamp clear_tu.tspec + +#define assert_timestamp_ntpfp assert_tu.ntpfp +#define clear_timestamp_ntpfp clear_tu.ntpfp + +typedef struct { + int api_version; /* API version # */ + int mode; /* mode bits */ + pps_timeu_t assert_off_tu; + pps_timeu_t clear_off_tu; +} pps_params_t; + +#define assert_offset assert_off_tu.tspec +#define clear_offset clear_off_tu.tspec + +#define assert_offset_ntpfp assert_off_tu.ntpfp +#define clear_offset_ntpfp clear_off_tu.ntpfp + + +#define PPS_CAPTUREASSERT 0x01 +#define PPS_CAPTURECLEAR 0x02 +#define PPS_CAPTUREBOTH 0x03 + +#define PPS_OFFSETASSERT 0x10 +#define PPS_OFFSETCLEAR 0x20 + +#define PPS_ECHOASSERT 0x40 +#define PPS_ECHOCLEAR 0x80 + +#define PPS_CANWAIT 0x100 +#define PPS_CANPOLL 0x200 + +#define PPS_TSFMT_TSPEC 0x1000 +#define PPS_TSFMT_NTPFP 0x2000 + +#define PPS_KC_HARDPPS 0 +#define PPS_KC_HARDPPS_PLL 1 +#define PPS_KC_HARDPPS_FLL 2 + +struct pps_fetch_args { + int tsformat; + pps_info_t pps_info_buf; + struct timespec timeout; +}; + +struct pps_kcbind_args { + int kernel_consumer; + int edge; + int tsformat; +}; + +#define PPS_IOC_CREATE _IO('1', 1) +#define PPS_IOC_DESTROY _IO('1', 2) +#define PPS_IOC_SETPARAMS _IOW('1', 3, pps_params_t) +#define PPS_IOC_GETPARAMS _IOR('1', 4, pps_params_t) +#define PPS_IOC_GETCAP _IOR('1', 5, int) +#define PPS_IOC_FETCH _IOWR('1', 6, struct pps_fetch_args) +#define PPS_IOC_KCBIND _IOW('1', 7, struct pps_kcbind_args) + +#ifdef _KERNEL + +struct pps_state { + /* Capture 
information. */ + struct timehands *capth; + unsigned capgen; + unsigned capcount; + + /* State information. */ + pps_params_t ppsparam; + pps_info_t ppsinfo; + int kcmode; + int ppscap; + struct timecounter *ppstc; + unsigned ppscount[3]; +}; + +void pps_capture(struct pps_state *pps); +void pps_event(struct pps_state *pps, int event); +void pps_init(struct pps_state *pps); +int pps_ioctl(unsigned long cmd, caddr_t data, struct pps_state *pps); +void hardpps(struct timespec *tsp, long nsec); + +#else /* !_KERNEL */ + +static __inline int +time_pps_create(int filedes, pps_handle_t *handle) +{ + int error; + + *handle = -1; + error = ioctl(filedes, PPS_IOC_CREATE, 0); + if (error < 0) + return (-1); + *handle = filedes; + return (0); +} + +static __inline int +time_pps_destroy(pps_handle_t handle) +{ + return (ioctl(handle, PPS_IOC_DESTROY, 0)); +} + +static __inline int +time_pps_setparams(pps_handle_t handle, const pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_SETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getparams(pps_handle_t handle, pps_params_t *ppsparams) +{ + return (ioctl(handle, PPS_IOC_GETPARAMS, ppsparams)); +} + +static __inline int +time_pps_getcap(pps_handle_t handle, int *mode) +{ + return (ioctl(handle, PPS_IOC_GETCAP, mode)); +} + +static __inline int +time_pps_fetch(pps_handle_t handle, const int tsformat, + pps_info_t *ppsinfobuf, const struct timespec *timeout) +{ + int error; + struct pps_fetch_args arg; + + arg.tsformat = tsformat; + if (timeout == NULL) { + arg.timeout.tv_sec = -1; + arg.timeout.tv_nsec = -1; + } else + arg.timeout = *timeout; + error = ioctl(handle, PPS_IOC_FETCH, &arg); + *ppsinfobuf = arg.pps_info_buf; + return (error); +} + +static __inline int +time_pps_kcbind(pps_handle_t handle, const int kernel_consumer, + const int edge, const int tsformat) +{ + struct pps_kcbind_args arg; + + arg.kernel_consumer = kernel_consumer; + arg.edge = edge; + arg.tsformat = tsformat; + return (ioctl(handle, PPS_IOC_KCBIND, &arg)); +} + +#endif /* KERNEL */ + +#endif /* !_SYS_TIMEPPS_HH_ */ diff --git a/freebsd/sys/timetc.h b/freebsd/sys/timetc.h new file mode 100644 index 00000000..6b1c9b91 --- /dev/null +++ b/freebsd/sys/timetc.h @@ -0,0 +1,78 @@ +/*- + * ---------------------------------------------------------------------------- + * "THE BEER-WARE LICENSE" (Revision 42): + * wrote this file. As long as you retain this notice you + * can do whatever you want with this stuff. If we meet some day, and you think + * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp + * ---------------------------------------------------------------------------- + * + * $FreeBSD$ + */ + +#ifndef _SYS_TIMETC_HH_ +#define _SYS_TIMETC_HH_ + +#ifndef _KERNEL +#error "no user-serviceable parts inside" +#endif + +/*- + * `struct timecounter' is the interface between the hardware which implements + * a timecounter and the MI code which uses this to keep track of time. + * + * A timecounter is a binary counter which has two properties: + * * it runs at a fixed, known frequency. + * * it has sufficient bits to not roll over in less than approximately + * max(2 msec, 2/HZ seconds). (The value 2 here is really 1 + delta, + * for some indeterminate value of delta.) + */ + +struct timecounter; +typedef u_int timecounter_get_t(struct timecounter *); +typedef void timecounter_pps_t(struct timecounter *); + +struct timecounter { + timecounter_get_t *tc_get_timecount; + /* + * This function reads the counter. 
It is not required to + * mask any unimplemented bits out, as long as they are + * constant. + */ + timecounter_pps_t *tc_poll_pps; + /* + * This function is optional. It will be called whenever the + * timecounter is rewound, and is intended to check for PPS + * events. Normal hardware does not need it but timecounters + * which latch PPS in hardware (like sys/pci/xrpu.c) do. + */ + u_int tc_counter_mask; + /* This mask should mask off any unimplemented bits. */ + u_int64_t tc_frequency; + /* Frequency of the counter in Hz. */ + char *tc_name; + /* Name of the timecounter. */ + int tc_quality; + /* + * Used to determine if this timecounter is better than + * another timecounter higher means better. Negative + * means "only use at explicit request". + */ + + void *tc_priv; + /* Pointer to the timecounter's private parts. */ + struct timecounter *tc_next; + /* Pointer to the next timecounter. */ +}; + +extern struct timecounter *timecounter; + +u_int64_t tc_getfrequency(void); +void tc_init(struct timecounter *tc); +void tc_setclock(struct timespec *ts); +void tc_ticktock(void); + +#ifdef SYSCTL_DECL +SYSCTL_DECL(_kern_timecounter); +#endif + +#endif /* !_SYS_TIMETC_HH_ */ diff --git a/freebsd/sys/timex.h b/freebsd/sys/timex.h new file mode 100644 index 00000000..838a0e05 --- /dev/null +++ b/freebsd/sys/timex.h @@ -0,0 +1,238 @@ +/*- + *********************************************************************** + * * + * Copyright (c) David L. Mills 1993-2001 * + * * + * Permission to use, copy, modify, and distribute this software and * + * its documentation for any purpose and without fee is hereby * + * granted, provided that the above copyright notice appears in all * + * copies and that both the copyright notice and this permission * + * notice appear in supporting documentation, and that the name * + * University of Delaware not be used in advertising or publicity * + * pertaining to distribution of the software without specific, * + * written prior permission. The University of Delaware makes no * + * representations about the suitability this software for any * + * purpose. It is provided "as is" without express or implied * + * warranty. * + * * + **********************************************************************/ + +/* + * Modification history timex.h + * + * 16 Aug 00 David L. Mills + * API Version 4. Added MOD_TAI and tai member of ntptimeval + * structure. + * + * 17 Nov 98 David L. Mills + * Revised for nanosecond kernel and user interface. + * + * 26 Sep 94 David L. Mills + * Added defines for hybrid phase/frequency-lock loop. + * + * 19 Mar 94 David L. Mills + * Moved defines from kernel routines to header file and added new + * defines for PPS phase-lock loop. + * + * 20 Feb 94 David L. Mills + * Revised status codes and structures for external clock and PPS + * signal discipline. + * + * 28 Nov 93 David L. Mills + * Adjusted parameters to improve stability and increase poll + * interval. + * + * 17 Sep 93 David L. Mills + * Created file + * + * $FreeBSD$ + */ +/* + * This header file defines the Network Time Protocol (NTP) interfaces + * for user and daemon application programs. These are implemented using + * defined syscalls and data structures and require specific kernel + * support. + * + * The original precision time kernels developed from 1993 have an + * ultimate resolution of one microsecond; however, the most recent + * kernels have an ultimate resolution of one nanosecond. 
In these + * kernels, a ntp_adjtime() syscalls can be used to determine which + * resolution is in use and to select either one at any time. The + * resolution selected affects the scaling of certain fields in the + * ntp_gettime() and ntp_adjtime() syscalls, as described below. + * + * NAME + * ntp_gettime - NTP user application interface + * + * SYNOPSIS + * #include + * + * int ntp_gettime(struct ntptimeval *ntv); + * + * DESCRIPTION + * The time returned by ntp_gettime() is in a timespec structure, + * but may be in either microsecond (seconds and microseconds) or + * nanosecond (seconds and nanoseconds) format. The particular + * format in use is determined by the STA_NANO bit of the status + * word returned by the ntp_adjtime() syscall. + * + * NAME + * ntp_adjtime - NTP daemon application interface + * + * SYNOPSIS + * #include + * #include + * + * int syscall(SYS_ntp_adjtime, tptr); + * int SYS_ntp_adjtime; + * struct timex *tptr; + * + * DESCRIPTION + * Certain fields of the timex structure are interpreted in either + * microseconds or nanoseconds according to the state of the + * STA_NANO bit in the status word. See the description below for + * further information. + */ +#ifndef _SYS_TIMEX_HH_ +#define _SYS_TIMEX_HH_ 1 +#define NTP_API 4 /* NTP API version */ + +#ifndef __rtems__ +#ifndef MSDOS /* Microsoft specific */ +#include +#endif /* MSDOS */ +#endif + +/* + * The following defines establish the performance envelope of the + * kernel discipline loop. Phase or frequency errors greater than + * NAXPHASE or MAXFREQ are clamped to these maxima. For update intervals + * less than MINSEC, the loop always operates in PLL mode; while, for + * update intervals greater than MAXSEC, the loop always operates in FLL + * mode. Between these two limits the operating mode is selected by the + * STA_FLL bit in the status word. + */ +#define MAXPHASE 500000000L /* max phase error (ns) */ +#define MAXFREQ 500000L /* max freq error (ns/s) */ +#define MINSEC 256 /* min FLL update interval (s) */ +#define MAXSEC 2048 /* max PLL update interval (s) */ +#define NANOSECOND 1000000000L /* nanoseconds in one second */ +#define SCALE_PPM (65536 / 1000) /* crude ns/s to scaled PPM */ +#define MAXTC 10 /* max time constant */ + +/* + * The following defines and structures define the user interface for + * the ntp_gettime() and ntp_adjtime() syscalls. 
+ * + * Control mode codes (timex.modes) + */ +#define MOD_OFFSET 0x0001 /* set time offset */ +#define MOD_FREQUENCY 0x0002 /* set frequency offset */ +#define MOD_MAXERROR 0x0004 /* set maximum time error */ +#define MOD_ESTERROR 0x0008 /* set estimated time error */ +#define MOD_STATUS 0x0010 /* set clock status bits */ +#define MOD_TIMECONST 0x0020 /* set PLL time constant */ +#define MOD_PPSMAX 0x0040 /* set PPS maximum averaging time */ +#define MOD_TAI 0x0080 /* set TAI offset */ +#define MOD_MICRO 0x1000 /* select microsecond resolution */ +#define MOD_NANO 0x2000 /* select nanosecond resolution */ +#define MOD_CLKB 0x4000 /* select clock B */ +#define MOD_CLKA 0x8000 /* select clock A */ + +/* + * Status codes (timex.status) + */ +#define STA_PLL 0x0001 /* enable PLL updates (rw) */ +#define STA_PPSFREQ 0x0002 /* enable PPS freq discipline (rw) */ +#define STA_PPSTIME 0x0004 /* enable PPS time discipline (rw) */ +#define STA_FLL 0x0008 /* enable FLL mode (rw) */ +#define STA_INS 0x0010 /* insert leap (rw) */ +#define STA_DEL 0x0020 /* delete leap (rw) */ +#define STA_UNSYNC 0x0040 /* clock unsynchronized (rw) */ +#define STA_FREQHOLD 0x0080 /* hold frequency (rw) */ +#define STA_PPSSIGNAL 0x0100 /* PPS signal present (ro) */ +#define STA_PPSJITTER 0x0200 /* PPS signal jitter exceeded (ro) */ +#define STA_PPSWANDER 0x0400 /* PPS signal wander exceeded (ro) */ +#define STA_PPSERROR 0x0800 /* PPS signal calibration error (ro) */ +#define STA_CLOCKERR 0x1000 /* clock hardware fault (ro) */ +#define STA_NANO 0x2000 /* resolution (0 = us, 1 = ns) (ro) */ +#define STA_MODE 0x4000 /* mode (0 = PLL, 1 = FLL) (ro) */ +#define STA_CLK 0x8000 /* clock source (0 = A, 1 = B) (ro) */ + +#define STA_RONLY (STA_PPSSIGNAL | STA_PPSJITTER | STA_PPSWANDER | \ + STA_PPSERROR | STA_CLOCKERR | STA_NANO | STA_MODE | STA_CLK) + +/* + * Clock states (time_state) + */ +#define TIME_OK 0 /* no leap second warning */ +#define TIME_INS 1 /* insert leap second warning */ +#define TIME_DEL 2 /* delete leap second warning */ +#define TIME_OOP 3 /* leap second in progress */ +#define TIME_WAIT 4 /* leap second has occured */ +#define TIME_ERROR 5 /* error (see status word) */ + +/* + * NTP user interface (ntp_gettime()) - used to read kernel clock values + * + * Note: The time member is in microseconds if STA_NANO is zero and + * nanoseconds if not. + */ +struct ntptimeval { + struct timespec time; /* current time (ns) (ro) */ + long maxerror; /* maximum error (us) (ro) */ + long esterror; /* estimated error (us) (ro) */ + long tai; /* TAI offset */ + int time_state; /* time status */ +}; + +/* + * NTP daemon interface (ntp_adjtime()) - used to discipline CPU clock + * oscillator and determine status. + * + * Note: The offset, precision and jitter members are in microseconds if + * STA_NANO is zero and nanoseconds if not. + */ +struct timex { + unsigned int modes; /* clock mode bits (wo) */ + long offset; /* time offset (ns/us) (rw) */ + long freq; /* frequency offset (scaled PPM) (rw) */ + long maxerror; /* maximum error (us) (rw) */ + long esterror; /* estimated error (us) (rw) */ + int status; /* clock status bits (rw) */ + long constant; /* poll interval (log2 s) (rw) */ + long precision; /* clock precision (ns/us) (ro) */ + long tolerance; /* clock frequency tolerance (scaled + * PPM) (ro) */ + /* + * The following read-only structure members are implemented + * only if the PPS signal discipline is configured in the + * kernel. They are included in all configurations to insure + * portability. 
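+ * In FreeBSD the PPS signal discipline is enabled with the PPS_SYNC
+ * kernel option.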
+ */ + long ppsfreq; /* PPS frequency (scaled PPM) (ro) */ + long jitter; /* PPS jitter (ns/us) (ro) */ + int shift; /* interval duration (s) (shift) (ro) */ + long stabil; /* PPS stability (scaled PPM) (ro) */ + long jitcnt; /* jitter limit exceeded (ro) */ + long calcnt; /* calibration intervals (ro) */ + long errcnt; /* calibration errors (ro) */ + long stbcnt; /* stability limit exceeded (ro) */ +}; + +#ifdef __FreeBSD__ + +#ifdef _KERNEL +void ntp_update_second(int64_t *adjustment, time_t *newsec); +#else /* !_KERNEL */ +#include + +__BEGIN_DECLS +int ntp_adjtime(struct timex *); +int ntp_gettime(struct ntptimeval *); +__END_DECLS +#endif /* _KERNEL */ + +#endif /* __FreeBSD__ */ + +#endif /* !_SYS_TIMEX_HH_ */
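
For reference, a BSP wires a hardware timer into this code by filling in a
struct timecounter and handing it to tc_init(); the counter then drives
time_second, the *uptime()/*time() readers and tc_windup(). The sketch below
is only illustrative: the register address, frequency, "my-timer" name and
the include paths are placeholders, not part of this patch or of the
rtems-libbsd tree.

#include <sys/types.h>
#include <sys/timetc.h>		/* struct timecounter, tc_init(); path may differ in this tree */

/* Hypothetical free-running 32-bit counter clocked at 1 MHz. */
#define MY_TIMER_COUNT_REG	(*(volatile uint32_t *)0x40001000)
#define MY_TIMER_FREQUENCY	1000000u

static u_int
my_timer_get_timecount(struct timecounter *tc)
{
	/* Only the bits covered by tc_counter_mask have to be valid. */
	return (MY_TIMER_COUNT_REG);
}

static struct timecounter my_timer_tc = {
	.tc_get_timecount = my_timer_get_timecount,
	.tc_poll_pps      = NULL,		/* no hardware PPS latch */
	.tc_counter_mask  = 0xffffffffu,
	.tc_frequency     = MY_TIMER_FREQUENCY,
	.tc_name          = "my-timer",
	.tc_quality       = 100,		/* positive: eligible for automatic selection */
};

void
my_timer_install(void)
{
	/*
	 * tc_init() registers the counter, creates its sysctl nodes and,
	 * because the quality beats the dummy counter's -1000000, makes
	 * it the active timecounter.
	 */
	tc_init(&my_timer_tc);
}

After registration the periodic clock tick only has to call tc_ticktock() so
that tc_windup() keeps the cached timehands fresh.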