Source code pulled from OpenBSD for OpenNTPD. The place to contribute to this code is via the OpenBSD CVS tree.

/*	$OpenBSD: ntp.c,v 1.157 2019/06/27 15:18:42 otto Exp $ */

/*
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>

#include <errno.h>
#include <fcntl.h>
#include <paths.h>
#include <poll.h>
#include <pwd.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <time.h>
#include <unistd.h>
#include <err.h>

#include "ntpd.h"

#define	PFD_PIPE_MAIN	0
#define	PFD_PIPE_DNS	1
#define	PFD_SOCK_CTL	2
#define	PFD_MAX		3
volatile sig_atomic_t	 ntp_quit = 0;
struct imsgbuf		*ibuf_main;
struct imsgbuf		*ibuf_dns;
struct ntpd_conf	*conf;
struct ctl_conns	 ctl_conns;
u_int			 peer_cnt;
u_int			 sensors_cnt;
extern u_int		 constraint_cnt;

void	ntp_sighdlr(int);
int	ntp_dispatch_imsg(void);
int	ntp_dispatch_imsg_dns(void);
void	peer_add(struct ntp_peer *);
void	peer_remove(struct ntp_peer *);
void
ntp_sighdlr(int sig)
{
	switch (sig) {
	case SIGINT:
	case SIGTERM:
		ntp_quit = 1;
		break;
	}
}
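/*
 * Main loop of the unprivileged ntp engine process: it drops privileges,
 * chroots into the _ntpd user's home directory, then polls the pipes to
 * the parent and DNS processes, the control socket, the listening sockets
 * and the per-peer query sockets, querying peers and sensors as their
 * timers expire.
 */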
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int a, b, nfds, i, j, idx_peers, timeout;
	int nullfd, pipe_dns[2], idx_clients;
	int ctls;
	int fd_ctl;
	u_int pfd_elms = 0, idx2peer_elms = 0;
	u_int listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int ctl_cnt;
	struct pollfd *pfd = NULL;
	struct servent *se;
	struct listen_addr *la;
	struct ntp_peer *p;
	struct ntp_peer **idx2peer = NULL;
	struct ntp_sensor *s, *next_s;
	struct constraint *cstr;
	struct timespec tp;
	struct stat stb;
	struct ctl_conn *cc;
	time_t nextaction, last_sensor_scan = 0, now;
	void *newp;

	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		nextaction = getmonotime() + 3600;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			if (constraint_cnt && conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				client_nextaddr(p);
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				client_nextaddr(p);
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		if ((nfds = poll(pfd, i, timeout * 1000)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_warn("pipe write error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe write error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime, conf->automatic) == -1) {
					log_warn("pipe write error (settime)");
					ntp_quit = 1;
				}
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}
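/*
 * Handle imsgs arriving from the privileged parent process: adjtime
 * sync-status changes and constraint results/closes.
 */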
int
ntp_dispatch_imsg(void)
{
	struct imsg imsg;
	int n;

	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
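/*
 * Handle imsgs arriving from the DNS process: resolved peer addresses
 * (cloning pool peers once per resolved address), constraint lookups and
 * the result of the root-server probe.
 */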
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg imsg;
	struct ntp_peer *peer, *npeer, *tmp;
	u_int16_t dlen;
	u_char *p;
	struct ntp_addr *h;
	int n;

	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}
			/*
			 * For the redo dns case we want to have only one clone
			 * of the pool peer, since it will be cloned again
			 */
			if (peer->addr_head.pool) {
				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->id == peer->id)
						continue;
					if (strcmp(npeer->addr_head.name,
					    peer->addr_head.name) == 0)
						peer_remove(npeer);
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_warnx("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (++conf->tmpfail > TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			p = (u_char *)imsg.data;
			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);

				if (peer->addr_head.pool) {
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool = 1;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
				} else {
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
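/* Bookkeeping helpers that keep peer_cnt in sync with the peer list. */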
void
peer_add(struct ntp_peer *p)
{
	TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
	peer_cnt++;
}

void
peer_remove(struct ntp_peer *p)
{
	TAILQ_REMOVE(&conf->ntp_peers, p, entry);
	free(p);
	peer_cnt--;
}
void
peer_addr_head_clear(struct ntp_peer *p)
{
	struct ntp_addr *a = p->addr_head.a;

	while (a) {
		struct ntp_addr *next = a->next;
		free(a);
		a = next;
	}
	p->addr_head.a = NULL;
	p->addr = NULL;
}
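/*
 * Accumulate offset samples and, every FREQUENCY_SAMPLES samples, run a
 * least-squares fit of accumulated offset against time to estimate the
 * clock's frequency error, which is sent to the parent as IMSG_ADJFREQ.
 */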
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced) {
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}
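/*
 * Pick the weighted median of the good peer and sensor offsets and ask
 * the parent to adjust the clock by it; returns 1 if a trusted peer has
 * no good update yet or no usable offsets exist, 0 otherwise.
 */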
int
priv_adjtime(void)
{
	struct ntp_peer *p;
	struct ntp_sensor *s;
	int offset_cnt = 0, i = 0, j;
	struct ntp_offset **offsets;
	double offset_median;

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}
int
offset_compare(const void *aa, const void *bb)
{
	const struct ntp_offset * const *a;
	const struct ntp_offset * const *b;

	a = aa;
	b = bb;

	if ((*a)->offset < (*b)->offset)
		return (-1);
	else if ((*a)->offset > (*b)->offset)
		return (1);
	else
		return (0);
}
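/*
 * Hand the measured offset to the parent for the initial time step; an
 * offset of 0 cancels the pending settime and logs the reason in msg.
 */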
void
priv_settime(double offset, char *msg)
{
	if (offset == 0)
		log_info("cancel settime because %s", msg);
	imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
	    &offset, sizeof(offset));
	conf->settime = 0;
}
void
priv_dns(int cmd, char *name, u_int32_t peerid)
{
	u_int16_t dlen = 0;

	if (name != NULL)
		dlen = strlen(name) + 1;
	imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
}
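/*
 * Recompute the query-interval scale: large offsets (or an unsynced
 * clock) poll at the base rate, small offsets stretch the interval by up
 * to QSCALE_OFF_MAX / QSCALE_OFF_MIN.
 */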
void
update_scale(double offset)
{
	offset += getoffset();
	if (offset < 0)
		offset = -offset;

	if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
	    conf->freq.num < 3)
		conf->scale = 1;
	else if (offset < QSCALE_OFF_MIN)
		conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	else
		conf->scale = QSCALE_OFF_MAX / offset;
}
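/*
 * Apply the current scale to a requested query interval and add random
 * jitter; error_interval() is the corresponding retry interval used
 * after failures.
 */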
time_t
scale_interval(time_t requested)
{
	time_t interval, r;

	interval = requested * conf->scale;
	r = arc4random_uniform(MAXIMUM(5, interval / 10));
	return (interval + r);
}

time_t
error_interval(void)
{
	time_t interval, r;

	interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
	r = arc4random_uniform(interval / 10);
	return (interval + r);
}