Source code pulled from OpenBSD for OpenNTPD. The place to contribute to this code is via the OpenBSD CVS tree.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

773 lines
18 KiB

19 years ago
18 years ago
20 years ago
20 years ago
  1. /* $OpenBSD: ntp.c,v 1.145 2017/01/20 01:21:18 phessler Exp $ */
  2. /*
  3. * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
  4. * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <sys/types.h>
  19. #include <sys/time.h>
  20. #include <sys/stat.h>
  21. #include <errno.h>
  22. #include <fcntl.h>
  23. #include <paths.h>
  24. #include <poll.h>
  25. #include <pwd.h>
  26. #include <signal.h>
  27. #include <stdlib.h>
  28. #include <string.h>
  29. #include <syslog.h>
  30. #include <time.h>
  31. #include <unistd.h>
  32. #include <err.h>
  33. #include "ntpd.h"
  34. #define PFD_PIPE_MAIN 0
  35. #define PFD_PIPE_DNS 1
  36. #define PFD_SOCK_CTL 2
  37. #define PFD_MAX 3
  38. volatile sig_atomic_t ntp_quit = 0;
  39. struct imsgbuf *ibuf_main;
  40. struct imsgbuf *ibuf_dns;
  41. struct ntpd_conf *conf;
  42. struct ctl_conns ctl_conns;
  43. u_int peer_cnt;
  44. u_int sensors_cnt;
  45. extern u_int constraint_cnt;
  46. void ntp_sighdlr(int);
  47. int ntp_dispatch_imsg(void);
  48. int ntp_dispatch_imsg_dns(void);
  49. void peer_add(struct ntp_peer *);
  50. void peer_remove(struct ntp_peer *);
  51. void
  52. ntp_sighdlr(int sig)
  53. {
  54. switch (sig) {
  55. case SIGINT:
  56. case SIGTERM:
  57. ntp_quit = 1;
  58. break;
  59. }
  60. }
/*
 * Main entry point of the unprivileged "ntp engine" process.
 *
 * Forks the DNS child, starts the control socket and UDP listeners,
 * drops privileges (chroot into pw->pw_dir, setres[ug]id, pledge
 * "stdio inet"), then runs a poll(2) event loop until ntp_quit is
 * set: servicing the imsg pipes to the parent and DNS processes, the
 * control socket, server listeners, in-flight client queries,
 * sensors and constraints.
 *
 * nconf     - parsed configuration; becomes the global "conf"
 * pw        - privsep user; its home directory is the chroot target
 * argc/argv - forwarded to start_child() for the DNS process
 *
 * Does not return: calls exit(0) once the loop terminates.
 */
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int a, b, nfds, i, j, idx_peers, timeout;
	int nullfd, pipe_dns[2], idx_clients;
	int ctls;
	int fd_ctl;
	u_int pfd_elms = 0, idx2peer_elms = 0;
	u_int listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int ctl_cnt;
	struct pollfd *pfd = NULL;
	struct servent *se;
	struct listen_addr *la;
	struct ntp_peer *p;
	struct ntp_peer **idx2peer = NULL;
	struct ntp_sensor *s, *next_s;
	struct constraint *cstr;
	struct timespec tp;
	struct stat stb;
	struct ctl_conn *cc;
	time_t nextaction, last_sensor_scan = 0, now;
	void *newp;

	/* Spawn the DNS resolver child connected through a socketpair. */
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	/* in this case the parent didn't init logging and didn't daemonize */
	if (nconf->settime && !nconf->debug) {
		log_init(nconf->debug, LOG_DAEMON);
		if (setsid() == -1)
			fatal("setsid");
	}
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");

	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	/* Refuse to chroot into a non-root-owned or group/other-writable dir. */
	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	setup_listeners(se, conf, &listener_cnt);

	/* Drop to the unprivileged ntpd user. */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	/* imsg channels to the privileged parent and to the DNS child. */
	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	/* Reset the frequency-regression accumulators (see priv_adjfreq). */
	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	/* Express the clock resolution as a power of two (log2 of Hz). */
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		/* Grow the pollfd-index -> peer map if peers were added. */
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		/* Grow the pollfd array to hold every possible descriptor. */
		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		/* Default poll timeout: one hour, tightened below. */
		nextaction = getmonotime() + 3600;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		/* Server listeners occupy the slots after the fixed three. */
		i = PFD_MAX;
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			/* With constraints configured, wait for a median. */
			if (constraint_cnt && conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				/* Halve trust on timeout; report crossing
				 * below the BADPEER threshold. */
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				client_nextaddr(p);
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				client_nextaddr(p);
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}
			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			/* Poll query sockets awaiting a server reply. */
			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors)) {
			/* Rescan hardware sensors periodically. */
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0);	/* no good peers, don't wait */

		/* Only ask for POLLOUT when there is queued imsg data. */
		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		/* Control connections take the tail of the pollfd array. */
		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		now = getmonotime();
		/* Pending constraints: re-check once per second. */
		if (constraint_cnt)
			nextaction = now + 1;

		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		if ((nfds = poll(pfd, i, timeout * 1000)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_warn("pipe write error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe write error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		/* Incoming NTP requests on the server listeners. */
		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		/* Replies to our own client queries. */
		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime) == -1) {
					log_warn("pipe write error (settime)");
					ntp_quit = 1;
				}
			}
		}

		/* Control clients. */
		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		/* Saved-next iteration so the loop survives list changes. */
		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	/* Flush pending imsgs to parent and DNS child, then exit. */
	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}
  373. int
  374. ntp_dispatch_imsg(void)
  375. {
  376. struct imsg imsg;
  377. int n;
  378. if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
  379. return (-1);
  380. for (;;) {
  381. if ((n = imsg_get(ibuf_main, &imsg)) == -1)
  382. return (-1);
  383. if (n == 0)
  384. break;
  385. switch (imsg.hdr.type) {
  386. case IMSG_ADJTIME:
  387. memcpy(&n, imsg.data, sizeof(n));
  388. if (n == 1 && !conf->status.synced) {
  389. log_info("clock is now synced");
  390. conf->status.synced = 1;
  391. } else if (n == 0 && conf->status.synced) {
  392. log_info("clock is now unsynced");
  393. conf->status.synced = 0;
  394. }
  395. break;
  396. case IMSG_CONSTRAINT_RESULT:
  397. constraint_msg_result(imsg.hdr.peerid,
  398. imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
  399. break;
  400. case IMSG_CONSTRAINT_CLOSE:
  401. constraint_msg_close(imsg.hdr.peerid,
  402. imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
  403. break;
  404. default:
  405. break;
  406. }
  407. imsg_free(&imsg);
  408. }
  409. return (0);
  410. }
/*
 * Drain and handle imsg messages from the DNS child process.
 *
 * IMSG_HOST_DNS carries the result of a hostname lookup for the peer
 * identified by imsg.hdr.peerid: a packed array of sockaddr_storage
 * (empty payload signals a temporary failure).  For pool peers each
 * resolved address is turned into its own cloned peer and the
 * template peer is removed; otherwise the addresses are prepended to
 * the peer's address list.
 *
 * Returns 0 on success, -1 on a read failure or a closed pipe.
 */
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg imsg;
	struct ntp_peer *peer, *npeer;
	u_int16_t dlen;
	u_char *p;
	struct ntp_addr *h;
	int n;

	/* n == 0 means the DNS child closed its end of the pipe. */
	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			/* Look up the peer this answer belongs to. */
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				peer->state = STATE_DNS_TEMPFAIL;
				break;
			}

			/* Walk the packed sockaddr_storage array. */
			p = (u_char *)imsg.data;
			while (dlen >= sizeof(struct sockaddr_storage)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				if (peer->addr_head.pool) {
					/*
					 * Pool: clone one peer per address,
					 * inheriting weight and name.
					 */
					npeer = new_peer();
					npeer->weight = peer->weight;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool = 1;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
				} else {
					/* Prepend to the address list. */
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			/* Payload must be an exact multiple of entries. */
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				/* Template peer is replaced by its clones. */
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
  490. void
  491. peer_add(struct ntp_peer *p)
  492. {
  493. TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
  494. peer_cnt++;
  495. }
  496. void
  497. peer_remove(struct ntp_peer *p)
  498. {
  499. TAILQ_REMOVE(&conf->ntp_peers, p, entry);
  500. free(p);
  501. peer_cnt--;
  502. }
/*
 * Feed one offset sample into a linear regression of accumulated
 * offset against (corrected) time.  Every FREQUENCY_SAMPLES samples
 * the least-squares slope — the frequency error — is clamped to
 * +/-MAX_FREQUENCY_ADJUST and sent to the privileged parent via
 * IMSG_ADJFREQ, after which the regression window restarts.
 * Samples are only collected while the clock is synced.
 */
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	if (!conf->status.synced){
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	/* Regress on the running sum of offsets, not single samples. */
	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	/* Accumulate the sums needed for the least-squares slope. */
	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	/* Least-squares slope of offset over time. */
	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;

	/* Reset the regression window for the next batch. */
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}
/*
 * Compute the weighted median of all trusted peer and good sensor
 * offsets and send it to the privileged parent via IMSG_ADJTIME.
 * Also updates the local clock status (rootdelay, stratum, leap,
 * refid, reftime) from the median entry, feeds the frequency
 * regression, rescales the query interval, and re-bases all stored
 * offsets around the adjustment just applied.
 *
 * Returns 0 when an adjustment was sent, 1 when nothing is usable
 * yet (no trusted source, or a trusted peer without a good update).
 */
int
priv_adjtime(void)
{
	struct ntp_peer *p;
	struct ntp_sensor *s;
	int offset_cnt = 0, i = 0, j;
	struct ntp_offset **offsets;
	double offset_median;

	/* Count weighted votes; bail if any trusted peer isn't ready. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	/* Each source appears "weight" times, making the median weighted. */
	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	/* For an even count, prefer the middle entry with lower delay. */
	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();
	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	/* Re-base stored samples relative to the applied adjustment. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}
  608. int
  609. offset_compare(const void *aa, const void *bb)
  610. {
  611. const struct ntp_offset * const *a;
  612. const struct ntp_offset * const *b;
  613. a = aa;
  614. b = bb;
  615. if ((*a)->offset < (*b)->offset)
  616. return (-1);
  617. else if ((*a)->offset > (*b)->offset)
  618. return (1);
  619. else
  620. return (0);
  621. }
  622. void
  623. priv_settime(double offset)
  624. {
  625. imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
  626. &offset, sizeof(offset));
  627. conf->settime = 0;
  628. }
  629. void
  630. priv_dns(int cmd, char *name, u_int32_t peerid)
  631. {
  632. u_int16_t dlen;
  633. dlen = strlen(name) + 1;
  634. imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
  635. }
  636. void
  637. update_scale(double offset)
  638. {
  639. offset += getoffset();
  640. if (offset < 0)
  641. offset = -offset;
  642. if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
  643. conf->freq.num < 3)
  644. conf->scale = 1;
  645. else if (offset < QSCALE_OFF_MIN)
  646. conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
  647. else
  648. conf->scale = QSCALE_OFF_MAX / offset;
  649. }
  650. time_t
  651. scale_interval(time_t requested)
  652. {
  653. time_t interval, r;
  654. interval = requested * conf->scale;
  655. r = arc4random_uniform(MAXIMUM(5, interval / 10));
  656. return (interval + r);
  657. }
  658. time_t
  659. error_interval(void)
  660. {
  661. time_t interval, r;
  662. interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
  663. r = arc4random_uniform(interval / 10);
  664. return (interval + r);
  665. }