Source code pulled from OpenBSD for OpenNTPD. The place to contribute to this code is via the OpenBSD CVS tree.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

868 lines
21 KiB

19 years ago
18 years ago
19 years ago
19 years ago
  1. /* $OpenBSD: ntp.c,v 1.165 2020/06/22 06:11:34 otto Exp $ */
  2. /*
  3. * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
  4. * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <sys/types.h>
  19. #include <sys/time.h>
  20. #include <sys/stat.h>
  21. #include <errno.h>
  22. #include <fcntl.h>
  23. #include <paths.h>
  24. #include <poll.h>
  25. #include <pwd.h>
  26. #include <signal.h>
  27. #include <stdlib.h>
  28. #include <string.h>
  29. #include <syslog.h>
  30. #include <time.h>
  31. #include <unistd.h>
  32. #include <err.h>
  33. #include "ntpd.h"
/*
 * Fixed slots at the head of the pollfd array built each loop iteration;
 * listener, peer-query and control-connection fds follow after PFD_MAX.
 */
#define	PFD_PIPE_MAIN	0	/* imsg pipe to the privileged parent */
#define	PFD_PIPE_DNS	1	/* imsg pipe to the DNS resolver process */
#define	PFD_SOCK_CTL	2	/* listening control socket (ntpctl) */
#define	PFD_MAX		3	/* number of fixed slots */

volatile sig_atomic_t	 ntp_quit = 0;	/* set by ntp_sighdlr(); ends main loop */
struct imsgbuf		*ibuf_main;	/* imsg buffer to the privileged parent */
static struct imsgbuf	*ibuf_dns;	/* imsg buffer to the DNS process */
struct ntpd_conf	*conf;		/* active engine configuration */
struct ctl_conns	 ctl_conns;	/* list of open ntpctl connections */
u_int			 peer_cnt;	/* entries in conf->ntp_peers */
u_int			 sensors_cnt;	/* timedelta sensors found by sensor_scan() */
extern u_int		 constraint_cnt;

void	 ntp_sighdlr(int);
int	 ntp_dispatch_imsg(void);
int	 ntp_dispatch_imsg_dns(void);
void	 peer_add(struct ntp_peer *);
void	 peer_remove(struct ntp_peer *);
int	 inpool(struct sockaddr_storage *,
	    struct sockaddr_storage[MAX_SERVERS_DNS], size_t);
  53. void
  54. ntp_sighdlr(int sig)
  55. {
  56. switch (sig) {
  57. case SIGINT:
  58. case SIGTERM:
  59. ntp_quit = 1;
  60. break;
  61. }
  62. }
/*
 * Main entry point of the unprivileged "ntp engine" process.  Sets up
 * the DNS child process, the control socket and the listening sockets,
 * chroots to the privsep user's home directory and drops privileges,
 * then runs the poll(2)-driven event loop until ntp_quit is set.
 *
 * The event loop rebuilds the pollfd array every iteration: three fixed
 * slots (parent pipe, DNS pipe, control socket), then one slot per
 * listen address, per in-flight client query, and per open ntpctl
 * connection.  "nextaction" tracks the earliest pending timed event so
 * the poll timeout can be derived from it.
 */
void
ntp_main(struct ntpd_conf *nconf, struct passwd *pw, int argc, char **argv)
{
	int			 a, b, nfds, i, j, idx_peers, timeout;
	int			 nullfd, pipe_dns[2], idx_clients;
	int			 ctls;
	int			 fd_ctl;
	u_int			 pfd_elms = 0, idx2peer_elms = 0;
	u_int			 listener_cnt, new_cnt, sent_cnt, trial_cnt;
	u_int			 ctl_cnt;
	struct pollfd		*pfd = NULL;
	struct servent		*se;
	struct listen_addr	*la;
	struct ntp_peer		*p;
	struct ntp_peer		**idx2peer = NULL;
	struct ntp_sensor	*s, *next_s;
	struct constraint	*cstr;
	struct timespec		 tp;
	struct stat		 stb;
	struct ctl_conn		*cc;
	time_t			 nextaction, last_sensor_scan = 0, now;
	void			*newp;

	/* Socketpair to the DNS resolver child; [1] goes to the child. */
	if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, PF_UNSPEC,
	    pipe_dns) == -1)
		fatal("socketpair");

	start_child(NTPDNS_PROC_NAME, pipe_dns[1], argc, argv);

	log_init(nconf->debug ? LOG_TO_STDERR : LOG_TO_SYSLOG, nconf->verbose,
	    LOG_DAEMON);
	if (!nconf->debug && setsid() == -1)
		fatal("setsid");
	log_procinit("ntp");

	if ((se = getservbyname("ntp", "udp")) == NULL)
		fatal("getservbyname");

	/* Start control socket. */
	if ((fd_ctl = control_init(CTLSOCKET)) == -1)
		fatalx("control socket init failed");
	if (control_listen(fd_ctl) == -1)
		fatalx("control socket listen failed");
	if ((nullfd = open("/dev/null", O_RDWR, 0)) == -1)
		fatal(NULL);

	/* Refuse to chroot into a directory writable by group/other. */
	if (stat(pw->pw_dir, &stb) == -1) {
		fatal("privsep dir %s could not be opened", pw->pw_dir);
	}
	if (stb.st_uid != 0 || (stb.st_mode & (S_IWGRP|S_IWOTH)) != 0) {
		fatalx("bad privsep dir %s permissions: %o",
		    pw->pw_dir, stb.st_mode);
	}
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	if (!nconf->debug) {
		dup2(nullfd, STDIN_FILENO);
		dup2(nullfd, STDOUT_FILENO);
		dup2(nullfd, STDERR_FILENO);
	}
	close(nullfd);

	setproctitle("ntp engine");

	conf = nconf;
	/* Listeners must be bound before privileges are dropped. */
	setup_listeners(se, conf, &listener_cnt);

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	endservent();

	/* The ntp process will want to open NTP client sockets -> "inet" */
	if (pledge("stdio inet", NULL) == -1)
		err(1, "pledge");

	signal(SIGTERM, ntp_sighdlr);
	signal(SIGINT, ntp_sighdlr);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);
	signal(SIGCHLD, SIG_DFL);

	if ((ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_main, PARENT_SOCK_FILENO);
	if ((ibuf_dns = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_dns, pipe_dns[0]);

	constraint_cnt = 0;
	conf->constraint_median = 0;
	conf->constraint_last = getmonotime();
	TAILQ_FOREACH(cstr, &conf->constraints, entry)
		constraint_cnt += constraint_init(cstr);

	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		client_peer_init(p);

	memset(&conf->status, 0, sizeof(conf->status));

	/* Reset the frequency-drift regression accumulators. */
	conf->freq.num = 0;
	conf->freq.samples = 0;
	conf->freq.x = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.xy = 0.0;
	conf->freq.y = 0.0;
	conf->freq.overall_offset = 0.0;

	conf->status.synced = 0;
	clock_getres(CLOCK_REALTIME, &tp);
	b = 1000000000 / tp.tv_nsec;	/* convert to Hz */
	/* precision = -log2(clock resolution in Hz), as per NTP spec */
	for (a = 0; b > 1; a--, b >>= 1)
		;
	conf->status.precision = a;
	conf->scale = 1;

	TAILQ_INIT(&ctl_conns);
	sensor_init();

	log_info("ntp engine ready");

	ctl_cnt = 0;
	peer_cnt = 0;
	TAILQ_FOREACH(p, &conf->ntp_peers, entry)
		peer_cnt++;

	while (ntp_quit == 0) {
		/* Grow idx2peer[] (pollfd slot -> peer map) as needed. */
		if (peer_cnt > idx2peer_elms) {
			if ((newp = reallocarray(idx2peer, peer_cnt,
			    sizeof(*idx2peer))) == NULL) {
				/* panic for now */
				log_warn("could not resize idx2peer from %u -> "
				    "%u entries", idx2peer_elms, peer_cnt);
				fatalx("exiting");
			}
			idx2peer = newp;
			idx2peer_elms = peer_cnt;
		}

		new_cnt = PFD_MAX +
		    peer_cnt + listener_cnt + ctl_cnt;
		if (new_cnt > pfd_elms) {
			if ((newp = reallocarray(pfd, new_cnt,
			    sizeof(*pfd))) == NULL) {
				/* panic for now */
				log_warn("could not resize pfd from %u -> "
				    "%u entries", pfd_elms, new_cnt);
				fatalx("exiting");
			}
			pfd = newp;
			pfd_elms = new_cnt;
		}

		memset(pfd, 0, sizeof(*pfd) * pfd_elms);
		memset(idx2peer, 0, sizeof(*idx2peer) * idx2peer_elms);
		/* Default wakeup: 15 minutes from now, shrunk below. */
		nextaction = getmonotime() + 900;
		pfd[PFD_PIPE_MAIN].fd = ibuf_main->fd;
		pfd[PFD_PIPE_MAIN].events = POLLIN;
		pfd[PFD_PIPE_DNS].fd = ibuf_dns->fd;
		pfd[PFD_PIPE_DNS].events = POLLIN;
		pfd[PFD_SOCK_CTL].fd = fd_ctl;
		pfd[PFD_SOCK_CTL].events = POLLIN;

		i = PFD_MAX;
		/* One slot per server-mode listening socket. */
		TAILQ_FOREACH(la, &conf->listen_addrs, entry) {
			pfd[i].fd = la->fd;
			pfd[i].events = POLLIN;
			i++;
		}

		idx_peers = i;
		sent_cnt = trial_cnt = 0;
		TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
			/*
			 * Hold back queries to untrusted peers until the
			 * constraint machinery has produced a median.
			 */
			if (!p->trusted && constraint_cnt &&
			    conf->constraint_median == 0)
				continue;

			if (p->next > 0 && p->next <= getmonotime()) {
				if (p->state > STATE_DNS_INPROGRESS)
					trial_cnt++;
				if (client_query(p) == 0)
					sent_cnt++;
			}
			/* Query deadline passed: penalize and retry. */
			if (p->deadline > 0 && p->deadline <= getmonotime()) {
				timeout = 300;
				log_debug("no reply from %s received in time, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss), timeout);
				if (p->trustlevel >= TRUSTLEVEL_BADPEER &&
				    (p->trustlevel /= 2) < TRUSTLEVEL_BADPEER)
					log_info("peer %s now invalid",
					    log_sockaddr(
					    (struct sockaddr *)&p->addr->ss));
				/* Exhausted all addresses: re-resolve. */
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, timeout);
			}
			if (p->senderrors > MAX_SEND_ERRORS) {
				log_debug("failed to send query to %s, "
				    "next query %ds", log_sockaddr(
				    (struct sockaddr *)&p->addr->ss),
				    INTERVAL_QUERY_PATHETIC);
				p->senderrors = 0;
				if (client_nextaddr(p) == 1) {
					peer_addr_head_clear(p);
					client_nextaddr(p);
				}
				set_next(p, INTERVAL_QUERY_PATHETIC);
			}

			if (p->next > 0 && p->next < nextaction)
				nextaction = p->next;
			if (p->deadline > 0 && p->deadline < nextaction)
				nextaction = p->deadline;

			/* Poll the query socket while a reply is pending. */
			if (p->state == STATE_QUERY_SENT &&
			    p->query->fd != -1) {
				pfd[i].fd = p->query->fd;
				pfd[i].events = POLLIN;
				idx2peer[i - idx_peers] = p;
				i++;
			}
		}
		idx_clients = i;

		/*
		 * Sensors are consulted only when trusted, or when no
		 * constraints exist / a constraint median is available.
		 */
		if (!TAILQ_EMPTY(&conf->ntp_conf_sensors) &&
		    (conf->trusted_sensors || constraint_cnt == 0 ||
		    conf->constraint_median != 0)) {
			if (last_sensor_scan == 0 ||
			    last_sensor_scan + SENSOR_SCAN_INTERVAL <= getmonotime()) {
				sensors_cnt = sensor_scan();
				last_sensor_scan = getmonotime();
			}
			if (sensors_cnt == 0 &&
			    nextaction > last_sensor_scan + SENSOR_SCAN_INTERVAL)
				nextaction = last_sensor_scan + SENSOR_SCAN_INTERVAL;
			sensors_cnt = 0;
			TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
				if (conf->settime && s->offsets[0].offset)
					priv_settime(s->offsets[0].offset, NULL);
				sensors_cnt++;
				if (s->next > 0 && s->next < nextaction)
					nextaction = s->next;
			}
		}

		/* Nothing to sync against: cancel the initial settime. */
		if (conf->settime &&
		    ((trial_cnt > 0 && sent_cnt == 0) ||
		    (peer_cnt == 0 && sensors_cnt == 0)))
			priv_settime(0, "no valid peers configured");

		TAILQ_FOREACH(cstr, &conf->constraints, entry) {
			if (constraint_query(cstr) == -1)
				continue;
		}

		if (ibuf_main->w.queued > 0)
			pfd[PFD_PIPE_MAIN].events |= POLLOUT;
		if (ibuf_dns->w.queued > 0)
			pfd[PFD_PIPE_DNS].events |= POLLOUT;

		/* One slot per open ntpctl connection. */
		TAILQ_FOREACH(cc, &ctl_conns, entry) {
			pfd[i].fd = cc->ibuf.fd;
			pfd[i].events = POLLIN;
			if (cc->ibuf.w.queued > 0)
				pfd[i].events |= POLLOUT;
			i++;
		}
		ctls = i;

		now = getmonotime();
		timeout = nextaction - now;
		if (timeout < 0)
			timeout = 0;

		/* timeout is in seconds; 0 becomes a 1 ms poll. */
		if ((nfds = poll(pfd, i, timeout ? timeout * 1000 : 1)) == -1)
			if (errno != EINTR) {
				log_warn("poll error");
				ntp_quit = 1;
			}

		if (nfds > 0 && (pfd[PFD_PIPE_MAIN].revents & POLLOUT))
			if (msgbuf_write(&ibuf_main->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to parent)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_MAIN].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg() == -1) {
				log_debug("pipe read error (from main)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && (pfd[PFD_PIPE_DNS].revents & POLLOUT))
			if (msgbuf_write(&ibuf_dns->w) <= 0 &&
			    errno != EAGAIN) {
				log_warn("pipe write error (to dns engine)");
				ntp_quit = 1;
			}

		if (nfds > 0 && pfd[PFD_PIPE_DNS].revents & (POLLIN|POLLERR)) {
			nfds--;
			if (ntp_dispatch_imsg_dns() == -1) {
				log_warn("pipe read error (from dns engine)");
				ntp_quit = 1;
			}
		}

		if (nfds > 0 && pfd[PFD_SOCK_CTL].revents & (POLLIN|POLLERR)) {
			nfds--;
			ctl_cnt += control_accept(fd_ctl);
		}

		/* Server-mode: answer queries on the listening sockets. */
		for (j = PFD_MAX; nfds > 0 && j < idx_peers; j++)
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (server_dispatch(pfd[j].fd, conf) == -1) {
					log_warn("pipe write error (conf)");
					ntp_quit = 1;
				}
			}

		/* Client-mode: handle replies on our query sockets. */
		for (; nfds > 0 && j < idx_clients; j++) {
			if (pfd[j].revents & (POLLIN|POLLERR)) {
				nfds--;
				if (client_dispatch(idx2peer[j - idx_peers],
				    conf->settime, conf->automatic) == -1) {
					log_warn("pipe write error (settime)");
					ntp_quit = 1;
				}
			}
		}

		for (; nfds > 0 && j < ctls; j++) {
			nfds -= control_dispatch_msg(&pfd[j], &ctl_cnt);
		}

		/* Safe iteration: sensor_query() may remove entries. */
		for (s = TAILQ_FIRST(&conf->ntp_sensors); s != NULL;
		    s = next_s) {
			next_s = TAILQ_NEXT(s, entry);
			if (s->next <= getmonotime())
				sensor_query(s);
		}
	}

	/* Flush pending imsgs to both pipes before exiting. */
	msgbuf_write(&ibuf_main->w);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	msgbuf_write(&ibuf_dns->w);
	msgbuf_clear(&ibuf_dns->w);
	free(ibuf_dns);

	log_info("ntp engine exiting");
	exit(0);
}
/*
 * Drain and handle imsgs arriving from the privileged parent process.
 * Returns 0 on success, -1 if the pipe was closed or reading failed
 * (the caller then shuts the engine down).
 */
int
ntp_dispatch_imsg(void)
{
	struct imsg		 imsg;
	int			 n;

	/* n == 0 means the parent closed the pipe: report failure. */
	if (((n = imsg_read(ibuf_main)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_main, &imsg)) == -1)
			return (-1);

		if (n == 0)	/* no complete message buffered */
			break;

		switch (imsg.hdr.type) {
		case IMSG_ADJTIME:
			/* Parent reports whether the clock is now synced. */
			memcpy(&n, imsg.data, sizeof(n));
			if (n == 1 && !conf->status.synced) {
				log_info("clock is now synced");
				conf->status.synced = 1;
				priv_dns(IMSG_SYNCED, NULL, 0);
				constraint_reset();
			} else if (n == 0 && conf->status.synced) {
				log_info("clock is now unsynced");
				conf->status.synced = 0;
				priv_dns(IMSG_UNSYNCED, NULL, 0);
			}
			break;
		case IMSG_CONSTRAINT_RESULT:
			constraint_msg_result(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_CONSTRAINT_CLOSE:
			constraint_msg_close(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
  421. int
  422. inpool(struct sockaddr_storage *a,
  423. struct sockaddr_storage old[MAX_SERVERS_DNS], size_t n)
  424. {
  425. size_t i;
  426. for (i = 0; i < n; i++) {
  427. if (a->ss_family != old[i].ss_family)
  428. continue;
  429. if (a->ss_family == AF_INET) {
  430. if (((struct sockaddr_in *)a)->sin_addr.s_addr ==
  431. ((struct sockaddr_in *)&old[i])->sin_addr.s_addr)
  432. return 1;
  433. } else if (memcmp(&((struct sockaddr_in6 *)a)->sin6_addr,
  434. &((struct sockaddr_in6 *)&old[i])->sin6_addr,
  435. sizeof(struct sockaddr_in6)) == 0) {
  436. return 1;
  437. }
  438. }
  439. return 0;
  440. }
/*
 * Drain and handle imsgs arriving from the DNS resolver process.
 * The main message, IMSG_HOST_DNS, delivers the resolved addresses for
 * a peer: for a plain server they are chained onto the peer; for a pool
 * entry a new peer is created per fresh address and the placeholder
 * peer is removed.  Returns 0 on success, -1 on pipe error/EOF.
 */
int
ntp_dispatch_imsg_dns(void)
{
	struct imsg		 imsg;
	struct sockaddr_storage	 existing[MAX_SERVERS_DNS];
	struct ntp_peer		*peer, *npeer, *tmp;
	u_int16_t		 dlen;
	u_char			*p;
	struct ntp_addr		*h;
	size_t			 addrcount, peercount;
	int			 n;

	if (((n = imsg_read(ibuf_dns)) == -1 && errno != EAGAIN) || n == 0)
		return (-1);

	for (;;) {
		if ((n = imsg_get(ibuf_dns, &imsg)) == -1)
			return (-1);

		if (n == 0)	/* no complete message buffered */
			break;

		switch (imsg.hdr.type) {
		case IMSG_HOST_DNS:
			/* Find the peer this lookup was issued for. */
			TAILQ_FOREACH(peer, &conf->ntp_peers, entry)
				if (peer->id == imsg.hdr.peerid)
					break;
			if (peer == NULL) {
				log_warnx("IMSG_HOST_DNS with invalid peerID");
				break;
			}
			if (peer->addr != NULL) {
				log_warnx("IMSG_HOST_DNS but addr != NULL!");
				break;
			}

			if (peer->addr_head.pool) {
				/*
				 * Collect the addresses already used by
				 * sibling peers of the same pool so we can
				 * skip duplicates below.
				 * NOTE(review): existing[] is indexed by n
				 * without an explicit MAX_SERVERS_DNS bound;
				 * presumably the pool peer count is capped
				 * upstream — verify against the resolver.
				 */
				n = 0;
				peercount = 0;

				TAILQ_FOREACH_SAFE(npeer, &conf->ntp_peers,
				    entry, tmp) {
					if (npeer->addr_head.pool !=
					    peer->addr_head.pool)
						continue;
					peercount++;
					if (npeer->id == peer->id)
						continue;
					if (npeer->addr != NULL)
						existing[n++] = npeer->addr->ss;
				}
			}

			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen == 0) {	/* no data -> temp error */
				log_warnx("DNS lookup tempfail");
				peer->state = STATE_DNS_TEMPFAIL;
				if (conf->tmpfail++ == TRIES_AUTO_DNSFAIL)
					priv_settime(0, "of dns failures");
				break;
			}

			/*
			 * Payload is a sequence of
			 * (struct sockaddr_storage, int notauth) records.
			 */
			p = (u_char *)imsg.data;
			addrcount = dlen / (sizeof(struct sockaddr_storage) +
			    sizeof(int));

			while (dlen >= sizeof(struct sockaddr_storage) +
			    sizeof(int)) {
				if ((h = calloc(1, sizeof(struct ntp_addr))) ==
				    NULL)
					fatal(NULL);
				memcpy(&h->ss, p, sizeof(h->ss));
				p += sizeof(h->ss);
				dlen -= sizeof(h->ss);
				memcpy(&h->notauth, p, sizeof(int));
				p += sizeof(int);
				dlen -= sizeof(int);

				if (peer->addr_head.pool) {
					/* Pool already large enough. */
					if (peercount > addrcount) {
						free(h);
						continue;
					}
					/* Skip addresses already in use. */
					if (inpool(&h->ss, existing,
					    n)) {
						free(h);
						continue;
					}
					log_debug("Adding address %s to %s",
					    log_sockaddr((struct sockaddr *)
					    &h->ss), peer->addr_head.name);
					/*
					 * Spawn a full peer for this pool
					 * address, inheriting the template
					 * peer's settings.
					 */
					npeer = new_peer();
					npeer->weight = peer->weight;
					npeer->query_addr4 = peer->query_addr4;
					npeer->query_addr6 = peer->query_addr6;
					h->next = NULL;
					npeer->addr = h;
					npeer->addr_head.a = h;
					npeer->addr_head.name =
					    peer->addr_head.name;
					npeer->addr_head.pool =
					    peer->addr_head.pool;
					client_peer_init(npeer);
					npeer->state = STATE_DNS_DONE;
					peer_add(npeer);
					peercount++;
				} else {
					/* Plain server: chain the address. */
					h->next = peer->addr;
					peer->addr = h;
					peer->addr_head.a = peer->addr;
					peer->state = STATE_DNS_DONE;
				}
			}
			if (dlen != 0)
				fatalx("IMSG_HOST_DNS: dlen != 0");
			if (peer->addr_head.pool)
				/* The pool placeholder has served its role. */
				peer_remove(peer);
			else
				client_addr_init(peer);
			break;
		case IMSG_CONSTRAINT_DNS:
			constraint_msg_dns(imsg.hdr.peerid,
			    imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
			break;
		case IMSG_PROBE_ROOT:
			dlen = imsg.hdr.len - IMSG_HEADER_SIZE;
			if (dlen != sizeof(int))
				fatalx("IMSG_PROBE_ROOT");
			memcpy(&n, imsg.data, sizeof(int));
			if (n < 0)
				priv_settime(0, "dns probe failed");
			break;
		default:
			break;
		}
		imsg_free(&imsg);
	}
	return (0);
}
  570. void
  571. peer_add(struct ntp_peer *p)
  572. {
  573. TAILQ_INSERT_TAIL(&conf->ntp_peers, p, entry);
  574. peer_cnt++;
  575. }
  576. void
  577. peer_remove(struct ntp_peer *p)
  578. {
  579. TAILQ_REMOVE(&conf->ntp_peers, p, entry);
  580. free(p);
  581. peer_cnt--;
  582. }
  583. void
  584. peer_addr_head_clear(struct ntp_peer *p)
  585. {
  586. host_dns_free(p->addr_head.a);
  587. p->addr_head.a = NULL;
  588. p->addr = NULL;
  589. }
/*
 * Feed one clock-offset sample into the frequency-drift estimator and,
 * every FREQUENCY_SAMPLES samples, ask the privileged parent to adjust
 * the clock frequency.  The estimator is a least-squares linear
 * regression of accumulated offset (y) against corrected time (x):
 * the slope of that line is the frequency error.
 */
static void
priv_adjfreq(double offset)
{
	double curtime, freq;

	/* Only accumulate samples while the clock is synced. */
	if (!conf->status.synced){
		conf->freq.samples = 0;
		return;
	}

	conf->freq.samples++;

	if (conf->freq.samples <= 0)
		return;

	conf->freq.overall_offset += offset;
	offset = conf->freq.overall_offset;

	/* Update the regression sums: x, y, xy, xx. */
	curtime = gettime_corrected();
	conf->freq.xy += offset * curtime;
	conf->freq.x += curtime;
	conf->freq.y += offset;
	conf->freq.xx += curtime * curtime;

	if (conf->freq.samples % FREQUENCY_SAMPLES != 0)
		return;

	/* Least-squares slope: cov(x,y) / var(x). */
	freq =
	    (conf->freq.xy - conf->freq.x * conf->freq.y / conf->freq.samples)
	    /
	    (conf->freq.xx - conf->freq.x * conf->freq.x / conf->freq.samples);

	/* Clamp to the maximum adjustment the kernel should apply. */
	if (freq > MAX_FREQUENCY_ADJUST)
		freq = MAX_FREQUENCY_ADJUST;
	else if (freq < -MAX_FREQUENCY_ADJUST)
		freq = -MAX_FREQUENCY_ADJUST;

	imsg_compose(ibuf_main, IMSG_ADJFREQ, 0, 0, -1, &freq, sizeof(freq));
	conf->filters |= FILTER_ADJFREQ;
	/* Restart the regression with fresh accumulators. */
	conf->freq.xy = 0.0;
	conf->freq.x = 0.0;
	conf->freq.y = 0.0;
	conf->freq.xx = 0.0;
	conf->freq.samples = 0;
	conf->freq.overall_offset = 0.0;
	conf->freq.num++;
}
/*
 * Compute a weighted median of the offsets reported by all valid peers
 * and sensors and send it to the privileged parent as the clock
 * adjustment.  Returns 0 if an adjustment was sent, 1 if not enough
 * good data is available yet (some valid peer has no good update, or
 * there are no usable offsets at all).
 */
int
priv_adjtime(void)
{
	struct ntp_peer		 *p;
	struct ntp_sensor	 *s;
	int			  offset_cnt = 0, i = 0, j;
	struct ntp_offset	**offsets;
	double			  offset_median;

	/*
	 * First pass: count weighted offsets.  Wait (return 1) until
	 * every valid peer has contributed a good update.
	 */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		if (!p->update.good)
			return (1);
		offset_cnt += p->weight;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		offset_cnt += s->weight;
	}

	if (offset_cnt == 0)
		return (1);

	if ((offsets = calloc(offset_cnt, sizeof(struct ntp_offset *))) == NULL)
		fatal("calloc priv_adjtime");

	/*
	 * Second pass: fill the array, repeating each entry "weight"
	 * times so that the median is weight-aware.
	 */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER)
			continue;
		for (j = 0; j < p->weight; j++)
			offsets[i++] = &p->update;
	}

	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		if (!s->update.good)
			continue;
		for (j = 0; j < s->weight; j++)
			offsets[i++] = &s->update;
	}

	qsort(offsets, offset_cnt, sizeof(struct ntp_offset *), offset_compare);

	/* Pick the median; on ties prefer the lower-delay candidate. */
	i = offset_cnt / 2;
	if (offset_cnt % 2 == 0)
		if (offsets[i - 1]->delay < offsets[i]->delay)
			i -= 1;
	offset_median = offsets[i]->offset;
	conf->status.rootdelay = offsets[i]->delay;
	conf->status.stratum = offsets[i]->status.stratum;
	conf->status.leap = offsets[i]->status.leap;

	imsg_compose(ibuf_main, IMSG_ADJTIME, 0, 0, -1,
	    &offset_median, sizeof(offset_median));

	priv_adjfreq(offset_median);

	conf->status.reftime = gettime();

	conf->status.stratum++;	/* one more than selected peer */
	if (conf->status.stratum > NTP_MAXSTRATUM)
		conf->status.stratum = NTP_MAXSTRATUM;
	update_scale(offset_median);

	conf->status.refid = offsets[i]->status.send_refid;

	free(offsets);

	/* Rebase all stored offsets against the adjustment just sent. */
	TAILQ_FOREACH(p, &conf->ntp_peers, entry) {
		for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
			p->reply[i].offset -= offset_median;
		p->update.good = 0;
	}
	TAILQ_FOREACH(s, &conf->ntp_sensors, entry) {
		for (i = 0; i < SENSOR_OFFSETS; i++)
			s->offsets[i].offset -= offset_median;
		s->update.offset -= offset_median;
	}

	return (0);
}
  695. int
  696. offset_compare(const void *aa, const void *bb)
  697. {
  698. const struct ntp_offset * const *a;
  699. const struct ntp_offset * const *b;
  700. a = aa;
  701. b = bb;
  702. if ((*a)->offset < (*b)->offset)
  703. return (-1);
  704. else if ((*a)->offset > (*b)->offset)
  705. return (1);
  706. else
  707. return (0);
  708. }
  709. void
  710. priv_settime(double offset, char *msg)
  711. {
  712. if (offset == 0)
  713. log_info("cancel settime because %s", msg);
  714. imsg_compose(ibuf_main, IMSG_SETTIME, 0, 0, -1,
  715. &offset, sizeof(offset));
  716. conf->settime = 0;
  717. }
  718. void
  719. priv_dns(int cmd, char *name, u_int32_t peerid)
  720. {
  721. u_int16_t dlen = 0;
  722. if (name != NULL)
  723. dlen = strlen(name) + 1;
  724. imsg_compose(ibuf_dns, cmd, peerid, 0, -1, name, dlen);
  725. }
  726. void
  727. update_scale(double offset)
  728. {
  729. offset += getoffset();
  730. if (offset < 0)
  731. offset = -offset;
  732. if (offset > QSCALE_OFF_MAX || !conf->status.synced ||
  733. conf->freq.num < 3)
  734. conf->scale = 1;
  735. else if (offset < QSCALE_OFF_MIN)
  736. conf->scale = QSCALE_OFF_MAX / QSCALE_OFF_MIN;
  737. else
  738. conf->scale = QSCALE_OFF_MAX / offset;
  739. }
  740. time_t
  741. scale_interval(time_t requested)
  742. {
  743. time_t interval, r;
  744. interval = requested * conf->scale;
  745. r = arc4random_uniform(MAXIMUM(5, interval / 10));
  746. return (interval + r);
  747. }
  748. time_t
  749. error_interval(void)
  750. {
  751. time_t interval, r;
  752. interval = INTERVAL_QUERY_PATHETIC * QSCALE_OFF_MAX / QSCALE_OFF_MIN;
  753. r = arc4random_uniform(interval / 10);
  754. return (interval + r);
  755. }