Source code pulled from OpenBSD for OpenNTPD. The place to contribute to this code is via the OpenBSD CVS tree.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

511 lines
14 KiB

4 years ago
20 years ago
20 years ago
20 years ago
  1. /* $OpenBSD: client.c,v 1.113 2020/01/30 15:55:41 otto Exp $ */
  2. /*
  3. * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
  4. * Copyright (c) 2004 Alexander Guy <alexander.guy@andern.org>
  5. *
  6. * Permission to use, copy, modify, and distribute this software for any
  7. * purpose with or without fee is hereby granted, provided that the above
  8. * copyright notice and this permission notice appear in all copies.
  9. *
  10. * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  11. * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  12. * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  13. * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  14. * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  15. * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  16. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  17. */
  18. #include <sys/types.h>
  19. #include <errno.h>
  20. #include <md5.h>
  21. #include <stdio.h>
  22. #include <stdlib.h>
  23. #include <string.h>
  24. #include <time.h>
  25. #include <unistd.h>
  26. #include "ntpd.h"
  27. int client_update(struct ntp_peer *);
  28. int auto_cmp(const void *, const void *);
  29. void handle_auto(u_int8_t, double);
  30. void set_deadline(struct ntp_peer *, time_t);
  31. void
  32. set_next(struct ntp_peer *p, time_t t)
  33. {
  34. p->next = getmonotime() + t;
  35. p->deadline = 0;
  36. p->poll = t;
  37. }
  38. void
  39. set_deadline(struct ntp_peer *p, time_t t)
  40. {
  41. p->deadline = getmonotime() + t;
  42. p->next = 0;
  43. }
  44. int
  45. client_peer_init(struct ntp_peer *p)
  46. {
  47. if ((p->query = calloc(1, sizeof(struct ntp_query))) == NULL)
  48. fatal("client_peer_init calloc");
  49. p->query->fd = -1;
  50. p->query->msg.status = MODE_CLIENT | (NTP_VERSION << 3);
  51. p->state = STATE_NONE;
  52. p->shift = 0;
  53. p->trustlevel = TRUSTLEVEL_PATHETIC;
  54. p->lasterror = 0;
  55. p->senderrors = 0;
  56. return (client_addr_init(p));
  57. }
  58. int
  59. client_addr_init(struct ntp_peer *p)
  60. {
  61. struct sockaddr_in *sa_in;
  62. struct sockaddr_in6 *sa_in6;
  63. struct ntp_addr *h;
  64. for (h = p->addr; h != NULL; h = h->next) {
  65. switch (h->ss.ss_family) {
  66. case AF_INET:
  67. sa_in = (struct sockaddr_in *)&h->ss;
  68. if (ntohs(sa_in->sin_port) == 0)
  69. sa_in->sin_port = htons(123);
  70. p->state = STATE_DNS_DONE;
  71. break;
  72. case AF_INET6:
  73. sa_in6 = (struct sockaddr_in6 *)&h->ss;
  74. if (ntohs(sa_in6->sin6_port) == 0)
  75. sa_in6->sin6_port = htons(123);
  76. p->state = STATE_DNS_DONE;
  77. break;
  78. default:
  79. fatalx("king bula sez: wrong AF in client_addr_init");
  80. /* NOTREACHED */
  81. }
  82. }
  83. p->query->fd = -1;
  84. set_next(p, 0);
  85. return (0);
  86. }
  87. int
  88. client_nextaddr(struct ntp_peer *p)
  89. {
  90. if (p->query->fd != -1) {
  91. close(p->query->fd);
  92. p->query->fd = -1;
  93. }
  94. if (p->state == STATE_DNS_INPROGRESS)
  95. return (-1);
  96. if (p->addr_head.a == NULL) {
  97. priv_dns(IMSG_HOST_DNS, p->addr_head.name, p->id);
  98. p->state = STATE_DNS_INPROGRESS;
  99. return (-1);
  100. }
  101. p->shift = 0;
  102. p->trustlevel = TRUSTLEVEL_PATHETIC;
  103. if (p->addr == NULL)
  104. p->addr = p->addr_head.a;
  105. else if ((p->addr = p->addr->next) == NULL)
  106. return (1);
  107. return (0);
  108. }
/*
 * Send one NTP query to peer p, creating, binding and connecting the
 * UDP socket on first use.  Returns 0 when a packet was sent or the
 * attempt was deliberately deferred, -1 on a transient failure.
 */
int
client_query(struct ntp_peer *p)
{
	int	val;

	/* no usable address yet: reschedule and wait for DNS */
	if (p->addr == NULL && client_nextaddr(p) == -1) {
		if (conf->settime)
			set_next(p, INTERVAL_AUIO_DNSFAIL);
		else
			set_next(p, MAXIMUM(SETTIME_TIMEOUT,
			    scale_interval(INTERVAL_QUERY_AGGRESSIVE)));
		return (0);
	}

	/*
	 * Once synced, drop addresses that failed authentication and
	 * move on to the next candidate.
	 */
	if (conf->status.synced && p->addr->notauth) {
		peer_addr_head_clear(p);
		client_nextaddr(p);
		return (0);
	}

	if (p->state < STATE_DNS_DONE || p->addr == NULL)
		return (-1);

	/* first query on this address: set up the socket */
	if (p->query->fd == -1) {
		struct sockaddr *sa = (struct sockaddr *)&p->addr->ss;
		struct sockaddr *qa4 = (struct sockaddr *)&p->query_addr4;
		struct sockaddr *qa6 = (struct sockaddr *)&p->query_addr6;

		if ((p->query->fd = socket(p->addr->ss.ss_family, SOCK_DGRAM,
		    0)) == -1)
			fatal("client_query socket");

		/* honor a configured local query address, if any matches */
		if (p->addr->ss.ss_family == qa4->sa_family) {
			if (bind(p->query->fd, qa4, SA_LEN(qa4)) == -1)
				fatal("couldn't bind to IPv4 query address: %s",
				    log_sockaddr(qa4));
		} else if (p->addr->ss.ss_family == qa6->sa_family) {
			if (bind(p->query->fd, qa6, SA_LEN(qa6)) == -1)
				fatal("couldn't bind to IPv6 query address: %s",
				    log_sockaddr(qa6));
		}

		if (connect(p->query->fd, sa, SA_LEN(sa)) == -1) {
			if (errno == ECONNREFUSED || errno == ENETUNREACH ||
			    errno == EHOSTUNREACH || errno == EADDRNOTAVAIL) {
				/* cycle through addresses, but do increase
				   senderrors */
				client_nextaddr(p);
				if (p->addr == NULL)
					p->addr = p->addr_head.a;
				set_next(p, MAXIMUM(SETTIME_TIMEOUT,
				    scale_interval(INTERVAL_QUERY_AGGRESSIVE)));
				p->senderrors++;
				return (-1);
			} else
				fatal("client_query connect");
		}

		/* best-effort; a failure here is only worth a warning */
		val = IPTOS_LOWDELAY;
		if (p->addr->ss.ss_family == AF_INET && setsockopt(p->query->fd,
		    IPPROTO_IP, IP_TOS, &val, sizeof(val)) == -1)
			log_warn("setsockopt IPTOS_LOWDELAY");

		/* kernel receive timestamps are required for T4 accuracy */
		val = 1;
		if (setsockopt(p->query->fd, SOL_SOCKET, SO_TIMESTAMP,
		    &val, sizeof(val)) == -1)
			fatal("setsockopt SO_TIMESTAMP");
	}

	/*
	 * Send out a random 64-bit number as our transmit time.  The NTP
	 * server will copy said number into the originate field on the
	 * response that it sends us.  This is totally legal per the SNTP spec.
	 *
	 * The impact of this is two fold: we no longer send out the current
	 * system time for the world to see (which may aid an attacker), and
	 * it gives us a (not very secure) way of knowing that we're not
	 * getting spoofed by an attacker that can't capture our traffic
	 * but can spoof packets from the NTP server we're communicating with.
	 *
	 * Save the real transmit timestamp locally.
	 */
	p->query->msg.xmttime.int_partl = arc4random();
	p->query->msg.xmttime.fractionl = arc4random();
	p->query->xmttime = gettime_corrected();

	if (ntp_sendmsg(p->query->fd, NULL, &p->query->msg) == -1) {
		/* transient send failure: back off and demote trust */
		p->senderrors++;
		set_next(p, INTERVAL_QUERY_PATHETIC);
		p->trustlevel = TRUSTLEVEL_PATHETIC;
		return (-1);
	}

	p->senderrors = 0;
	p->state = STATE_QUERY_SENT;
	set_deadline(p, QUERYTIME_MAX);

	return (0);
}
  195. int
  196. auto_cmp(const void *a, const void *b)
  197. {
  198. double at = *(const double *)a;
  199. double bt = *(const double *)b;
  200. return at < bt ? -1 : (at > bt ? 1 : 0);
  201. }
  202. void
  203. handle_auto(uint8_t trusted, double offset)
  204. {
  205. static int count;
  206. static double v[AUTO_REPLIES];
  207. /*
  208. * It happens the (constraint) resolves initially fail, don't give up
  209. * but see if we get validated replies later.
  210. */
  211. if (!trusted && conf->constraint_median == 0)
  212. return;
  213. if (offset < AUTO_THRESHOLD) {
  214. /* don't bother */
  215. priv_settime(0, "offset is negative or close enough");
  216. return;
  217. }
  218. /* collect some more */
  219. v[count++] = offset;
  220. if (count < AUTO_REPLIES)
  221. return;
  222. /* we have enough */
  223. qsort(v, count, sizeof(double), auto_cmp);
  224. if (AUTO_REPLIES % 2 == 0)
  225. offset = (v[AUTO_REPLIES / 2 - 1] + v[AUTO_REPLIES / 2]) / 2;
  226. else
  227. offset = v[AUTO_REPLIES / 2];
  228. priv_settime(offset, "");
  229. }
/*
 * Receive and validate one NTP reply on the peer's query socket,
 * record the offset/delay sample, adjust trust, and schedule the
 * next query.  settime/automatic control whether the offset may be
 * used to step the clock at startup.  Returns 0 in all non-fatal
 * cases; unexpected system errors are fatal.
 */
int
client_dispatch(struct ntp_peer *p, u_int8_t settime, u_int8_t automatic)
{
	struct ntp_msg		 msg;
	struct msghdr		 somsg;
	struct iovec		 iov[1];
	struct timeval		 tv;
	char			 buf[NTP_MSGSIZE];
	union {
		/* sized to hold one SCM_TIMESTAMP control message */
		struct cmsghdr	hdr;
		char		buf[CMSG_SPACE(sizeof(tv))];
	} cmsgbuf;
	struct cmsghdr		*cmsg;
	ssize_t			 size;
	double			 T1, T2, T3, T4;
	time_t			 interval;

	memset(&somsg, 0, sizeof(somsg));
	iov[0].iov_base = buf;
	iov[0].iov_len = sizeof(buf);
	somsg.msg_iov = iov;
	somsg.msg_iovlen = 1;
	somsg.msg_control = cmsgbuf.buf;
	somsg.msg_controllen = sizeof(cmsgbuf.buf);

	/*
	 * T4 (destination timestamp) starts from getoffset(); the kernel
	 * receive timestamp is added below once the cmsg is found.
	 */
	T4 = getoffset();
	if ((size = recvmsg(p->query->fd, &somsg, 0)) == -1) {
		/* transient network errors: log, back off, try again */
		if (errno == EHOSTUNREACH || errno == EHOSTDOWN ||
		    errno == ENETUNREACH || errno == ENETDOWN ||
		    errno == ECONNREFUSED || errno == EADDRNOTAVAIL ||
		    errno == ENOPROTOOPT || errno == ENOENT) {
			client_log_error(p, "recvmsg", errno);
			set_next(p, error_interval());
			return (0);
		} else
			fatal("recvfrom");
	}

	/* truncated packet or control data: discard and retry later */
	if (somsg.msg_flags & MSG_TRUNC) {
		client_log_error(p, "recvmsg packet", EMSGSIZE);
		set_next(p, error_interval());
		return (0);
	}

	if (somsg.msg_flags & MSG_CTRUNC) {
		client_log_error(p, "recvmsg control data", E2BIG);
		set_next(p, error_interval());
		return (0);
	}

	/* fold the kernel receive timestamp into T4 */
	for (cmsg = CMSG_FIRSTHDR(&somsg); cmsg != NULL;
	    cmsg = CMSG_NXTHDR(&somsg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SCM_TIMESTAMP) {
			memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
			T4 += gettime_from_timeval(&tv);
			break;
		}
	}

	ntp_getmsg((struct sockaddr *)&p->addr->ss, buf, size, &msg);

	/*
	 * The server must echo our random transmit timestamp in its
	 * originate field; anything else is stale or spoofed.
	 */
	if (msg.orgtime.int_partl != p->query->msg.xmttime.int_partl ||
	    msg.orgtime.fractionl != p->query->msg.xmttime.fractionl)
		return (0);

	/* reject unsynced servers: alarm, KoD, or bogus stratum */
	if ((msg.status & LI_ALARM) == LI_ALARM || msg.stratum == 0 ||
	    msg.stratum > NTP_MAXSTRATUM) {
		char s[16];

		if ((msg.status & LI_ALARM) == LI_ALARM) {
			strlcpy(s, "alarm", sizeof(s));
		} else if (msg.stratum == 0) {
			/* Kiss-o'-Death (KoD) packet */
			strlcpy(s, "KoD", sizeof(s));
		} else if (msg.stratum > NTP_MAXSTRATUM) {
			snprintf(s, sizeof(s), "stratum %d", msg.stratum);
		}
		interval = error_interval();
		set_next(p, interval);
		log_info("reply from %s: not synced (%s), next query %llds",
		    log_sockaddr((struct sockaddr *)&p->addr->ss), s,
		    (long long)interval);
		return (0);
	}

	/*
	 * From RFC 2030 (with a correction to the delay math):
	 *
	 *     Timestamp Name          ID   When Generated
	 *     ------------------------------------------------------------
	 *     Originate Timestamp     T1   time request sent by client
	 *     Receive Timestamp       T2   time request received by server
	 *     Transmit Timestamp      T3   time reply sent by server
	 *     Destination Timestamp   T4   time reply received by client
	 *
	 * The roundtrip delay d and local clock offset t are defined as
	 *
	 *     d = (T4 - T1) - (T3 - T2)     t = ((T2 - T1) + (T3 - T4)) / 2.
	 */
	T1 = p->query->xmttime;
	T2 = lfp_to_d(msg.rectime);
	T3 = lfp_to_d(msg.xmttime);

	/* Detect liars */
	if (!p->trusted && conf->constraint_median != 0 &&
	    (constraint_check(T2) != 0 || constraint_check(T3) != 0)) {
		log_info("reply from %s: constraint check failed",
		    log_sockaddr((struct sockaddr *)&p->addr->ss));
		set_next(p, error_interval());
		return (0);
	}

	p->reply[p->shift].offset = ((T2 - T1) + (T3 - T4)) / 2;
	p->reply[p->shift].delay = (T4 - T1) - (T3 - T2);
	p->reply[p->shift].status.stratum = msg.stratum;
	/* a negative roundtrip delay is physically impossible: reject */
	if (p->reply[p->shift].delay < 0) {
		interval = error_interval();
		set_next(p, interval);
		log_info("reply from %s: negative delay %fs, "
		    "next query %llds",
		    log_sockaddr((struct sockaddr *)&p->addr->ss),
		    p->reply[p->shift].delay, (long long)interval);
		return (0);
	}
	p->reply[p->shift].error = (T2 - T1) - (T3 - T4);
	p->reply[p->shift].rcvd = getmonotime();
	p->reply[p->shift].good = 1;

	p->reply[p->shift].status.leap = (msg.status & LIMASK);
	p->reply[p->shift].status.precision = msg.precision;
	p->reply[p->shift].status.rootdelay = sfp_to_d(msg.rootdelay);
	p->reply[p->shift].status.rootdispersion = sfp_to_d(msg.dispersion);
	p->reply[p->shift].status.refid = msg.refid;
	p->reply[p->shift].status.reftime = lfp_to_d(msg.reftime);
	p->reply[p->shift].status.poll = msg.ppoll;

	/*
	 * send_refid identifies this peer downstream: the raw IPv4
	 * address, or (per NTPv4 convention) the first four bytes of
	 * the MD5 hash of the IPv6 address.
	 */
	if (p->addr->ss.ss_family == AF_INET) {
		p->reply[p->shift].status.send_refid =
		    ((struct sockaddr_in *)&p->addr->ss)->sin_addr.s_addr;
	} else if (p->addr->ss.ss_family == AF_INET6) {
		MD5_CTX		context;
		u_int8_t	digest[MD5_DIGEST_LENGTH];

		MD5Init(&context);
		MD5Update(&context, ((struct sockaddr_in6 *)&p->addr->ss)->
		    sin6_addr.s6_addr, sizeof(struct in6_addr));
		MD5Final(digest, &context);
		memcpy((char *)&p->reply[p->shift].status.send_refid, digest,
		    sizeof(u_int32_t));
	} else
		p->reply[p->shift].status.send_refid = msg.xmttime.fractionl;

	/* poll faster while trust is still being established */
	if (p->trustlevel < TRUSTLEVEL_PATHETIC)
		interval = scale_interval(INTERVAL_QUERY_PATHETIC);
	else if (p->trustlevel < TRUSTLEVEL_AGGRESSIVE)
		interval = (conf->settime && conf->automatic) ?
		    INTERVAL_QUERY_ULTRA_VIOLENCE :
		    scale_interval(INTERVAL_QUERY_AGGRESSIVE);
	else
		interval = scale_interval(INTERVAL_QUERY_NORMAL);

	set_next(p, interval);
	p->state = STATE_REPLY_RECEIVED;

	/* every received reply which we do not discard increases trust */
	if (p->trustlevel < TRUSTLEVEL_MAX) {
		if (p->trustlevel < TRUSTLEVEL_BADPEER &&
		    p->trustlevel + 1 >= TRUSTLEVEL_BADPEER)
			log_info("peer %s now valid",
			    log_sockaddr((struct sockaddr *)&p->addr->ss));
		p->trustlevel++;
	}

	log_debug("reply from %s: offset %f delay %f, "
	    "next query %llds",
	    log_sockaddr((struct sockaddr *)&p->addr->ss),
	    p->reply[p->shift].offset, p->reply[p->shift].delay,
	    (long long)interval);

	client_update(p);
	if (settime) {
		if (automatic)
			handle_auto(p->trusted, p->reply[p->shift].offset);
		else
			priv_settime(p->reply[p->shift].offset, "");
	}

	/* advance the sample ring buffer */
	if (++p->shift >= OFFSET_ARRAY_SIZE)
		p->shift = 0;

	return (0);
}
  401. int
  402. client_update(struct ntp_peer *p)
  403. {
  404. int i, best = 0, good = 0;
  405. /*
  406. * clock filter
  407. * find the offset which arrived with the lowest delay
  408. * use that as the peer update
  409. * invalidate it and all older ones
  410. */
  411. for (i = 0; good == 0 && i < OFFSET_ARRAY_SIZE; i++)
  412. if (p->reply[i].good) {
  413. good++;
  414. best = i;
  415. }
  416. for (; i < OFFSET_ARRAY_SIZE; i++)
  417. if (p->reply[i].good) {
  418. good++;
  419. if (p->reply[i].delay < p->reply[best].delay)
  420. best = i;
  421. }
  422. if (good < 8)
  423. return (-1);
  424. memcpy(&p->update, &p->reply[best], sizeof(p->update));
  425. if (priv_adjtime() == 0) {
  426. for (i = 0; i < OFFSET_ARRAY_SIZE; i++)
  427. if (p->reply[i].rcvd <= p->reply[best].rcvd)
  428. p->reply[i].good = 0;
  429. }
  430. return (0);
  431. }
  432. void
  433. client_log_error(struct ntp_peer *peer, const char *operation, int error)
  434. {
  435. const char *address;
  436. address = log_sockaddr((struct sockaddr *)&peer->addr->ss);
  437. if (peer->lasterror == error) {
  438. log_debug("%s %s: %s", operation, address, strerror(error));
  439. return;
  440. }
  441. peer->lasterror = error;
  442. log_warn("%s %s", operation, address);
  443. }