A clone of btpd with my configuration changes.


#include <sys/types.h>
#include <sys/uio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netdb.h>
#include <sys/mman.h>
#include <sys/wait.h>

#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "btpd.h"

#define WRITE_TIMEOUT (& (struct timeval) { 60, 0 })

#define min(x, y) ((x) <= (y) ? (x) : (y))
static unsigned long
net_write(struct peer *p, unsigned long wmax);

void
net_read_cb(int sd, short type, void *arg)
{
    struct peer *p = (struct peer *)arg;
    if (btpd.ibwlim == 0) {
        p->reader->read(p, 0);
    } else if (btpd.ibw_left > 0) {
        btpd.ibw_left -= p->reader->read(p, btpd.ibw_left);
    } else {
        p->flags |= PF_ON_READQ;
        BTPDQ_INSERT_TAIL(&btpd.readq, p, rq_entry);
    }
}

void
net_write_cb(int sd, short type, void *arg)
{
    struct peer *p = (struct peer *)arg;
    if (type == EV_TIMEOUT) {
        btpd_log(BTPD_L_ERROR, "Write attempt timed out.\n");
        peer_kill(p);
        return;
    }
    if (btpd.obwlim == 0) {
        net_write(p, 0);
    } else if (btpd.obw_left > 0) {
        btpd.obw_left -= net_write(p, btpd.obw_left);
    } else {
        p->flags |= PF_ON_WRITEQ;
        BTPDQ_INSERT_TAIL(&btpd.writeq, p, wq_entry);
    }
}
void
net_write32(void *buf, uint32_t num)
{
    *(uint32_t *)buf = htonl(num);
}

uint32_t
net_read32(void *buf)
{
    return ntohl(*(uint32_t *)buf);
}
static void
kill_buf_no(char *buf, size_t len)
{
    //Nothing
}

static void
kill_buf_free(char *buf, size_t len)
{
    free(buf);
}

int
nb_drop(struct net_buf *nb)
{
    assert(nb->refs > 0);
    nb->refs--;
    if (nb->refs == 0) {
        nb->kill_buf(nb->buf, nb->len);
        free(nb);
        return 1;
    } else
        return 0;
}

void
nb_hold(struct net_buf *nb)
{
    nb->refs++;
}
struct net_buf *
nb_create_alloc(short type, size_t len)
{
    struct net_buf *nb = btpd_calloc(1, sizeof(*nb) + len);
    nb->type = type;
    nb->buf = (char *)(nb + 1);
    nb->len = len;
    nb->kill_buf = kill_buf_no;
    return nb;
}

struct net_buf *
nb_create_set(short type, char *buf, size_t len,
    void (*kill_buf)(char *, size_t))
{
    struct net_buf *nb = btpd_calloc(1, sizeof(*nb));
    nb->type = type;
    nb->buf = buf;
    nb->len = len;
    nb->kill_buf = kill_buf;
    return nb;
}
uint32_t
nb_get_index(struct net_buf *nb)
{
    switch (nb->type) {
    case NB_CANCEL:
    case NB_HAVE:
    case NB_PIECE:
    case NB_REQUEST:
        return net_read32(nb->buf + 5);
    default:
        abort();
    }
}

uint32_t
nb_get_begin(struct net_buf *nb)
{
    switch (nb->type) {
    case NB_CANCEL:
    case NB_PIECE:
    case NB_REQUEST:
        return net_read32(nb->buf + 9);
    default:
        abort();
    }
}

uint32_t
nb_get_length(struct net_buf *nb)
{
    switch (nb->type) {
    case NB_CANCEL:
    case NB_REQUEST:
        return net_read32(nb->buf + 13);
    case NB_PIECE:
        return net_read32(nb->buf) - 9;
    default:
        abort();
    }
}
void
kill_shake(struct input_reader *reader)
{
    free(reader);
}
#define NIOV 16

static unsigned long
net_write(struct peer *p, unsigned long wmax)
{
    struct nb_link *nl;
    struct iovec iov[NIOV];
    int niov;
    int limited;
    ssize_t nwritten;
    unsigned long bcount;

    limited = wmax > 0;

    niov = 0;
    assert((nl = BTPDQ_FIRST(&p->outq)) != NULL);
    while (niov < NIOV && nl != NULL && (!limited || wmax > 0)) {
        if (niov > 0) {
            iov[niov].iov_base = nl->nb->buf;
            iov[niov].iov_len = nl->nb->len;
        } else {
            /* The first buffer may already be partially written. */
            iov[niov].iov_base = nl->nb->buf + p->outq_off;
            iov[niov].iov_len = nl->nb->len - p->outq_off;
        }
        if (limited) {
            if (iov[niov].iov_len > wmax)
                iov[niov].iov_len = wmax;
            wmax -= iov[niov].iov_len;
        }
        niov++;
        nl = BTPDQ_NEXT(nl, entry);
    }

    nwritten = writev(p->sd, iov, niov);
    if (nwritten < 0) {
        if (errno == EAGAIN) {
            event_add(&p->out_ev, WRITE_TIMEOUT);
            return 0;
        } else {
            btpd_log(BTPD_L_CONN, "write error: %s\n", strerror(errno));
            peer_kill(p);
            return 0;
        }
    } else if (nwritten == 0) {
        btpd_log(BTPD_L_CONN, "connection closed by peer.\n");
        peer_kill(p);
        return 0;
    }

    bcount = nwritten;
    nl = BTPDQ_FIRST(&p->outq);
    while (bcount > 0) {
        unsigned long bufdelta = nl->nb->len - p->outq_off;
        if (bcount >= bufdelta) {
            if (nl->nb->type == NB_TORRENTDATA) {
                p->tp->uploaded += bufdelta;
                p->rate_from_me[btpd.seconds % RATEHISTORY] += bufdelta;
            }
            bcount -= bufdelta;
            BTPDQ_REMOVE(&p->outq, nl, entry);
            nb_drop(nl->nb);
            free(nl);
            p->outq_off = 0;
            nl = BTPDQ_FIRST(&p->outq);
        } else {
            if (nl->nb->type == NB_TORRENTDATA) {
                p->tp->uploaded += bcount;
                p->rate_from_me[btpd.seconds % RATEHISTORY] += bcount;
            }
            p->outq_off += bcount;
            bcount = 0;
        }
    }

    if (!BTPDQ_EMPTY(&p->outq))
        event_add(&p->out_ev, WRITE_TIMEOUT);
    else if (p->flags & PF_WRITE_CLOSE) {
        btpd_log(BTPD_L_CONN, "Closed because of write flag.\n");
        peer_kill(p);
    }

    return nwritten;
}
void
net_send(struct peer *p, struct net_buf *nb)
{
    struct nb_link *nl = btpd_calloc(1, sizeof(*nl));
    nl->nb = nb;
    nb_hold(nb);

    if (BTPDQ_EMPTY(&p->outq)) {
        assert(p->outq_off == 0);
        event_add(&p->out_ev, WRITE_TIMEOUT);
    }
    BTPDQ_INSERT_TAIL(&p->outq, nl, entry);
}
/*
 * Remove a network buffer from the peer's outq.
 * If part of the buffer has already been written
 * to the network it cannot be removed.
 *
 * Returns 1 if the buffer was removed, 0 if not.
 */
int
net_unsend(struct peer *p, struct nb_link *nl)
{
    if (!(nl == BTPDQ_FIRST(&p->outq) && p->outq_off > 0)) {
        BTPDQ_REMOVE(&p->outq, nl, entry);
        nb_drop(nl->nb);
        free(nl);
        if (BTPDQ_EMPTY(&p->outq)) {
            if (p->flags & PF_ON_WRITEQ) {
                BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);
                p->flags &= ~PF_ON_WRITEQ;
            } else
                event_del(&p->out_ev);
        }
        return 1;
    } else
        return 0;
}
void
net_send_piece(struct peer *p, uint32_t index, uint32_t begin,
    char *block, size_t blen)
{
    struct net_buf *head, *piece;

    btpd_log(BTPD_L_MSG, "send piece: %u, %u, %u\n",
        index, begin, (unsigned)blen);

    head = nb_create_alloc(NB_PIECE, 13);
    net_write32(head->buf, 9 + blen);
    head->buf[4] = MSG_PIECE;
    net_write32(head->buf + 5, index);
    net_write32(head->buf + 9, begin);
    net_send(p, head);

    piece = nb_create_set(NB_TORRENTDATA, block, blen, kill_buf_free);
    net_send(p, piece);
}
void
net_send_request(struct peer *p, struct piece_req *req)
{
    struct net_buf *out = nb_create_alloc(NB_REQUEST, 17);
    net_write32(out->buf, 13);
    out->buf[4] = MSG_REQUEST;
    net_write32(out->buf + 5, req->index);
    net_write32(out->buf + 9, req->begin);
    net_write32(out->buf + 13, req->length);
    net_send(p, out);
}

void
net_send_cancel(struct peer *p, struct piece_req *req)
{
    struct net_buf *out = nb_create_alloc(NB_CANCEL, 17);
    net_write32(out->buf, 13);
    out->buf[4] = MSG_CANCEL;
    net_write32(out->buf + 5, req->index);
    net_write32(out->buf + 9, req->begin);
    net_write32(out->buf + 13, req->length);
    net_send(p, out);
}

void
net_send_have(struct peer *p, uint32_t index)
{
    struct net_buf *out = nb_create_alloc(NB_HAVE, 9);
    net_write32(out->buf, 5);
    out->buf[4] = MSG_HAVE;
    net_write32(out->buf + 5, index);
    net_send(p, out);
}

void
net_send_multihave(struct peer *p)
{
    struct torrent *tp = p->tp;
    struct net_buf *out = nb_create_alloc(NB_MULTIHAVE, 9 * tp->have_npieces);
    for (uint32_t i = 0, count = 0; count < tp->have_npieces; i++) {
        if (has_bit(tp->piece_field, i)) {
            net_write32(out->buf + count * 9, 5);
            out->buf[count * 9 + 4] = MSG_HAVE;
            net_write32(out->buf + count * 9 + 5, i);
            count++;
        }
    }
    net_send(p, out);
}

void
net_send_onesized(struct peer *p, char mtype, int btype)
{
    struct net_buf *out = nb_create_alloc(btype, 5);
    net_write32(out->buf, 1);
    out->buf[4] = mtype;
    net_send(p, out);
}

void
net_send_unchoke(struct peer *p)
{
    net_send_onesized(p, MSG_UNCHOKE, NB_UNCHOKE);
}

void
net_send_choke(struct peer *p)
{
    net_send_onesized(p, MSG_CHOKE, NB_CHOKE);
}

void
net_send_uninterest(struct peer *p)
{
    net_send_onesized(p, MSG_UNINTEREST, NB_UNINTEREST);
}

void
net_send_interest(struct peer *p)
{
    net_send_onesized(p, MSG_INTEREST, NB_INTEREST);
}
void
net_send_bitfield(struct peer *p)
{
    uint32_t plen = ceil(p->tp->meta.npieces / 8.0);
    struct net_buf *out = nb_create_alloc(NB_BITFIELD, 5);
    net_write32(out->buf, plen + 1);
    out->buf[4] = MSG_BITFIELD;
    net_send(p, out);

    out = nb_create_set(NB_BITDATA, p->tp->piece_field, plen, kill_buf_no);
    net_send(p, out);
}

void
net_send_shake(struct peer *p)
{
    struct net_buf *out = nb_create_alloc(NB_SHAKE, 68);
    bcopy("\x13""BitTorrent protocol\0\0\0\0\0\0\0\0", out->buf, 28);
    bcopy(p->tp->meta.info_hash, out->buf + 28, 20);
    bcopy(btpd.peer_id, out->buf + 48, 20);
    net_send(p, out);
}
static void
kill_generic(struct input_reader *reader)
{
    free(reader);
}

static size_t
net_read(struct peer *p, char *buf, size_t len)
{
    ssize_t nread = read(p->sd, buf, len);
    if (nread < 0) {
        if (errno == EAGAIN) {
            event_add(&p->in_ev, NULL);
            return 0;
        } else {
            btpd_log(BTPD_L_CONN, "read error: %s\n", strerror(errno));
            peer_kill(p);
            return 0;
        }
    } else if (nread == 0) {
        btpd_log(BTPD_L_CONN, "conn closed by other side.\n");
        if (!BTPDQ_EMPTY(&p->outq))
            p->flags |= PF_WRITE_CLOSE;
        else
            peer_kill(p);
        return 0;
    } else
        return nread;
}

static size_t
net_read_to_buf(struct peer *p, struct io_buffer *iob, unsigned long rmax)
{
    if (rmax == 0)
        rmax = iob->buf_len - iob->buf_off;
    else
        rmax = min(rmax, iob->buf_len - iob->buf_off);

    assert(rmax > 0);
    size_t nread = net_read(p, iob->buf + iob->buf_off, rmax);
    if (nread > 0)
        iob->buf_off += nread;
    return nread;
}
void
kill_bitfield(struct input_reader *rd)
{
    free(rd);
}

static void net_generic_reader(struct peer *p);

static unsigned long
read_bitfield(struct peer *p, unsigned long rmax)
{
    struct bitfield_reader *rd = (struct bitfield_reader *)p->reader;

    size_t nread = net_read_to_buf(p, &rd->iob, rmax);
    if (nread == 0)
        return 0;

    if (rd->iob.buf_off == rd->iob.buf_len) {
        peer_on_bitfield(p, rd->iob.buf);
        free(rd);
        net_generic_reader(p);
    } else
        event_add(&p->in_ev, NULL);

    return nread;
}

void
kill_piece(struct input_reader *rd)
{
    free(rd);
}

static unsigned long
read_piece(struct peer *p, unsigned long rmax)
{
    struct piece_reader *rd = (struct piece_reader *)p->reader;

    size_t nread = net_read_to_buf(p, &rd->iob, rmax);
    if (nread == 0)
        return 0;

    p->rate_to_me[btpd.seconds % RATEHISTORY] += nread;
    p->tp->downloaded += nread;
    if (rd->iob.buf_off == rd->iob.buf_len) {
        peer_on_piece(p, rd->index, rd->begin, rd->iob.buf_len, rd->iob.buf);
        free(rd);
        net_generic_reader(p);
    } else
        event_add(&p->in_ev, NULL);

    return nread;
}
#define GRBUFLEN (1 << 15)

static unsigned long
net_generic_read(struct peer *p, unsigned long rmax)
{
    char buf[GRBUFLEN];
    struct io_buffer iob = { 0, GRBUFLEN, buf };
    struct generic_reader *gr = (struct generic_reader *)p->reader;
    size_t nread;
    size_t off, len;
    int got_part;

    if (gr->iob.buf_off > 0) {
        iob.buf_off = gr->iob.buf_off;
        bcopy(gr->iob.buf, iob.buf, iob.buf_off);
        gr->iob.buf_off = 0;
    }

    if ((nread = net_read_to_buf(p, &iob, rmax)) == 0)
        return 0;

    len = iob.buf_off;
    off = 0;
    got_part = 0;
    while (!got_part && len - off >= 4) {
        size_t msg_len = net_read32(buf + off);
        if (msg_len == 0) { /* Keep alive */
            off += 4;
            continue;
        }
        if (len - off < 5) {
            got_part = 1;
            break;
        }
        switch (buf[off + 4]) {
        case MSG_CHOKE:
            btpd_log(BTPD_L_MSG, "choke.\n");
            if (msg_len != 1)
                goto bad_data;
            peer_on_choke(p);
            break;
        case MSG_UNCHOKE:
            btpd_log(BTPD_L_MSG, "unchoke.\n");
            if (msg_len != 1)
                goto bad_data;
            peer_on_unchoke(p);
            break;
        case MSG_INTEREST:
            btpd_log(BTPD_L_MSG, "interested.\n");
            if (msg_len != 1)
                goto bad_data;
            peer_on_interest(p);
            break;
        case MSG_UNINTEREST:
            btpd_log(BTPD_L_MSG, "uninterested.\n");
            if (msg_len != 1)
                goto bad_data;
            peer_on_uninterest(p);
            break;
        case MSG_HAVE:
            btpd_log(BTPD_L_MSG, "have.\n");
            if (msg_len != 5)
                goto bad_data;
            else if (len - off >= msg_len + 4) {
                uint32_t index = net_read32(buf + off + 5);
                peer_on_have(p, index);
            } else
                got_part = 1;
            break;
        case MSG_BITFIELD:
            btpd_log(BTPD_L_MSG, "bitfield.\n");
            if (msg_len != (size_t)ceil(p->tp->meta.npieces / 8.0) + 1)
                goto bad_data;
            else if (p->npieces != 0)
                goto bad_data;
            else if (len - off >= msg_len + 4)
                peer_on_bitfield(p, buf + off + 5);
            else {
                struct bitfield_reader *rp;
                size_t mem = sizeof(*rp) + msg_len - 1;
                p->reader->kill(p->reader);
                rp = btpd_calloc(1, mem);
                rp->rd.kill = kill_bitfield;
                rp->rd.read = read_bitfield;
                rp->iob.buf = (char *)rp + sizeof(*rp);
                rp->iob.buf_len = msg_len - 1;
                rp->iob.buf_off = len - off - 5;
                bcopy(buf + off + 5, rp->iob.buf, rp->iob.buf_off);
                p->reader = (struct input_reader *)rp;
                event_add(&p->in_ev, NULL);
                return nread;
            }
            break;
        case MSG_REQUEST:
            btpd_log(BTPD_L_MSG, "request.\n");
            if (msg_len != 13)
                goto bad_data;
            else if (len - off >= msg_len + 4) {
                if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) != PF_P_WANT)
                    break;
                uint32_t index, begin, length;
                index = net_read32(buf + off + 5);
                begin = net_read32(buf + off + 9);
                length = net_read32(buf + off + 13);
                if (length > (1 << 15))
                    goto bad_data;
                if (index >= p->tp->meta.npieces)
                    goto bad_data;
                if (!has_bit(p->tp->piece_field, index))
                    goto bad_data;
                if (begin + length > torrent_piece_size(p->tp, index))
                    goto bad_data;
                peer_on_request(p, index, begin, length);
            } else
                got_part = 1;
            break;
        case MSG_PIECE:
            btpd_log(BTPD_L_MSG, "piece.\n");
            if (msg_len < 10)
                goto bad_data;
            else if (len - off >= 13) {
                uint32_t index = net_read32(buf + off + 5);
                uint32_t begin = net_read32(buf + off + 9);
                uint32_t length = msg_len - 9;
                if (len - off >= msg_len + 4) {
                    p->tp->downloaded += length;
                    p->rate_to_me[btpd.seconds % RATEHISTORY] += length;
                    peer_on_piece(p, index, begin, length, buf + off + 13);
                } else {
                    struct piece_reader *rp;
                    size_t mem = sizeof(*rp) + length;
                    p->reader->kill(p->reader);
                    rp = btpd_calloc(1, mem);
                    rp->rd.kill = kill_piece;
                    rp->rd.read = read_piece;
                    rp->index = index;
                    rp->begin = begin;
                    rp->iob.buf = (char *)rp + sizeof(*rp);
                    rp->iob.buf_len = length;
                    rp->iob.buf_off = len - off - 13;
                    bcopy(buf + off + 13, rp->iob.buf, rp->iob.buf_off);
                    p->reader = (struct input_reader *)rp;
                    event_add(&p->in_ev, NULL);
                    p->tp->downloaded += rp->iob.buf_off;
                    p->rate_to_me[btpd.seconds % RATEHISTORY] +=
                        rp->iob.buf_off;
                    return nread;
                }
            } else
                got_part = 1;
            break;
        case MSG_CANCEL:
            if (msg_len != 13)
                goto bad_data;
            else if (len - off >= msg_len + 4) {
                uint32_t index = net_read32(buf + off + 5);
                uint32_t begin = net_read32(buf + off + 9);
                uint32_t length = net_read32(buf + off + 13);
                if (index >= p->tp->meta.npieces)
                    goto bad_data;
                if (begin + length > torrent_piece_size(p->tp, index))
                    goto bad_data;
                btpd_log(BTPD_L_MSG, "cancel: %u, %u, %u\n",
                    index, begin, length);
                peer_on_cancel(p, index, begin, length);
            } else
                got_part = 1;
            break;
        default:
            goto bad_data;
        }
        if (!got_part)
            off += 4 + msg_len;
    }
    if (off != len) {
        gr->iob.buf_off = len - off;
        assert(gr->iob.buf_off <= gr->iob.buf_len);
        bcopy(buf + off, gr->iob.buf, gr->iob.buf_off);
    }
    event_add(&p->in_ev, NULL);
    return nread;

bad_data:
    btpd_log(BTPD_L_MSG, "bad data\n");
    peer_kill(p);
    return nread;
}
static void
net_generic_reader(struct peer *p)
{
    struct generic_reader *gr;
    gr = btpd_calloc(1, sizeof(*gr));

    gr->rd.read = net_generic_read;
    gr->rd.kill = kill_generic;

    gr->iob.buf = gr->_io_buf;
    gr->iob.buf_len = MAX_INPUT_LEFT;
    gr->iob.buf_off = 0;

    p->reader = (struct input_reader *)gr;

    event_add(&p->in_ev, NULL);
}
static unsigned long
net_shake_read(struct peer *p, unsigned long rmax)
{
    struct handshake *hs = (struct handshake *)p->reader;
    struct io_buffer *in = &hs->in;

    size_t nread = net_read_to_buf(p, in, rmax);
    if (nread == 0)
        return 0;

    switch (hs->state) {
    case SHAKE_INIT:
        if (in->buf_off < 20)
            break;
        else if (bcmp(in->buf, "\x13""BitTorrent protocol", 20) == 0)
            hs->state = SHAKE_PSTR;
        else
            goto bad_shake;
        /* FALLTHROUGH */
    case SHAKE_PSTR:
        if (in->buf_off < 28)
            break;
        else
            hs->state = SHAKE_RESERVED;
        /* FALLTHROUGH */
    case SHAKE_RESERVED:
        if (in->buf_off < 48)
            break;
        else if (hs->incoming) {
            struct torrent *tp = torrent_get_by_hash(in->buf + 28);
            if (tp != NULL) {
                hs->state = SHAKE_INFO;
                p->tp = tp;
                net_send_shake(p);
            } else
                goto bad_shake;
        } else {
            if (bcmp(in->buf + 28, p->tp->meta.info_hash, 20) == 0)
                hs->state = SHAKE_INFO;
            else
                goto bad_shake;
        }
        /* FALLTHROUGH */
    case SHAKE_INFO:
        if (in->buf_off < 68)
            break;
        else {
            if (torrent_has_peer(p->tp, in->buf + 48))
                goto bad_shake; // Not really, but we're already connected.
            else if (bcmp(in->buf + 48, btpd.peer_id, 20) == 0)
                goto bad_shake; // Connection from myself.
            bcopy(in->buf + 48, p->id, 20);
            hs->state = SHAKE_ID;
        }
        /* FALLTHROUGH */
    default:
        assert(hs->state == SHAKE_ID);
    }
    if (hs->state == SHAKE_ID) {
        btpd_log(BTPD_L_CONN, "Got whole shake.\n");
        free(hs);
        p->piece_field = btpd_calloc(1, (int)ceil(p->tp->meta.npieces / 8.0));
        cm_on_new_peer(p);
        net_generic_reader(p);
        if (p->tp->have_npieces > 0) {
            if (p->tp->have_npieces * 9 < 5 + ceil(p->tp->meta.npieces / 8.0))
                net_send_multihave(p);
            else
                net_send_bitfield(p);
        }
    } else
        event_add(&p->in_ev, NULL);

    return nread;

bad_shake:
    btpd_log(BTPD_L_CONN, "Bad shake(%d)\n", hs->state);
    peer_kill(p);
    return nread;
}
void
net_handshake(struct peer *p, int incoming)
{
    struct handshake *hs;

    hs = calloc(1, sizeof(*hs));
    hs->incoming = incoming;
    hs->state = SHAKE_INIT;

    hs->in.buf_len = SHAKE_LEN;
    hs->in.buf_off = 0;
    hs->in.buf = hs->_io_buf;

    p->reader = (struct input_reader *)hs;
    hs->rd.read = net_shake_read;
    hs->rd.kill = kill_shake;

    if (!incoming)
        net_send_shake(p);
}
int
net_connect2(struct sockaddr *sa, socklen_t salen, int *sd)
{
    if ((*sd = socket(PF_INET, SOCK_STREAM, 0)) == -1)
        return errno;

    set_nonblocking(*sd);

    if (connect(*sd, sa, salen) == -1 && errno != EINPROGRESS) {
        btpd_log(BTPD_L_CONN, "Botched connection %s.\n", strerror(errno));
        close(*sd);
        return errno;
    }

    return 0;
}

int
net_connect(const char *ip, int port, int *sd)
{
    struct addrinfo hints, *res;
    char portstr[6];

    assert(btpd.npeers < btpd.maxpeers);

    if (snprintf(portstr, sizeof(portstr), "%d", port) >= sizeof(portstr))
        return EINVAL;
    bzero(&hints, sizeof(hints));
    hints.ai_family = AF_UNSPEC;
    hints.ai_flags = AI_NUMERICHOST;
    hints.ai_socktype = SOCK_STREAM;
    if (getaddrinfo(ip, portstr, &hints, &res) != 0)
        return errno;

    int error = net_connect2(res->ai_addr, res->ai_addrlen, sd);

    freeaddrinfo(res);

    return error;
}
void
net_connection_cb(int sd, short type, void *arg)
{
    int nsd;

    nsd = accept(sd, NULL, NULL);
    if (nsd < 0) {
        if (errno == EWOULDBLOCK || errno == ECONNABORTED)
            return;
        else
            btpd_err("accept: %s\n", strerror(errno));
    }

    if (set_nonblocking(nsd) != 0) {
        close(nsd);
        return;
    }

    assert(btpd.npeers <= btpd.maxpeers);
    if (btpd.npeers == btpd.maxpeers) {
        close(nsd);
        return;
    }

    peer_create_in(nsd);

    btpd_log(BTPD_L_CONN, "got connection.\n");
}
void
net_bw_rate(void)
{
    unsigned sum = 0;
    for (int i = 0; i < BWCALLHISTORY - 1; i++) {
        btpd.bwrate[i] = btpd.bwrate[i + 1];
        sum += btpd.bwrate[i];
    }
    btpd.bwrate[BWCALLHISTORY - 1] = btpd.bwcalls;
    sum += btpd.bwrate[BWCALLHISTORY - 1];
    btpd.bwcalls = 0;
    btpd.bw_hz_avg = sum / 5.0;
}
void
net_bw_cb(int sd, short type, void *arg)
{
    struct peer *p;

    btpd.bwcalls++;

    double avg_hz;
    if (btpd.seconds < BWCALLHISTORY)
        avg_hz = btpd.bw_hz;
    else
        avg_hz = btpd.bw_hz_avg;

    btpd.obw_left = btpd.obwlim / avg_hz;
    btpd.ibw_left = btpd.ibwlim / avg_hz;

    if (btpd.ibwlim > 0) {
        while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL && btpd.ibw_left > 0) {
            BTPDQ_REMOVE(&btpd.readq, p, rq_entry);
            p->flags &= ~PF_ON_READQ;
            btpd.ibw_left -= p->reader->read(p, btpd.ibw_left);
        }
    } else {
        while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL) {
            BTPDQ_REMOVE(&btpd.readq, p, rq_entry);
            p->flags &= ~PF_ON_READQ;
            p->reader->read(p, 0);
        }
    }

    if (btpd.obwlim) {
        while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL && btpd.obw_left > 0) {
            BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);
            p->flags &= ~PF_ON_WRITEQ;
            btpd.obw_left -= net_write(p, btpd.obw_left);
        }
    } else {
        while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL) {
            BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);
            p->flags &= ~PF_ON_WRITEQ;
            net_write(p, 0);
        }
    }

    event_add(&btpd.bwlim, (& (struct timeval) { 0, 1000000 / btpd.bw_hz }));
}