A clone of btpd with my configuration changes.

#include "btpd.h"
#include <ctype.h>
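
/*
 * Tear down a peer: unlink it from its net (or from the unattached
 * list), drop it from the bandwidth queues, cancel its io event,
 * close its socket and release every queued outgoing buffer before
 * freeing the peer itself.
 */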
void
peer_kill(struct peer *p)
{
    struct nb_link *nl;

    btpd_log(BTPD_L_CONN, "killed peer %p\n", p);

    if (p->flags & PF_ATTACHED) {
        BTPDQ_REMOVE(&p->n->peers, p, p_entry);
        p->n->npeers--;
        if (p->n->active) {
            ul_on_lost_peer(p);
            dl_on_lost_peer(p);
        }
    } else
        BTPDQ_REMOVE(&net_unattached, p, p_entry);
    if (p->flags & PF_ON_READQ)
        BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
    if (p->flags & PF_ON_WRITEQ)
        BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);

    btpd_ev_del(&p->ioev);
    close(p->sd);

    nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        nb_drop(nl->nb);
        free(nl);
        nl = next;
    }

    if (p->in.buf != NULL)
        free(p->in.buf);
    if (p->piece_field != NULL)
        free(p->piece_field);
    free(p);
    net_npeers--;
}
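
/* Tell the input state machine what to parse next and how many bytes it needs. */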
void
peer_set_in_state(struct peer *p, enum input_state state, size_t size)
{
    p->in.state = state;
    p->in.st_bytes = size;
}
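
/*
 * Queue a message for delivery to this peer. Buffers are reference
 * counted via nb_hold/nb_drop; write events are enabled on the
 * empty -> non-empty transition, and t_wantwrite timestamps the
 * attempt for the stalled-write check in peer_on_tick.
 */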
void
peer_send(struct peer *p, struct net_buf *nb)
{
    struct nb_link *nl = btpd_calloc(1, sizeof(*nl));
    nl->nb = nb;
    nb_hold(nb);

    if (BTPDQ_EMPTY(&p->outq)) {
        assert(p->outq_off == 0);
        btpd_ev_enable(&p->ioev, EV_WRITE);
        p->t_wantwrite = btpd_seconds;
    }
    BTPDQ_INSERT_TAIL(&p->outq, nl, entry);
}

/*
 * Remove a network buffer from the peer's outq.
 * If part of the buffer has already been written
 * to the network it cannot be removed.
 *
 * Returns 1 if the buffer was removed, 0 if not.
 */
int
peer_unsend(struct peer *p, struct nb_link *nl)
{
    if (!(nl == BTPDQ_FIRST(&p->outq) && p->outq_off > 0)) {
        BTPDQ_REMOVE(&p->outq, nl, entry);
        if (nl->nb->type == NB_TORRENTDATA) {
            assert(p->npiece_msgs > 0);
            p->npiece_msgs--;
        }
        nb_drop(nl->nb);
        free(nl);
        if (BTPDQ_EMPTY(&p->outq)) {
            if (p->flags & PF_ON_WRITEQ) {
                BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
                p->flags &= ~PF_ON_WRITEQ;
            } else
                btpd_ev_disable(&p->ioev, EV_WRITE);
        }
        return 1;
    } else
        return 0;
}
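
/*
 * Called when a message has been written out in full. Mostly logging,
 * plus bookkeeping for unchoke and torrent data messages.
 */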
void
peer_sent(struct peer *p, struct net_buf *nb)
{
    switch (nb->type) {
    case NB_KEEPALIVE:
        btpd_log(BTPD_L_MSG, "sent keepalive to %p\n", p);
        break;
    case NB_CHOKE:
        btpd_log(BTPD_L_MSG, "sent choke to %p\n", p);
        break;
    case NB_UNCHOKE:
        btpd_log(BTPD_L_MSG, "sent unchoke to %p\n", p);
        p->flags &= ~PF_NO_REQUESTS;
        break;
    case NB_INTEREST:
        btpd_log(BTPD_L_MSG, "sent interest to %p\n", p);
        break;
    case NB_UNINTEREST:
        btpd_log(BTPD_L_MSG, "sent uninterest to %p\n", p);
        break;
    case NB_HAVE:
        btpd_log(BTPD_L_MSG, "sent have(%u) to %p\n",
            nb_get_index(nb), p);
        break;
    case NB_BITFIELD:
        btpd_log(BTPD_L_MSG, "sent bitfield to %p\n", p);
        break;
    case NB_REQUEST:
        btpd_log(BTPD_L_MSG, "sent request(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_PIECE:
        btpd_log(BTPD_L_MSG, "sent piece(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_CANCEL:
        btpd_log(BTPD_L_MSG, "sent cancel(%u,%u,%u) to %p\n",
            nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
        break;
    case NB_TORRENTDATA:
        btpd_log(BTPD_L_MSG, "sent data to %p\n", p);
        assert(p->npiece_msgs > 0);
        p->npiece_msgs--;
        break;
    case NB_MULTIHAVE:
        btpd_log(BTPD_L_MSG, "sent multihave to %p\n", p);
        break;
    case NB_BITDATA:
        btpd_log(BTPD_L_MSG, "sent bitdata to %p\n", p);
        break;
    case NB_SHAKE:
        btpd_log(BTPD_L_MSG, "sent shake to %p\n", p);
        break;
    }
}
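
/* Register an outgoing block request and queue its request message. */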
void
peer_request(struct peer *p, struct block_request *req)
{
    assert(p->nreqs_out < MAXPIPEDREQUESTS);
    p->nreqs_out++;
    BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
    peer_send(p, req->msg);
}
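
/* Return 1 if we have an outstanding request for the given piece/block. */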
int
peer_requested(struct peer *p, uint32_t piece, uint32_t block)
{
    uint32_t begin = block * PIECE_BLOCKLEN;
    struct block_request *req;
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if (nb_get_index(req->msg) == piece && nb_get_begin(req->msg) == begin)
            return 1;
    return 0;
}

void
peer_keepalive(struct peer *p)
{
    peer_send(p, nb_create_keepalive());
}
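
/*
 * Cancel an outstanding request: if its request message is still
 * unsent we can simply unqueue it, otherwise the supplied cancel
 * message is sent.
 */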
void
peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
{
    BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
    p->nreqs_out--;

    int removed = 0;
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry) {
        if (nl->nb == req->msg) {
            removed = peer_unsend(p, nl);
            break;
        }
    }
    if (!removed)
        peer_send(p, nb);
    if (p->nreqs_out == 0)
        peer_on_no_reqs(p);
}

void
peer_unchoke(struct peer *p)
{
    p->flags &= ~PF_I_CHOKE;
    peer_send(p, nb_create_unchoke());
}
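
/*
 * Choke the peer and revoke unsent piece messages. Each NB_PIECE
 * header appears to be queued together with a following NB_TORRENTDATA
 * buffer (see peer_on_request), so the pair is removed together.
 */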
void
peer_choke(struct peer *p)
{
    struct nb_link *nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        if (nl->nb->type == NB_PIECE) {
            struct nb_link *data = next;
            next = BTPDQ_NEXT(next, entry);
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
        }
        nl = next;
    }
    p->flags |= PF_I_CHOKE;
    peer_send(p, nb_create_choke());
}
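
/*
 * Want one more of the peer's pieces. On the 0 -> 1 transition we
 * become interested again: by revoking a still queued uninterest
 * message or a deferred unwant where possible, otherwise by sending
 * an interest message.
 */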
void
peer_want(struct peer *p, uint32_t index)
{
    assert(p->nwant < p->npieces);
    p->nwant++;
    if (p->nwant == 1) {
        if (p->nreqs_out == 0) {
            assert((p->flags & PF_DO_UNWANT) == 0);
            int unsent = 0;
            struct nb_link *nl = BTPDQ_LAST(&p->outq, nb_tq);
            if (nl != NULL && nl->nb->type == NB_UNINTEREST)
                unsent = peer_unsend(p, nl);
            if (!unsent)
                peer_send(p, nb_create_interest());
        } else {
            assert((p->flags & PF_DO_UNWANT) != 0);
            p->flags &= ~PF_DO_UNWANT;
        }
        p->flags |= PF_I_WANT;
    }
}
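
/*
 * Want one less of the peer's pieces. On the 1 -> 0 transition we
 * become uninterested; with requests still in flight the uninterest
 * message is deferred via PF_DO_UNWANT until peer_on_no_reqs runs.
 */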
void
peer_unwant(struct peer *p, uint32_t index)
{
    assert(p->nwant > 0);
    p->nwant--;
    if (p->nwant == 0) {
        p->flags &= ~PF_I_WANT;
        p->t_nointerest = btpd_seconds;
        if (p->nreqs_out == 0)
            peer_send(p, nb_create_uninterest());
        else
            p->flags |= PF_DO_UNWANT;
    }
}
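
/*
 * Common constructor for incoming and outgoing peers. New peers start
 * out choked in both directions and wait for the 28 byte handshake
 * prefix (presumably pstrlen + "BitTorrent protocol" + the 8 reserved
 * bytes).
 */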
static struct peer *
peer_create_common(int sd)
{
    struct peer *p = btpd_calloc(1, sizeof(*p));

    p->sd = sd;
    p->flags = PF_I_CHOKE | PF_P_CHOKE;
    p->t_created = btpd_seconds;
    p->t_lastwrite = btpd_seconds;
    p->t_nointerest = btpd_seconds;
    BTPDQ_INIT(&p->my_reqs);
    BTPDQ_INIT(&p->outq);

    peer_set_in_state(p, SHAKE_PSTR, 28);

    btpd_ev_new(&p->ioev, p->sd, EV_READ, net_io_cb, p);

    BTPDQ_INSERT_TAIL(&net_unattached, p, p_entry);
    net_npeers++;
    return p;
}

void
peer_create_in(int sd)
{
    struct peer *p = peer_create_common(sd);
    p->flags |= PF_INCOMING;
}
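
/* Connect out to a peer by address and queue our handshake. */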
void
peer_create_out(struct net *n, const uint8_t *id,
    const char *ip, int port)
{
    int sd;
    struct peer *p;

    if (net_connect_name(ip, port, &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->n = n;
    peer_send(p, nb_create_shake(n->tp));
}
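
/*
 * Connect out to a peer given in a tracker's compact format: a packed
 * 4 byte (IPv4) or 16 byte (IPv6) address followed by a 2 byte port,
 * already in network byte order.
 */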
void
peer_create_out_compact(struct net *n, int family, const char *compact)
{
    int sd;
    struct peer *p;
    struct sockaddr_storage addr;
    struct sockaddr_in *a4;
    struct sockaddr_in6 *a6;

    switch (family) {
    case AF_INET:
        if (!net_ipv4)
            return;
        a4 = (struct sockaddr_in *)&addr;
        a4->sin_family = AF_INET;
        bcopy(compact, &a4->sin_addr.s_addr, 4);
        bcopy(compact + 4, &a4->sin_port, 2);
        break;
    case AF_INET6:
        if (!net_ipv6)
            return;
        a6 = (struct sockaddr_in6 *)&addr;
        a6->sin6_family = AF_INET6;
        bcopy(compact, &a6->sin6_addr, 16);
        bcopy(compact + 16, &a6->sin6_port, 2);
        break;
    default:
        abort();
    }
    if (net_connect_addr(family, (struct sockaddr *)&addr,
            sizeof(addr), &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->n = n;
    peer_send(p, nb_create_shake(n->tp));
}
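
/* The last outstanding request finished; send any uninterest deferred
 * by peer_unwant. */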
void
peer_on_no_reqs(struct peer *p)
{
    if ((p->flags & PF_DO_UNWANT) != 0) {
        assert(p->nwant == 0);
        p->flags &= ~PF_DO_UNWANT;
        peer_send(p, nb_create_uninterest());
    }
}

void
peer_on_keepalive(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received keep alive from %p\n", p);
}
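
/*
 * Handshake completed. Log a printable prefix of the peer id, then
 * advertise our pieces: as a multihave if that encodes smaller than a
 * bitfield (a have message is 9 bytes, a bitfield message
 * 5 + ceil(npieces/8)), and attach the peer to its torrent's net.
 */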
void
peer_on_shake(struct peer *p)
{
    uint8_t printid[21];
    int i;
    for (i = 0; i < 20 && isprint(p->id[i]); i++)
        printid[i] = p->id[i];
    printid[i] = '\0';
    btpd_log(BTPD_L_MSG, "received shake(%s) from %p\n", printid, p);
    p->piece_field = btpd_calloc(1, (int)ceil(p->n->tp->npieces / 8.0));
    if (cm_pieces(p->n->tp) > 0) {
        if ((cm_pieces(p->n->tp) * 9 < 5 +
                ceil(p->n->tp->npieces / 8.0)))
            peer_send(p, nb_create_multihave(p->n->tp));
        else {
            peer_send(p, nb_create_bitfield(p->n->tp));
            peer_send(p, nb_create_bitdata(p->n->tp));
        }
    }

    BTPDQ_REMOVE(&net_unattached, p, p_entry);
    BTPDQ_INSERT_HEAD(&p->n->peers, p, p_entry);
    p->flags |= PF_ATTACHED;
    p->n->npeers++;

    ul_on_new_peer(p);
    dl_on_new_peer(p);
}
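
/* The peer choked us: record it, notify the downloader and revoke any
 * unsent request messages. */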
void
peer_on_choke(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received choke from %p\n", p);
    if ((p->flags & PF_P_CHOKE) != 0)
        return;
    else {
        p->flags |= PF_P_CHOKE;
        dl_on_choke(p);
        struct nb_link *nl = BTPDQ_FIRST(&p->outq);
        while (nl != NULL) {
            struct nb_link *next = BTPDQ_NEXT(nl, entry);
            if (nl->nb->type == NB_REQUEST)
                peer_unsend(p, nl);
            nl = next;
        }
    }
}

void
peer_on_unchoke(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received unchoke from %p\n", p);
    if ((p->flags & PF_P_CHOKE) == 0)
        return;
    else {
        p->flags &= ~PF_P_CHOKE;
        dl_on_unchoke(p);
    }
}

void
peer_on_interest(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received interest from %p\n", p);
    if ((p->flags & PF_P_WANT) != 0)
        return;
    else {
        p->flags |= PF_P_WANT;
        ul_on_interest(p);
    }
}

void
peer_on_uninterest(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received uninterest from %p\n", p);
    if ((p->flags & PF_P_WANT) == 0)
        return;
    else {
        p->flags &= ~PF_P_WANT;
        p->t_nointerest = btpd_seconds;
        ul_on_uninterest(p);
    }
}

void
peer_on_have(struct peer *p, uint32_t index)
{
    btpd_log(BTPD_L_MSG, "received have(%u) from %p\n", index, p);
    if (!has_bit(p->piece_field, index)) {
        set_bit(p->piece_field, index);
        p->npieces++;
        dl_on_piece_ann(p, index);
    }
}
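
/* Install the peer's bitfield and announce every piece it claims to
 * the downloader. */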
void
peer_on_bitfield(struct peer *p, const uint8_t *field)
{
    btpd_log(BTPD_L_MSG, "received bitfield from %p\n", p);
    assert(p->npieces == 0);
    bcopy(field, p->piece_field, (size_t)ceil(p->n->tp->npieces / 8.0));
    for (uint32_t i = 0; i < p->n->tp->npieces; i++) {
        if (has_bit(p->piece_field, i)) {
            p->npieces++;
            dl_on_piece_ann(p, i);
        }
    }
}
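
/*
 * An incoming block. Only blocks matching an outstanding request are
 * accepted; anything unsolicited is discarded.
 */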
void
peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length, const char *data)
{
    struct block_request *req;
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if ((nb_get_begin(req->msg) == begin &&
                nb_get_index(req->msg) == index &&
                nb_get_length(req->msg) == length))
            break;
    if (req != NULL) {
        btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
        assert(p->nreqs_out > 0);
        p->nreqs_out--;
        BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);
        dl_on_block(p, req, index, begin, length, data);
    } else
        btpd_log(BTPD_L_MSG, "discarded piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
}
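
/*
 * The peer requested a block: queue a piece message followed by its
 * data buffer. Past MAXPIECEMSGS queued piece messages we send choke
 * followed by unchoke and stop serving requests until the unchoke goes
 * out, presumably so the peer drops and re-requests its pending blocks
 * rather than us buffering them all.
 */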
void
peer_on_request(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received request(%u,%u,%u) from %p\n",
        index, begin, length, p);
    if ((p->flags & PF_NO_REQUESTS) == 0) {
        peer_send(p, nb_create_piece(index, begin, length));
        peer_send(p, nb_create_torrentdata());
        p->npiece_msgs++;
        if (p->npiece_msgs >= MAXPIECEMSGS) {
            peer_send(p, nb_create_choke());
            peer_send(p, nb_create_unchoke());
            p->flags |= PF_NO_REQUESTS;
        }
    }
}
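
/* The peer canceled a request; unqueue the matching piece/data pair if
 * it has not been sent yet. */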
void
peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received cancel(%u,%u,%u) from %p\n",
        index, begin, length, p);
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry)
        if (nl->nb->type == NB_PIECE
            && nb_get_begin(nl->nb) == begin
            && nb_get_index(nl->nb) == index
            && nb_get_length(nl->nb) == length) {
            struct nb_link *data = BTPDQ_NEXT(nl, entry);
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
            break;
        }
}
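
/*
 * Per-second maintenance: send a keepalive after 120s of idle output;
 * kill the peer on a stalled write (60s), on 10 minutes of mutual
 * disinterest once the torrent is complete, or on a handshake that
 * has not finished within 60s.
 */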
void
peer_on_tick(struct peer *p)
{
    if (p->flags & PF_ATTACHED) {
        if (BTPDQ_EMPTY(&p->outq)) {
            if (btpd_seconds - p->t_lastwrite >= 120)
                peer_keepalive(p);
        } else if (btpd_seconds - p->t_wantwrite >= 60) {
            btpd_log(BTPD_L_CONN, "write attempt timed out.\n");
            goto kill;
        }
        if ((cm_full(p->n->tp) && !(p->flags & PF_P_WANT) &&
                btpd_seconds - p->t_nointerest >= 600)) {
            btpd_log(BTPD_L_CONN, "no interest for 10 minutes.\n");
            goto kill;
        }
    } else if (btpd_seconds - p->t_created >= 60) {
        btpd_log(BTPD_L_CONN, "hand shake timed out.\n");
        goto kill;
    }
    return;
kill:
    peer_kill(p);
}
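
/* Small predicates over peer state, presumably used by the upload and
 * download schedulers. */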
int
peer_chokes(struct peer *p)
{
    return p->flags & PF_P_CHOKE;
}

int
peer_has(struct peer *p, uint32_t index)
{
    return has_bit(p->piece_field, index);
}

int
peer_laden(struct peer *p)
{
    return p->nreqs_out >= MAXPIPEDREQUESTS;
}

int
peer_wanted(struct peer *p)
{
    return (p->flags & PF_I_WANT) == PF_I_WANT;
}

int
peer_leech_ok(struct peer *p)
{
    return (p->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT;
}

int
peer_active_down(struct peer *p)
{
    return peer_leech_ok(p) || p->nreqs_out > 0;
}

int
peer_active_up(struct peer *p)
{
    return (p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT
        || p->npiece_msgs > 0;
}

int
peer_full(struct peer *p)
{
    return p->npieces == p->n->tp->npieces;
}