A clone of btpd with my configuration changes.

595 lines
14 KiB

  1. #include <sys/types.h>
  2. #include <sys/socket.h>
  3. #include <netinet/in.h>
  4. #include <ctype.h>
  5. #include <math.h>
  6. #include <string.h>
  7. #include <unistd.h>
  8. #include "btpd.h"
  9. void
  10. peer_kill(struct peer *p)
  11. {
  12. struct nb_link *nl;
  13. btpd_log(BTPD_L_CONN, "killed peer %p\n", p);
  14. if (p->flags & PF_ATTACHED) {
  15. BTPDQ_REMOVE(&p->n->peers, p, p_entry);
  16. p->n->npeers--;
  17. if (p->n->active) {
  18. ul_on_lost_peer(p);
  19. dl_on_lost_peer(p);
  20. }
  21. } else
  22. BTPDQ_REMOVE(&net_unattached, p, p_entry);
  23. if (p->flags & PF_ON_READQ)
  24. BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
  25. if (p->flags & PF_ON_WRITEQ)
  26. BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
  27. btpd_ev_del(&p->in_ev);
  28. btpd_ev_del(&p->out_ev);
  29. close(p->sd);
  30. nl = BTPDQ_FIRST(&p->outq);
  31. while (nl != NULL) {
  32. struct nb_link *next = BTPDQ_NEXT(nl, entry);
  33. nb_drop(nl->nb);
  34. free(nl);
  35. nl = next;
  36. }
  37. if (p->in.buf != NULL)
  38. free(p->in.buf);
  39. if (p->piece_field != NULL)
  40. free(p->piece_field);
  41. free(p);
  42. net_npeers--;
  43. }
/*
 * Set the state of the incoming-message parser and the number of
 * bytes it must collect before acting on that state.
 */
void
peer_set_in_state(struct peer *p, enum input_state state, size_t size)
{
    p->in.state = state;
    p->in.st_bytes = size;
}
  50. void
  51. peer_send(struct peer *p, struct net_buf *nb)
  52. {
  53. struct nb_link *nl = btpd_calloc(1, sizeof(*nl));
  54. nl->nb = nb;
  55. nb_hold(nb);
  56. if (BTPDQ_EMPTY(&p->outq)) {
  57. assert(p->outq_off == 0);
  58. btpd_ev_add(&p->out_ev, NULL);
  59. p->t_wantwrite = btpd_seconds;
  60. }
  61. BTPDQ_INSERT_TAIL(&p->outq, nl, entry);
  62. }
/*
 * Remove a network buffer from the peer's outq.
 * If part of the buffer has already been written
 * to the network it cannot be removed.
 *
 * Returns 1 if the buffer is removed, 0 if not.
 */
  70. int
  71. peer_unsend(struct peer *p, struct nb_link *nl)
  72. {
  73. if (!(nl == BTPDQ_FIRST(&p->outq) && p->outq_off > 0)) {
  74. BTPDQ_REMOVE(&p->outq, nl, entry);
  75. if (nl->nb->type == NB_TORRENTDATA) {
  76. assert(p->npiece_msgs > 0);
  77. p->npiece_msgs--;
  78. }
  79. nb_drop(nl->nb);
  80. free(nl);
  81. if (BTPDQ_EMPTY(&p->outq)) {
  82. if (p->flags & PF_ON_WRITEQ) {
  83. BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
  84. p->flags &= ~PF_ON_WRITEQ;
  85. } else
  86. btpd_ev_del(&p->out_ev);
  87. }
  88. return 1;
  89. } else
  90. return 0;
  91. }
  92. void
  93. peer_sent(struct peer *p, struct net_buf *nb)
  94. {
  95. switch (nb->type) {
  96. case NB_KEEPALIVE:
  97. btpd_log(BTPD_L_MSG, "sent keepalive to %p\n", p);
  98. break;
  99. case NB_CHOKE:
  100. btpd_log(BTPD_L_MSG, "sent choke to %p\n", p);
  101. break;
  102. case NB_UNCHOKE:
  103. btpd_log(BTPD_L_MSG, "sent unchoke to %p\n", p);
  104. p->flags &= ~PF_NO_REQUESTS;
  105. break;
  106. case NB_INTEREST:
  107. btpd_log(BTPD_L_MSG, "sent interest to %p\n", p);
  108. break;
  109. case NB_UNINTEREST:
  110. btpd_log(BTPD_L_MSG, "sent uninterest to %p\n", p);
  111. break;
  112. case NB_HAVE:
  113. btpd_log(BTPD_L_MSG, "sent have(%u) to %p\n",
  114. nb_get_index(nb), p);
  115. break;
  116. case NB_BITFIELD:
  117. btpd_log(BTPD_L_MSG, "sent bitfield to %p\n", p);
  118. break;
  119. case NB_REQUEST:
  120. btpd_log(BTPD_L_MSG, "sent request(%u,%u,%u) to %p\n",
  121. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  122. break;
  123. case NB_PIECE:
  124. btpd_log(BTPD_L_MSG, "sent piece(%u,%u,%u) to %p\n",
  125. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  126. break;
  127. case NB_CANCEL:
  128. btpd_log(BTPD_L_MSG, "sent cancel(%u,%u,%u) to %p\n",
  129. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  130. break;
  131. case NB_TORRENTDATA:
  132. btpd_log(BTPD_L_MSG, "sent data to %p\n", p);
  133. assert(p->npiece_msgs > 0);
  134. p->npiece_msgs--;
  135. break;
  136. case NB_MULTIHAVE:
  137. btpd_log(BTPD_L_MSG, "sent multihave to %p\n", p);
  138. break;
  139. case NB_BITDATA:
  140. btpd_log(BTPD_L_MSG, "sent bitdata to %p\n", p);
  141. break;
  142. case NB_SHAKE:
  143. btpd_log(BTPD_L_MSG, "sent shake to %p\n", p);
  144. break;
  145. }
  146. }
/*
 * Send a block request to the peer and track it among our
 * outstanding requests. The caller must not exceed the request
 * pipeline limit.
 */
void
peer_request(struct peer *p, struct block_request *req)
{
    assert(p->nreqs_out < MAXPIPEDREQUESTS);
    p->nreqs_out++;
    BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
    peer_send(p, req->msg);
}
  155. int
  156. peer_requested(struct peer *p, uint32_t piece, uint32_t block)
  157. {
  158. uint32_t begin = block * PIECE_BLOCKLEN;
  159. struct block_request *req;
  160. BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
  161. if (nb_get_index(req->msg) == piece && nb_get_begin(req->msg) == begin)
  162. return 1;
  163. return 0;
  164. }
/*
 * Queue a keepalive message for the peer.
 */
void
peer_keepalive(struct peer *p)
{
    peer_send(p, nb_create_keepalive());
}
  170. void
  171. peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
  172. {
  173. BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
  174. p->nreqs_out--;
  175. int removed = 0;
  176. struct nb_link *nl;
  177. BTPDQ_FOREACH(nl, &p->outq, entry) {
  178. if (nl->nb == req->msg) {
  179. removed = peer_unsend(p, nl);
  180. break;
  181. }
  182. }
  183. if (!removed)
  184. peer_send(p, nb);
  185. if (p->nreqs_out == 0)
  186. peer_on_no_reqs(p);
  187. }
/*
 * Stop choking the peer and tell it so.
 */
void
peer_unchoke(struct peer *p)
{
    p->flags &= ~PF_I_CHOKE;
    peer_send(p, nb_create_unchoke());
}
/*
 * Choke the peer: unsend every queued piece message (together with
 * its data buffer) that has not started to hit the wire, then queue
 * a choke message.
 */
void
peer_choke(struct peer *p)
{
    struct nb_link *nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        if (nl->nb->type == NB_PIECE) {
            /* The data buffer is queued directly after its piece
             * header; see peer_on_request, which enqueues them as a
             * pair. */
            struct nb_link *data = next;
            next = BTPDQ_NEXT(next, entry);
            /* Only drop the data if the header could be dropped; a
             * partially written header must keep its data. */
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
        }
        nl = next;
    }
    p->flags |= PF_I_CHOKE;
    peer_send(p, nb_create_choke());
}
/*
 * Note that we want piece `index` from this peer. On the 0 -> 1
 * transition of the want count we must appear interested to the
 * peer: either by unsending a still-queued uninterest message, by
 * sending a fresh interest message, or, if an uninterest was
 * deferred because requests were still in flight, by simply
 * dropping the deferred PF_DO_UNWANT flag.
 */
void
peer_want(struct peer *p, uint32_t index)
{
    assert(p->nwant < p->npieces);
    p->nwant++;
    if (p->nwant == 1) {
        if (p->nreqs_out == 0) {
            assert((p->flags & PF_DO_UNWANT) == 0);
            int unsent = 0;
            /* If the last queued message is an unsent uninterest we
             * can cancel it instead of sending a new interest. */
            struct nb_link *nl = BTPDQ_LAST(&p->outq, nb_tq);
            if (nl != NULL && nl->nb->type == NB_UNINTEREST)
                unsent = peer_unsend(p, nl);
            if (!unsent)
                peer_send(p, nb_create_interest());
        } else {
            /* Requests in flight imply a deferred uninterest;
             * cancelling the deferral is enough. */
            assert((p->flags & PF_DO_UNWANT) != 0);
            p->flags &= ~PF_DO_UNWANT;
        }
        p->flags |= PF_I_WANT;
    }
}
/*
 * Note that we no longer want piece `index` from this peer. On the
 * 1 -> 0 transition we become uninterested; if requests are still
 * outstanding, sending the uninterest is deferred via PF_DO_UNWANT
 * (flushed later by peer_on_no_reqs).
 */
void
peer_unwant(struct peer *p, uint32_t index)
{
    assert(p->nwant > 0);
    p->nwant--;
    if (p->nwant == 0) {
        p->flags &= ~PF_I_WANT;
        p->t_nointerest = btpd_seconds;
        if (p->nreqs_out == 0)
            peer_send(p, nb_create_uninterest());
        else
            p->flags |= PF_DO_UNWANT;
    }
}
/*
 * Allocate and initialise the state shared by incoming and outgoing
 * peers: socket, timers, queues and IO events. The new peer starts
 * out mutually choked, waiting to parse a handshake, on the
 * unattached list.
 */
static struct peer *
peer_create_common(int sd)
{
    struct peer *p = btpd_calloc(1, sizeof(*p));
    p->sd = sd;
    p->flags = PF_I_CHOKE | PF_P_CHOKE;
    p->t_created = btpd_seconds;
    p->t_lastwrite = btpd_seconds;
    p->t_nointerest = btpd_seconds;
    BTPDQ_INIT(&p->my_reqs);
    BTPDQ_INIT(&p->outq);
    /* 28 bytes: presumably the handshake prefix (1 length byte +
     * 19-byte protocol string + 8 reserved bytes) -- confirm against
     * the SHAKE_PSTR parser. */
    peer_set_in_state(p, SHAKE_PSTR, 28);
    event_set(&p->out_ev, p->sd, EV_WRITE, net_write_cb, p);
    event_set(&p->in_ev, p->sd, EV_READ, net_read_cb, p);
    btpd_ev_add(&p->in_ev, NULL);
    BTPDQ_INSERT_TAIL(&net_unattached, p, p_entry);
    net_npeers++;
    return p;
}
/*
 * Create a peer for an accepted incoming connection. It stays
 * unattached until its handshake identifies a torrent.
 */
void
peer_create_in(int sd)
{
    struct peer *p = peer_create_common(sd);
    p->flags |= PF_INCOMING;
}
  271. void
  272. peer_create_out(struct net *n, const uint8_t *id,
  273. const char *ip, int port)
  274. {
  275. int sd;
  276. struct peer *p;
  277. if (net_connect(ip, port, &sd) != 0)
  278. return;
  279. p = peer_create_common(sd);
  280. p->n = n;
  281. peer_send(p, nb_create_shake(n->tp));
  282. }
  283. void
  284. peer_create_out_compact(struct net *n, const char *compact)
  285. {
  286. int sd;
  287. struct peer *p;
  288. struct sockaddr_in addr;
  289. addr.sin_family = AF_INET;
  290. bcopy(compact, &addr.sin_addr.s_addr, 4);
  291. bcopy(compact + 4, &addr.sin_port, 2);
  292. if (net_connect2((struct sockaddr *)&addr, sizeof(addr), &sd) != 0)
  293. return;
  294. p = peer_create_common(sd);
  295. p->n = n;
  296. peer_send(p, nb_create_shake(n->tp));
  297. }
  298. void
  299. peer_on_no_reqs(struct peer *p)
  300. {
  301. if ((p->flags & PF_DO_UNWANT) != 0) {
  302. assert(p->nwant == 0);
  303. p->flags &= ~PF_DO_UNWANT;
  304. peer_send(p, nb_create_uninterest());
  305. }
  306. }
/*
 * Handle an incoming keepalive; it only gets logged.
 */
void
peer_on_keepalive(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received keep alive from %p\n", p);
}
  312. void
  313. peer_on_shake(struct peer *p)
  314. {
  315. uint8_t printid[21];
  316. int i;
  317. for (i = 0; i < 20 && isprint(p->id[i]); i++)
  318. printid[i] = p->id[i];
  319. printid[i] = '\0';
  320. btpd_log(BTPD_L_MSG, "received shake(%s) from %p\n", printid, p);
  321. p->piece_field = btpd_calloc(1, (int)ceil(p->n->tp->npieces / 8.0));
  322. if (cm_pieces(p->n->tp) > 0) {
  323. if ((cm_pieces(p->n->tp) * 9 < 5 +
  324. ceil(p->n->tp->npieces / 8.0)))
  325. peer_send(p, nb_create_multihave(p->n->tp));
  326. else {
  327. peer_send(p, nb_create_bitfield(p->n->tp));
  328. peer_send(p, nb_create_bitdata(p->n->tp));
  329. }
  330. }
  331. BTPDQ_REMOVE(&net_unattached, p, p_entry);
  332. BTPDQ_INSERT_HEAD(&p->n->peers, p, p_entry);
  333. p->flags |= PF_ATTACHED;
  334. p->n->npeers++;
  335. ul_on_new_peer(p);
  336. dl_on_new_peer(p);
  337. }
  338. void
  339. peer_on_choke(struct peer *p)
  340. {
  341. btpd_log(BTPD_L_MSG, "received choke from %p\n", p);
  342. if ((p->flags & PF_P_CHOKE) != 0)
  343. return;
  344. else {
  345. p->flags |= PF_P_CHOKE;
  346. dl_on_choke(p);
  347. struct nb_link *nl = BTPDQ_FIRST(&p->outq);
  348. while (nl != NULL) {
  349. struct nb_link *next = BTPDQ_NEXT(nl, entry);
  350. if (nl->nb->type == NB_REQUEST)
  351. peer_unsend(p, nl);
  352. nl = next;
  353. }
  354. }
  355. }
  356. void
  357. peer_on_unchoke(struct peer *p)
  358. {
  359. btpd_log(BTPD_L_MSG, "received unchoke from %p\n", p);
  360. if ((p->flags & PF_P_CHOKE) == 0)
  361. return;
  362. else {
  363. p->flags &= ~PF_P_CHOKE;
  364. dl_on_unchoke(p);
  365. }
  366. }
  367. void
  368. peer_on_interest(struct peer *p)
  369. {
  370. btpd_log(BTPD_L_MSG, "received interest from %p\n", p);
  371. if ((p->flags & PF_P_WANT) != 0)
  372. return;
  373. else {
  374. p->flags |= PF_P_WANT;
  375. ul_on_interest(p);
  376. }
  377. }
  378. void
  379. peer_on_uninterest(struct peer *p)
  380. {
  381. btpd_log(BTPD_L_MSG, "received uninterest from %p\n", p);
  382. if ((p->flags & PF_P_WANT) == 0)
  383. return;
  384. else {
  385. p->flags &= ~PF_P_WANT;
  386. p->t_nointerest = btpd_seconds;
  387. ul_on_uninterest(p);
  388. }
  389. }
  390. void
  391. peer_on_have(struct peer *p, uint32_t index)
  392. {
  393. btpd_log(BTPD_L_MSG, "received have(%u) from %p\n", index, p);
  394. if (!has_bit(p->piece_field, index)) {
  395. set_bit(p->piece_field, index);
  396. p->npieces++;
  397. dl_on_piece_ann(p, index);
  398. }
  399. }
  400. void
  401. peer_on_bitfield(struct peer *p, const uint8_t *field)
  402. {
  403. btpd_log(BTPD_L_MSG, "received bitfield from %p\n", p);
  404. assert(p->npieces == 0);
  405. bcopy(field, p->piece_field, (size_t)ceil(p->n->tp->npieces / 8.0));
  406. for (uint32_t i = 0; i < p->n->tp->npieces; i++) {
  407. if (has_bit(p->piece_field, i)) {
  408. p->npieces++;
  409. dl_on_piece_ann(p, i);
  410. }
  411. }
  412. }
/*
 * Handle an incoming piece (block) message. If it matches one of
 * our outstanding requests the block is handed to the download
 * layer; otherwise it is logged and discarded.
 */
void
peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length, const char *data)
{
    struct block_request *req;
    /* After the loop, req is the matching request, or NULL if the
     * FOREACH ran to completion without a match. */
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if ((nb_get_begin(req->msg) == begin &&
            nb_get_index(req->msg) == index &&
            nb_get_length(req->msg) == length))
            break;
    if (req != NULL) {
        btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
        assert(p->nreqs_out > 0);
        p->nreqs_out--;
        BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
        dl_on_block(p, req, index, begin, length, data);
        /* Last request completed: flush any deferred uninterest. */
        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);
    } else
        btpd_log(BTPD_L_MSG, "discarded piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
}
  436. void
  437. peer_on_request(struct peer *p, uint32_t index, uint32_t begin,
  438. uint32_t length)
  439. {
  440. btpd_log(BTPD_L_MSG, "received request(%u,%u,%u) from %p\n",
  441. index, begin, length, p);
  442. if ((p->flags & PF_NO_REQUESTS) == 0) {
  443. peer_send(p, nb_create_piece(index, begin, length));
  444. peer_send(p, nb_create_torrentdata());
  445. p->npiece_msgs++;
  446. if (p->npiece_msgs >= MAXPIECEMSGS) {
  447. peer_send(p, nb_create_choke());
  448. peer_send(p, nb_create_unchoke());
  449. p->flags |= PF_NO_REQUESTS;
  450. }
  451. }
  452. }
/*
 * Handle a cancel message: if the matching piece message is still
 * queued and unsent, remove it together with its data buffer.
 */
void
peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received cancel(%u,%u,%u) from %p\n",
        index, begin, length, p);
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry)
        if (nl->nb->type == NB_PIECE
            && nb_get_begin(nl->nb) == begin
            && nb_get_index(nl->nb) == index
            && nb_get_length(nl->nb) == length) {
            /* The data buffer is queued directly after its piece
             * header (see peer_on_request). Only drop it if the
             * header itself could be dropped. */
            struct nb_link *data = BTPDQ_NEXT(nl, entry);
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
            break;
        }
}
/*
 * Periodic per-peer maintenance: send keepalives on idle
 * connections and kill peers that time out. May free `p` via
 * peer_kill.
 */
void
peer_on_tick(struct peer *p)
{
    if (p->flags & PF_ATTACHED) {
        if (BTPDQ_EMPTY(&p->outq)) {
            /* Nothing written for two minutes: send a keepalive. */
            if (btpd_seconds - p->t_lastwrite >= 120)
                peer_keepalive(p);
        } else if (btpd_seconds - p->t_wantwrite >= 60) {
            /* Output queued for a minute without progress. */
            btpd_log(BTPD_L_CONN, "write attempt timed out.\n");
            goto kill;
        }
        /* Once the torrent is complete (cm_full), a peer that has
         * shown no interest for ten minutes is useless to us. */
        if ((cm_full(p->n->tp) && !(p->flags & PF_P_WANT) &&
            btpd_seconds - p->t_nointerest >= 600)) {
            btpd_log(BTPD_L_CONN, "no interest for 10 minutes.\n");
            goto kill;
        }
    } else if (btpd_seconds - p->t_created >= 60) {
        /* Unattached peers must complete the handshake within a
         * minute. */
        btpd_log(BTPD_L_CONN, "hand shake timed out.\n");
        goto kill;
    }
    return;
kill:
    peer_kill(p);
}
/*
 * Nonzero if the peer is choking us.
 */
int
peer_chokes(struct peer *p)
{
    return p->flags & PF_P_CHOKE;
}
/*
 * Nonzero if the peer claims to have the given piece.
 */
int
peer_has(struct peer *p, uint32_t index)
{
    return has_bit(p->piece_field, index);
}
/*
 * Nonzero if our request pipeline to this peer is full.
 */
int
peer_laden(struct peer *p)
{
    return p->nreqs_out >= MAXPIPEDREQUESTS;
}
/*
 * 1 if we are interested in this peer, 0 otherwise.
 */
int
peer_wanted(struct peer *p)
{
    return (p->flags & PF_I_WANT) == PF_I_WANT;
}
/*
 * 1 if we may download from this peer: we are interested and the
 * peer is not choking us.
 */
int
peer_leech_ok(struct peer *p)
{
    return (p->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT;
}
/*
 * Nonzero if the peer counts as an active download source: we may
 * leech from it, or requests are still outstanding.
 */
int
peer_active_down(struct peer *p)
{
    return peer_leech_ok(p) || p->nreqs_out > 0;
}
/*
 * Nonzero if the peer counts as an active upload target: it wants
 * data and we are not choking it, or piece messages are still
 * queued for it.
 */
int
peer_active_up(struct peer *p)
{
    return (p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT
        || p->npiece_msgs > 0;
}
/*
 * Nonzero if the peer has every piece of the torrent.
 */
int
peer_full(struct peer *p)
{
    return p->npieces == p->n->tp->npieces;
}