A clone of btpd with my configuration changes.

557 lines
13 KiB

  1. #include <sys/types.h>
  2. #include <sys/socket.h>
  3. #include <netinet/in.h>
  4. #include <ctype.h>
  5. #include <math.h>
  6. #include <string.h>
  7. #include <unistd.h>
  8. #include "btpd.h"
  9. void
  10. peer_kill(struct peer *p)
  11. {
  12. struct nb_link *nl;
  13. btpd_log(BTPD_L_CONN, "killed peer %p\n", p);
  14. if (p->flags & PF_ATTACHED) {
  15. BTPDQ_REMOVE(&p->n->peers, p, p_entry);
  16. p->n->npeers--;
  17. if (p->n->active) {
  18. ul_on_lost_peer(p);
  19. dl_on_lost_peer(p);
  20. }
  21. } else
  22. BTPDQ_REMOVE(&net_unattached, p, p_entry);
  23. if (p->flags & PF_ON_READQ)
  24. BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
  25. if (p->flags & PF_ON_WRITEQ)
  26. BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
  27. close(p->sd);
  28. event_del(&p->in_ev);
  29. event_del(&p->out_ev);
  30. nl = BTPDQ_FIRST(&p->outq);
  31. while (nl != NULL) {
  32. struct nb_link *next = BTPDQ_NEXT(nl, entry);
  33. nb_drop(nl->nb);
  34. free(nl);
  35. nl = next;
  36. }
  37. if (p->in.buf != NULL)
  38. free(p->in.buf);
  39. if (p->piece_field != NULL)
  40. free(p->piece_field);
  41. free(p);
  42. net_npeers--;
  43. }
  44. void
  45. peer_set_in_state(struct peer *p, enum input_state state, size_t size)
  46. {
  47. p->in.state = state;
  48. p->in.st_bytes = size;
  49. }
  50. void
  51. peer_send(struct peer *p, struct net_buf *nb)
  52. {
  53. struct nb_link *nl = btpd_calloc(1, sizeof(*nl));
  54. nl->nb = nb;
  55. nb_hold(nb);
  56. if (BTPDQ_EMPTY(&p->outq)) {
  57. assert(p->outq_off == 0);
  58. event_add(&p->out_ev, WRITE_TIMEOUT);
  59. }
  60. BTPDQ_INSERT_TAIL(&p->outq, nl, entry);
  61. }
/*
 * Remove a network buffer from the peer's outq.
 * If a part of the buffer has already been written
 * to the network it cannot be removed.
 *
 * Returns 1 if the buffer is removed, 0 if not.
 */
  69. int
  70. peer_unsend(struct peer *p, struct nb_link *nl)
  71. {
  72. if (!(nl == BTPDQ_FIRST(&p->outq) && p->outq_off > 0)) {
  73. BTPDQ_REMOVE(&p->outq, nl, entry);
  74. if (nl->nb->type == NB_TORRENTDATA) {
  75. assert(p->npiece_msgs > 0);
  76. p->npiece_msgs--;
  77. }
  78. nb_drop(nl->nb);
  79. free(nl);
  80. if (BTPDQ_EMPTY(&p->outq)) {
  81. if (p->flags & PF_ON_WRITEQ) {
  82. BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
  83. p->flags &= ~PF_ON_WRITEQ;
  84. } else
  85. event_del(&p->out_ev);
  86. }
  87. return 1;
  88. } else
  89. return 0;
  90. }
  91. void
  92. peer_sent(struct peer *p, struct net_buf *nb)
  93. {
  94. switch (nb->type) {
  95. case NB_CHOKE:
  96. btpd_log(BTPD_L_MSG, "sent choke to %p\n", p);
  97. break;
  98. case NB_UNCHOKE:
  99. btpd_log(BTPD_L_MSG, "sent unchoke to %p\n", p);
  100. p->flags &= ~PF_NO_REQUESTS;
  101. break;
  102. case NB_INTEREST:
  103. btpd_log(BTPD_L_MSG, "sent interest to %p\n", p);
  104. break;
  105. case NB_UNINTEREST:
  106. btpd_log(BTPD_L_MSG, "sent uninterest to %p\n", p);
  107. break;
  108. case NB_HAVE:
  109. btpd_log(BTPD_L_MSG, "sent have(%u) to %p\n",
  110. nb_get_index(nb), p);
  111. break;
  112. case NB_BITFIELD:
  113. btpd_log(BTPD_L_MSG, "sent bitfield to %p\n", p);
  114. break;
  115. case NB_REQUEST:
  116. btpd_log(BTPD_L_MSG, "sent request(%u,%u,%u) to %p\n",
  117. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  118. break;
  119. case NB_PIECE:
  120. btpd_log(BTPD_L_MSG, "sent piece(%u,%u,%u) to %p\n",
  121. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  122. break;
  123. case NB_CANCEL:
  124. btpd_log(BTPD_L_MSG, "sent cancel(%u,%u,%u) to %p\n",
  125. nb_get_index(nb), nb_get_begin(nb), nb_get_length(nb), p);
  126. break;
  127. case NB_TORRENTDATA:
  128. btpd_log(BTPD_L_MSG, "sent data to %p\n", p);
  129. assert(p->npiece_msgs > 0);
  130. p->npiece_msgs--;
  131. break;
  132. case NB_MULTIHAVE:
  133. btpd_log(BTPD_L_MSG, "sent multihave to %p\n", p);
  134. break;
  135. case NB_BITDATA:
  136. btpd_log(BTPD_L_MSG, "sent bitdata to %p\n", p);
  137. break;
  138. case NB_SHAKE:
  139. btpd_log(BTPD_L_MSG, "sent shake to %p\n", p);
  140. break;
  141. }
  142. }
  143. void
  144. peer_request(struct peer *p, struct block_request *req)
  145. {
  146. assert(p->nreqs_out < MAXPIPEDREQUESTS);
  147. p->nreqs_out++;
  148. BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
  149. peer_send(p, req->blk->msg);
  150. }
  151. int
  152. peer_requested(struct peer *p, struct block *blk)
  153. {
  154. struct block_request *req;
  155. BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
  156. if (req->blk == blk)
  157. return 1;
  158. return 0;
  159. }
  160. void
  161. peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
  162. {
  163. BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
  164. p->nreqs_out--;
  165. int removed = 0;
  166. struct nb_link *nl;
  167. BTPDQ_FOREACH(nl, &p->outq, entry) {
  168. if (nl->nb == req->blk->msg) {
  169. removed = peer_unsend(p, nl);
  170. break;
  171. }
  172. }
  173. if (!removed)
  174. peer_send(p, nb);
  175. if (p->nreqs_out == 0)
  176. peer_on_no_reqs(p);
  177. }
/*
 * Unchoke the peer: clear our choke flag and queue an unchoke
 * message.
 */
void
peer_unchoke(struct peer *p)
{
    p->flags &= ~PF_I_CHOKE;
    peer_send(p, nb_create_unchoke());
}
/*
 * Choke the peer: withdraw any queued but unsent piece transfers,
 * then queue a choke message.
 */
void
peer_choke(struct peer *p)
{
    struct nb_link *nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        if (nl->nb->type == NB_PIECE) {
            /* An NB_PIECE header is queued immediately followed by
             * its NB_TORRENTDATA buffer (see peer_on_request), so
             * `next' is the data link of this piece message.
             * NOTE(review): assumes the pair is always adjacent and
             * next is non-NULL for every NB_PIECE — confirm. */
            struct nb_link *data = next;
            next = BTPDQ_NEXT(next, entry);
            /* Only drop the data when the header could be unsent;
             * peer_unsend refuses partially written buffers. */
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
        }
        nl = next;
    }
    p->flags |= PF_I_CHOKE;
    peer_send(p, nb_create_choke());
}
/*
 * Note that we want piece `index' from this peer. On the 0 -> 1
 * transition of the want count, make sure our interest is (or will
 * remain) announced to the peer.
 */
void
peer_want(struct peer *p, uint32_t index)
{
    assert(p->nwant < p->npieces);
    p->nwant++;
    if (p->nwant == 1) {
        if (p->nreqs_out == 0) {
            assert((p->flags & PF_DO_UNWANT) == 0);
            int unsent = 0;
            /* If an uninterest message is still queued last, cancel
             * it rather than sending a redundant interest message. */
            struct nb_link *nl = BTPDQ_LAST(&p->outq, nb_tq);
            if (nl != NULL && nl->nb->type == NB_UNINTEREST)
                unsent = peer_unsend(p, nl);
            if (!unsent)
                peer_send(p, nb_create_interest());
        } else {
            /* Requests are still outstanding, so the deferred
             * uninterest (PF_DO_UNWANT, see peer_unwant) was never
             * sent; cancelling the deferral keeps us interested. */
            assert((p->flags & PF_DO_UNWANT) != 0);
            p->flags &= ~PF_DO_UNWANT;
        }
        p->flags |= PF_I_WANT;
    }
}
  222. void
  223. peer_unwant(struct peer *p, uint32_t index)
  224. {
  225. assert(p->nwant > 0);
  226. p->nwant--;
  227. if (p->nwant == 0) {
  228. p->flags &= ~PF_I_WANT;
  229. if (p->nreqs_out == 0)
  230. peer_send(p, nb_create_uninterest());
  231. else
  232. p->flags |= PF_DO_UNWANT;
  233. }
  234. }
  235. static struct peer *
  236. peer_create_common(int sd)
  237. {
  238. struct peer *p = btpd_calloc(1, sizeof(*p));
  239. p->sd = sd;
  240. p->flags = PF_I_CHOKE | PF_P_CHOKE;
  241. BTPDQ_INIT(&p->my_reqs);
  242. BTPDQ_INIT(&p->outq);
  243. peer_set_in_state(p, SHAKE_PSTR, 28);
  244. event_set(&p->out_ev, p->sd, EV_WRITE, net_write_cb, p);
  245. event_set(&p->in_ev, p->sd, EV_READ, net_read_cb, p);
  246. event_add(&p->in_ev, NULL);
  247. BTPDQ_INSERT_TAIL(&net_unattached, p, p_entry);
  248. net_npeers++;
  249. return p;
  250. }
  251. void
  252. peer_create_in(int sd)
  253. {
  254. struct peer *p = peer_create_common(sd);
  255. p->flags |= PF_INCOMING;
  256. }
  257. void
  258. peer_create_out(struct net *n, const uint8_t *id,
  259. const char *ip, int port)
  260. {
  261. int sd;
  262. struct peer *p;
  263. if (net_connect(ip, port, &sd) != 0)
  264. return;
  265. p = peer_create_common(sd);
  266. p->n = n;
  267. peer_send(p, nb_create_shake(n->tp));
  268. }
  269. void
  270. peer_create_out_compact(struct net *n, const char *compact)
  271. {
  272. int sd;
  273. struct peer *p;
  274. struct sockaddr_in addr;
  275. addr.sin_family = AF_INET;
  276. addr.sin_addr.s_addr = *(long *)compact;
  277. addr.sin_port = *(short *)(compact + 4);
  278. if (net_connect2((struct sockaddr *)&addr, sizeof(addr), &sd) != 0)
  279. return;
  280. p = peer_create_common(sd);
  281. p->n = n;
  282. peer_send(p, nb_create_shake(n->tp));
  283. }
  284. void
  285. peer_on_no_reqs(struct peer *p)
  286. {
  287. if ((p->flags & PF_DO_UNWANT) != 0) {
  288. assert(p->nwant == 0);
  289. p->flags &= ~PF_DO_UNWANT;
  290. peer_send(p, nb_create_uninterest());
  291. }
  292. }
/*
 * Handle a keep alive message from the peer; nothing to do besides
 * logging it.
 */
void
peer_on_keepalive(struct peer *p)
{
    btpd_log(BTPD_L_MSG, "received keep alive from %p\n", p);
}
/*
 * Called when the peer's handshake has been fully received. Logs the
 * printable prefix of the peer id, allocates the remote piece field,
 * announces our pieces and attaches the peer to its net.
 */
void
peer_on_shake(struct peer *p)
{
    uint8_t printid[21];
    int i;
    /* Copy at most 20 id bytes, stopping at the first non-printable
     * one; printid is 21 bytes to leave room for the terminator. */
    for (i = 0; i < 20 && isprint(p->id[i]); i++)
        printid[i] = p->id[i];
    printid[i] = '\0';
    btpd_log(BTPD_L_MSG, "received shake(%s) from %p\n", printid, p);
    /* One bit per piece, rounded up to whole bytes. */
    p->piece_field = btpd_calloc(1, (int)ceil(p->n->tp->meta.npieces / 8.0));
    if (cm_pieces(p->n->tp) > 0) {
        /* Announce with whichever encoding is smaller: individual
         * have messages (presumably 9 bytes each on the wire) or a
         * bitfield (5 byte header plus one bit per piece). */
        if ((cm_pieces(p->n->tp) * 9 < 5 +
            ceil(p->n->tp->meta.npieces / 8.0)))
            peer_send(p, nb_create_multihave(p->n->tp));
        else {
            peer_send(p, nb_create_bitfield(p->n->tp));
            peer_send(p, nb_create_bitdata(p->n->tp));
        }
    }
    /* Move the peer from the unattached list onto its net. */
    BTPDQ_REMOVE(&net_unattached, p, p_entry);
    BTPDQ_INSERT_HEAD(&p->n->peers, p, p_entry);
    p->flags |= PF_ATTACHED;
    p->n->npeers++;
    ul_on_new_peer(p);
    dl_on_new_peer(p);
}
  324. void
  325. peer_on_choke(struct peer *p)
  326. {
  327. btpd_log(BTPD_L_MSG, "received choke from %p\n", p);
  328. if ((p->flags & PF_P_CHOKE) != 0)
  329. return;
  330. else {
  331. p->flags |= PF_P_CHOKE;
  332. dl_on_choke(p);
  333. struct nb_link *nl = BTPDQ_FIRST(&p->outq);
  334. while (nl != NULL) {
  335. struct nb_link *next = BTPDQ_NEXT(nl, entry);
  336. if (nl->nb->type == NB_REQUEST)
  337. peer_unsend(p, nl);
  338. nl = next;
  339. }
  340. }
  341. }
  342. void
  343. peer_on_unchoke(struct peer *p)
  344. {
  345. btpd_log(BTPD_L_MSG, "received unchoke from %p\n", p);
  346. if ((p->flags & PF_P_CHOKE) == 0)
  347. return;
  348. else {
  349. p->flags &= ~PF_P_CHOKE;
  350. dl_on_unchoke(p);
  351. }
  352. }
  353. void
  354. peer_on_interest(struct peer *p)
  355. {
  356. btpd_log(BTPD_L_MSG, "received interest from %p\n", p);
  357. if ((p->flags & PF_P_WANT) != 0)
  358. return;
  359. else {
  360. p->flags |= PF_P_WANT;
  361. ul_on_interest(p);
  362. }
  363. }
  364. void
  365. peer_on_uninterest(struct peer *p)
  366. {
  367. btpd_log(BTPD_L_MSG, "received uninterest from %p\n", p);
  368. if ((p->flags & PF_P_WANT) == 0)
  369. return;
  370. else {
  371. p->flags &= ~PF_P_WANT;
  372. ul_on_uninterest(p);
  373. }
  374. }
  375. void
  376. peer_on_have(struct peer *p, uint32_t index)
  377. {
  378. btpd_log(BTPD_L_MSG, "received have(%u) from %p\n", index, p);
  379. if (!has_bit(p->piece_field, index)) {
  380. set_bit(p->piece_field, index);
  381. p->npieces++;
  382. dl_on_piece_ann(p, index);
  383. }
  384. }
  385. void
  386. peer_on_bitfield(struct peer *p, const uint8_t *field)
  387. {
  388. btpd_log(BTPD_L_MSG, "received bitfield from %p\n", p);
  389. assert(p->npieces == 0);
  390. bcopy(field, p->piece_field, (size_t)ceil(p->n->tp->meta.npieces / 8.0));
  391. for (uint32_t i = 0; i < p->n->tp->meta.npieces; i++) {
  392. if (has_bit(p->piece_field, i)) {
  393. p->npieces++;
  394. dl_on_piece_ann(p, i);
  395. }
  396. }
  397. }
/*
 * Handle an incoming piece (block) message. The data is accepted
 * only if it matches one of our outstanding requests to this peer;
 * otherwise it is discarded.
 */
void
peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length, const char *data)
{
    struct block_request *req;
    /* Find the matching outstanding request; req is NULL after the
     * loop when nothing matches. */
    BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
        if ((nb_get_begin(req->blk->msg) == begin &&
            nb_get_index(req->blk->msg) == index &&
            nb_get_length(req->blk->msg) == length))
            break;
    if (req != NULL) {
        btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
        assert(p->nreqs_out > 0);
        p->nreqs_out--;
        BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
        dl_on_block(p, req, index, begin, length, data);
        /* Flush a deferred uninterest if this was the last
         * outstanding request. */
        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);
    } else
        btpd_log(BTPD_L_MSG, "discarded piece(%u,%u,%u) from %p\n",
            index, begin, length, p);
}
  421. void
  422. peer_on_request(struct peer *p, uint32_t index, uint32_t begin,
  423. uint32_t length)
  424. {
  425. btpd_log(BTPD_L_MSG, "received request(%u,%u,%u) from %p\n",
  426. index, begin, length, p);
  427. if ((p->flags & PF_NO_REQUESTS) == 0) {
  428. uint8_t *content;
  429. if (cm_get_bytes(p->n->tp, index, begin, length, &content) == 0) {
  430. peer_send(p, nb_create_piece(index, begin, length));
  431. peer_send(p, nb_create_torrentdata(content, length));
  432. p->npiece_msgs++;
  433. if (p->npiece_msgs >= MAXPIECEMSGS) {
  434. peer_send(p, nb_create_choke());
  435. peer_send(p, nb_create_unchoke());
  436. p->flags |= PF_NO_REQUESTS;
  437. }
  438. }
  439. }
  440. }
/*
 * Handle a cancel message: if the matching piece message is still
 * queued and not partially sent, remove both the piece header and
 * its data buffer from the output queue.
 */
void
peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    btpd_log(BTPD_L_MSG, "received cancel(%u,%u,%u) from %p\n",
        index, begin, length, p);
    struct nb_link *nl;
    BTPDQ_FOREACH(nl, &p->outq, entry)
        if (nl->nb->type == NB_PIECE
            && nb_get_begin(nl->nb) == begin
            && nb_get_index(nl->nb) == index
            && nb_get_length(nl->nb) == length) {
            /* The NB_TORRENTDATA buffer is queued right after its
             * NB_PIECE header (see peer_on_request). */
            struct nb_link *data = BTPDQ_NEXT(nl, entry);
            /* peer_unsend fails on a partially written header, in
             * which case the data must still be sent as well. */
            if (peer_unsend(p, nl))
                peer_unsend(p, data);
            break;
        }
}
/*
 * Nonzero iff the peer is choking us. Note: returns the raw
 * PF_P_CHOKE mask value, not necessarily 0 or 1.
 */
int
peer_chokes(struct peer *p)
{
    return p->flags & PF_P_CHOKE;
}
/*
 * Nonzero iff the peer has piece `index'. Returns has_bit's result
 * directly, which may not be normalized to 0/1.
 */
int
peer_has(struct peer *p, uint32_t index)
{
    return has_bit(p->piece_field, index);
}
  469. int
  470. peer_laden(struct peer *p)
  471. {
  472. return p->nreqs_out >= MAXPIPEDREQUESTS;
  473. }
  474. int
  475. peer_wanted(struct peer *p)
  476. {
  477. return (p->flags & PF_I_WANT) == PF_I_WANT;
  478. }
  479. int
  480. peer_leech_ok(struct peer *p)
  481. {
  482. return (p->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT;
  483. }
  484. int
  485. peer_active_down(struct peer *p)
  486. {
  487. return peer_leech_ok(p) || p->nreqs_out > 0;
  488. }
  489. int
  490. peer_active_up(struct peer *p)
  491. {
  492. return (p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT
  493. || p->npiece_msgs > 0;
  494. }
  495. int
  496. peer_full(struct peer *p)
  497. {
  498. return p->npieces == p->n->tp->meta.npieces;
  499. }