A clone of btpd with my configuration changes.

207 lines | 5.2 KiB

#include <math.h>

#include "btpd.h"
#include "tracker_req.h"

/*
 * Called when a peer announces it's got a new piece.
 *
 * If the piece is missing or unfull we increase the peer's
 * wanted level and if possible call dl_on_download.
 */
void
dl_on_piece_ann(struct peer *p, uint32_t index)
{
    struct net *n = p->n;
    n->piece_count[index]++;
    if (cm_has_piece(n->tp, index))
        return;
    struct piece *pc = dl_find_piece(n, index);
    if (n->endgame) {
        assert(pc != NULL);
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    } else if (pc == NULL) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p)) {
            pc = dl_new_piece(n, index);
            if (pc != NULL)
                dl_piece_assign_requests(pc, p);
        }
    } else if (!piece_full(pc)) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p);
    }
}

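/*
 * Called when we want to start requesting blocks from a peer.
 *
 * Assigns requests to the peer, using the end game assignment
 * strategy if the torrent has entered end game mode.
 */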
void
dl_on_download(struct peer *p)
{
    assert(peer_wanted(p));
    struct net *n = p->n;
    if (n->endgame) {
        dl_assign_requests_eg(p);
    } else {
        unsigned count = dl_assign_requests(p);
        if (count == 0 && !p->n->endgame) // We may have entered end game.
            assert(!peer_wanted(p) || peer_laden(p));
    }
}

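/*
 * Called when a peer unchokes us. Start downloading if the peer
 * has pieces we want.
 */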
void
dl_on_unchoke(struct peer *p)
{
    if (peer_wanted(p))
        dl_on_download(p);
}

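/*
 * Called when we stop downloading from a peer. Unassigns the
 * peer's outstanding block requests so the blocks can be
 * requested from other peers.
 */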
void
dl_on_undownload(struct peer *p)
{
    if (!p->n->endgame)
        dl_unassign_requests(p);
    else
        dl_unassign_requests_eg(p);
}

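/*
 * Called when a peer chokes us. Any requests outstanding on the
 * peer are unassigned.
 */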
void
dl_on_choke(struct peer *p)
{
    if (p->nreqs_out > 0)
        dl_on_undownload(p);
}

/**
 * Called when a piece has been tested positively.
 */
void
dl_on_ok_piece(struct net *n, uint32_t piece)
{
    struct peer *p, *next;
    struct piece *pc = dl_find_piece(n, piece);

    btpd_log(BTPD_L_POL, "Got piece: %u.\n", pc->index);

    struct net_buf *have = nb_create_have(pc->index);
    nb_hold(have);
    BTPDQ_FOREACH(p, &n->peers, p_entry)
        if (!peer_has(p, pc->index))
            peer_send(p, have);
    nb_drop(have);

    if (n->endgame)
        BTPDQ_FOREACH(p, &n->peers, p_entry)
            if (peer_has(p, pc->index))
                peer_unwant(p, pc->index);

    assert(pc->nreqs == 0);
    piece_free(pc);

    if (cm_full(n->tp)) {
        btpd_log(BTPD_L_BTPD, "Finished downloading '%s'.\n",
            torrent_name(n->tp));
        tr_complete(n->tp);
        BTPDQ_FOREACH_MUTABLE(p, &n->peers, p_entry, next) {
            assert(p->nwant == 0);
            if (peer_full(p))
                peer_kill(p);
        }
    }
}

/*
 * Called when a piece has been tested negatively.
 */
void
dl_on_bad_piece(struct net *n, uint32_t piece)
{
    struct piece *pc = dl_find_piece(n, piece);

    btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of '%s'.\n",
        pc->index, torrent_name(n->tp));

    for (uint32_t i = 0; i < pc->nblocks; i++)
        clear_bit(pc->down_field, i);
    pc->ngot = 0;
    pc->nbusy = 0;

    if (n->endgame) {
        struct peer *p;
        BTPDQ_FOREACH(p, &n->peers, p_entry) {
            if (peer_has(p, pc->index) && peer_leech_ok(p) && !peer_laden(p))
                dl_assign_requests_eg(p);
        }
    } else
        dl_on_piece_unfull(pc); // XXX: May get bad data again.
}

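/* Called when a new peer has connected. Nothing to do here yet. */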
void
dl_on_new_peer(struct peer *p)
{
}

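/*
 * Called when a peer is lost. Decrements the availability count
 * for each piece the peer had and unassigns its outstanding
 * requests.
 */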
void
dl_on_lost_peer(struct peer *p)
{
    struct net *n = p->n;

    for (uint32_t i = 0; i < n->tp->npieces; i++)
        if (peer_has(p, i))
            n->piece_count[i]--;

    if (p->nreqs_out > 0)
        dl_on_undownload(p);
}

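/*
 * Called when a block has been received. Saves the data and, in
 * end game mode, cancels duplicate requests for the same block
 * that were sent to other peers. Tests the piece once all of its
 * blocks have arrived.
 */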
void
dl_on_block(struct peer *p, struct block_request *req,
    uint32_t index, uint32_t begin, uint32_t length, const uint8_t *data)
{
    struct net *n = p->n;
    struct piece *pc = dl_find_piece(n, index);

    cm_put_bytes(p->n->tp, index, begin, data, length);
    pc->ngot++;

    if (n->endgame) {
        struct block_request *req, *next;
        struct net_buf *cancel = nb_create_cancel(index, begin, length);
        nb_hold(cancel);
        BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
            if (nb_get_begin(req->msg) == begin) {
                if (req->p != p)
                    peer_cancel(req->p, req, cancel);
                pc->nreqs--;
            }
        }
        nb_drop(cancel);
        dl_piece_reorder_eg(pc);
        BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
            if (nb_get_begin(req->msg) != begin)
                continue;
            BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
            nb_drop(req->msg);
            if (peer_leech_ok(req->p) && !peer_laden(req->p))
                dl_assign_requests_eg(req->p);
            free(req);
        }
        if (pc->ngot == pc->nblocks)
            cm_test_piece(pc->n->tp, pc->index);
    } else {
        BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
        nb_drop(req->msg);
        free(req);
        pc->nreqs--;
        // XXX: Needs to be looked at if we introduce snubbing.
        clear_bit(pc->down_field, begin / PIECE_BLOCKLEN);
        pc->nbusy--;
        if (pc->ngot == pc->nblocks)
            cm_test_piece(pc->n->tp, pc->index);
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests(p);
    }
}