A clone of btpd with my configuration changes.

#include <sys/types.h>
#include <sys/mman.h>

#include "btpd.h"
#include "tracker_req.h"
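
/*
 * Per second housekeeping for a torrent: fires due tracker,
 * optimistic unchoke and choke algorithm events, and clears this
 * second's slot in each peer's rate history.
 */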
void
cm_by_second(struct torrent *tp)
{
    if (btpd.seconds == tp->tracker_time)
        tracker_req(tp, TR_EMPTY);

    if (btpd.seconds == tp->opt_time)
        next_optimistic(tp, NULL);

    if (btpd.seconds == tp->choke_time)
        choke_alg(tp);

    struct peer *p;
    int ri = btpd.seconds % RATEHISTORY;

    BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
        p->rate_to_me[ri] = 0;
        p->rate_from_me[ri] = 0;
    }
}

/*
 * Called when a peer announces it's got a new piece.
 *
 * If the piece is missing or unfull we increase the peer's
 * wanted level and, if the peer can be downloaded from, assign
 * requests to it.
 */
void
cm_on_piece_ann(struct peer *p, uint32_t index)
{
    struct torrent *tp = p->tp;

    tp->piece_count[index]++;
    if (has_bit(tp->piece_field, index))
        return;

    struct piece *pc = cm_find_piece(tp, index);
    if (tp->endgame) {
        assert(pc != NULL);
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            cm_assign_requests_eg(p);
    } else if (pc == NULL) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p)) {
            pc = cm_new_piece(tp, index);
            if (pc != NULL)
                cm_piece_assign_requests(pc, p);
        }
    } else if (!piece_full(pc)) {
        peer_want(p, index);
        if (!peer_chokes(p) && !peer_laden(p))
            cm_piece_assign_requests(pc, p);
    }
}
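
/*
 * Assign requests to a peer we want data from. In end game the end
 * game assigner is used; otherwise, if no requests could be handed
 * out, the peer must either no longer be wanted or already be laden.
 */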
void
cm_on_download(struct peer *p)
{
    assert(peer_wanted(p));
    struct torrent *tp = p->tp;
    if (tp->endgame) {
        cm_assign_requests_eg(p);
    } else {
        unsigned count = cm_assign_requests(p);
        if (count == 0 && !p->tp->endgame) // We may have entered end game.
            assert(!peer_wanted(p) || peer_laden(p));
    }
}
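
/*
 * Called when a peer unchokes us; start downloading if we want
 * something from it.
 */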
void
cm_on_unchoke(struct peer *p)
{
    if (peer_wanted(p))
        cm_on_download(p);
}
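
/*
 * Called when we stop downloading from a peer; its outstanding
 * requests are given back.
 */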
void
cm_on_undownload(struct peer *p)
{
    if (!p->tp->endgame)
        cm_unassign_requests(p);
    else
        cm_unassign_requests_eg(p);
}
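
/*
 * Called when a peer chokes us; drop any outstanding requests to it.
 */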
void
cm_on_choke(struct peer *p)
{
    if (p->nreqs_out > 0)
        cm_on_undownload(p);
}
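
/*
 * Called when we may start uploading to a peer; rerun the choke
 * algorithm so the peer gets considered.
 */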
void
cm_on_upload(struct peer *p)
{
    choke_alg(p->tp);
}
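
/*
 * Called when a peer becomes interested in us; if we aren't choking
 * it, we are now uploading to it.
 */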
void
cm_on_interest(struct peer *p)
{
    if ((p->flags & PF_I_CHOKE) == 0)
        cm_on_upload(p);
}
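
/*
 * Called when we stop uploading to a peer; rerun the choke algorithm.
 */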
void
cm_on_unupload(struct peer *p)
{
    choke_alg(p->tp);
}
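
/*
 * Called when a peer loses interest in us; if we weren't choking it,
 * our upload to it ends.
 */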
void
cm_on_uninterest(struct peer *p)
{
    if ((p->flags & PF_I_CHOKE) == 0)
        cm_on_unupload(p);
}

/**
 * Called when a piece has been tested positively.
 */
void
cm_on_ok_piece(struct piece *pc)
{
    struct peer *p;
    struct torrent *tp = pc->tp;

    btpd_log(BTPD_L_POL, "Got piece: %u.\n", pc->index);

    set_bit(tp->piece_field, pc->index);
    tp->have_npieces++;
    msync(tp->imem, tp->isiz, MS_ASYNC);

    struct net_buf *have = nb_create_have(pc->index);
    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
        peer_send(p, have);

    if (tp->endgame)
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            if (peer_has(p, pc->index))
                peer_unwant(p, pc->index);

    assert(pc->nreqs == 0);
    piece_free(pc);

    if (torrent_has_all(tp)) {
        btpd_log(BTPD_L_BTPD, "Finished: %s.\n", tp->relpath);
        tracker_req(tp, TR_COMPLETED);
        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
            assert(p->nwant == 0);
    }
}

/*
 * Called when a piece has been tested negatively.
 */
void
cm_on_bad_piece(struct piece *pc)
{
    struct torrent *tp = pc->tp;

    btpd_log(BTPD_L_ERROR, "Bad hash for piece %u of %s.\n",
        pc->index, tp->relpath);

    for (uint32_t i = 0; i < pc->nblocks; i++) {
        clear_bit(pc->down_field, i);
        clear_bit(pc->have_field, i);
    }
    pc->ngot = 0;
    pc->nbusy = 0;

    msync(tp->imem, tp->isiz, MS_ASYNC);

    if (tp->endgame) {
        struct peer *p;
        BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
            if (peer_has(p, pc->index) && peer_leech_ok(p) && !peer_laden(p))
                cm_assign_requests_eg(p);
        }
    } else
        cm_on_piece_unfull(pc); // XXX: May get bad data again.
}
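
/*
 * Called when a peer attaches to this torrent. The peer is taken off
 * the unattached list and, unless it's the first one, inserted after
 * the optimistically unchoked peer roughly two times out of three,
 * otherwise at the tail of the peer list.
 */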
void
cm_on_new_peer(struct peer *p)
{
    struct torrent *tp = p->tp;

    tp->npeers++;
    p->flags |= PF_ATTACHED;
    BTPDQ_REMOVE(&btpd.unattached, p, cm_entry);

    if (tp->npeers == 1) {
        BTPDQ_INSERT_HEAD(&tp->peers, p, cm_entry);
        next_optimistic(tp, p);
    } else {
        if (random() > RAND_MAX / 3)
            BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, p, cm_entry);
        else
            BTPDQ_INSERT_TAIL(&tp->peers, p, cm_entry);
    }
}
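
/*
 * Called when an attached peer is lost. Updates optimistic unchoke
 * and choke state as needed, forgets the peer's pieces and takes
 * back its outstanding requests.
 */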
void
cm_on_lost_peer(struct peer *p)
{
    struct torrent *tp = p->tp;

    tp->npeers--;
    p->flags &= ~PF_ATTACHED;

    if (tp->npeers == 0) {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        tp->optimistic = NULL;
        tp->choke_time = tp->opt_time = 0;
    } else if (tp->optimistic == p) {
        struct peer *next = BTPDQ_NEXT(p, cm_entry);
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        next_optimistic(tp, next);
    } else if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
        cm_on_unupload(p);
    } else {
        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
    }

    for (uint32_t i = 0; i < tp->meta.npieces; i++)
        if (peer_has(p, i))
            tp->piece_count[i]--;

    if (p->nreqs_out > 0)
        cm_on_undownload(p);

#if 0
    struct piece *pc = BTPDQ_FIRST(&tp->getlst);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && tp->piece_count[pc->index] == 0)
            cm_on_peerless_piece(pc);
        pc = next;
    }
#endif
}
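
/*
 * Called when a block has been received. The data is written out,
 * and in end game the block's other outstanding requests are
 * cancelled before eligible peers are handed to the end game
 * assigner. A completed piece is passed on to cm_on_piece for
 * testing.
 */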
void
cm_on_block(struct peer *p, struct block_request *req,
    uint32_t index, uint32_t begin, uint32_t length, const char *data)
{
    struct torrent *tp = p->tp;
    struct block *blk = req->blk;
    struct piece *pc = blk->pc;

    off_t cbegin = index * p->tp->meta.piece_length + begin;
    torrent_put_bytes(p->tp, data, cbegin, length);

    set_bit(pc->have_field, begin / PIECE_BLOCKLEN);
    pc->ngot++;

    if (tp->endgame) {
        /* Note: this declaration shadows the req parameter; every
         * request for the block, the parameter included, is freed
         * below. */
        struct block_request *req;
        struct net_buf *cancel = nb_create_cancel(index, begin, length);
        nb_hold(cancel);
        BTPDQ_FOREACH(req, &blk->reqs, blk_entry) {
            if (req->p != p)
                peer_cancel(req->p, req, cancel);
            pc->nreqs--;
        }
        nb_drop(cancel);
        cm_piece_reorder_eg(pc);
        req = BTPDQ_FIRST(&blk->reqs);
        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
            if (peer_leech_ok(req->p) && !peer_laden(req->p))
                cm_assign_requests_eg(req->p);
            free(req);
            req = next;
        }
        BTPDQ_INIT(&blk->reqs);
        if (pc->ngot == pc->nblocks)
            cm_on_piece(pc);
    } else {
        BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
        free(req);
        pc->nreqs--;
        // XXX: Needs to be looked at if we introduce snubbing.
        clear_bit(pc->down_field, begin / PIECE_BLOCKLEN);
        pc->nbusy--;
        if (pc->ngot == pc->nblocks)
            cm_on_piece(pc);
        if (peer_leech_ok(p) && !peer_laden(p))
            cm_assign_requests(p);
    }
}