A clone of btpd with my configuration changes.

/*
 * The commandments:
 *
 * A peer is wanted except when it only has pieces we've already
 * downloaded or fully requested. Thus, a peer's wanted count is
 * increased for each missing or unfull piece it announces, or
 * when a piece it has becomes unfull.
 *
 * When a peer we want unchokes us, requests will primarily
 * be put on pieces we're already downloading and then on
 * possible new pieces.
 *
 * When choosing between several different new pieces to start
 * downloading, the rarest piece will be chosen.
 *
 * End game mode sets in when all missing blocks are requested.
 * In end game mode no piece is counted as full unless it's
 * downloaded.
 *
 */

#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <unistd.h>

#include <openssl/sha.h>

#include "btpd.h"
#include "stream.h"
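/*
 * Allocate and initialize the bookkeeping for downloading one piece.
 * A single calloc holds the piece struct, the down_field bitmap and
 * the blocks array; each block gets a pre-built request message and
 * an empty request queue. The piece is marked busy on the net and
 * put first on the get list. (With the usual 16KiB block length a
 * 256KiB piece gets 16 blocks; the exact value of PIECE_BLOCKLEN is
 * defined elsewhere in btpd.)
 */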
static struct piece *
piece_alloc(struct net *n, uint32_t index)
{
    assert(!has_bit(n->busy_field, index)
        && n->npcs_busy < n->tp->meta.npieces);
    struct piece *pc;
    size_t mem, field, blocks;
    unsigned nblocks;
    off_t piece_length = torrent_piece_size(n->tp, index);

    nblocks = (unsigned)ceil((double)piece_length / PIECE_BLOCKLEN);
    blocks = sizeof(pc->blocks[0]) * nblocks;
    field = (size_t)ceil(nblocks / 8.0);
    mem = sizeof(*pc) + field + blocks;

    pc = btpd_calloc(1, mem);
    pc->n = n;
    pc->down_field = (uint8_t *)(pc + 1);
    pc->have_field = cm_get_block_field(n->tp, index);

    pc->index = index;
    pc->nblocks = nblocks;
    pc->nreqs = 0;
    pc->next_block = 0;

    for (unsigned i = 0; i < nblocks; i++)
        if (has_bit(pc->have_field, i))
            pc->ngot++;
    assert(pc->ngot < pc->nblocks);

    pc->blocks = (struct block *)(pc->down_field + field);
    for (unsigned i = 0; i < nblocks; i++) {
        uint32_t start = i * PIECE_BLOCKLEN;
        uint32_t len = torrent_block_size(pc, i);
        struct block *blk = &pc->blocks[i];
        blk->pc = pc;
        BTPDQ_INIT(&blk->reqs);
        blk->msg = nb_create_request(index, start, len);
        nb_hold(blk->msg);
    }

    n->npcs_busy++;
    set_bit(n->busy_field, index);
    BTPDQ_INSERT_HEAD(&n->getlst, pc, entry);
    return pc;
}
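/*
 * Release a piece: clear its busy bit, take it off the get list,
 * free any outstanding block requests and drop the cached request
 * messages, then free the piece itself.
 */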
void
piece_free(struct piece *pc)
{
    struct net *n = pc->n;
    assert(n->npcs_busy > 0);
    n->npcs_busy--;
    clear_bit(n->busy_field, pc->index);
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    for (unsigned i = 0; i < pc->nblocks; i++) {
        struct block_request *req = BTPDQ_FIRST(&pc->blocks[i].reqs);
        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
            free(req);
            req = next;
        }
        nb_drop(pc->blocks[i].msg);
    }
    free(pc);
}
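/*
 * A piece counts as full when every block is either downloaded
 * (ngot) or currently requested (nbusy).
 */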
int
piece_full(struct piece *pc)
{
    return pc->ngot + pc->nbusy == pc->nblocks;
}
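/*
 * End game should start when every piece is either downloaded or
 * busy, and all busy pieces are full, i.e. all missing blocks have
 * been requested.
 */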
static int
dl_should_enter_endgame(struct net *n)
{
    int should;
    if (cm_get_npieces(n->tp) + n->npcs_busy == n->tp->meta.npieces) {
        should = 1;
        struct piece *pc;
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (!piece_full(pc)) {
                should = 0;
                break;
            }
        }
    } else
        should = 0;
    return should;
}
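/*
 * Insert a piece into the end game get list, keeping the list sorted
 * by requests per missing block (r = nreqs / (nblocks - ngot)),
 * fewest first. Fully downloaded but untested pieces go to the tail.
 * For example, a piece with 4 requests on 2 missing blocks (r == 2)
 * sorts after one with 1 request on 1 missing block (r == 1).
 */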
static void
dl_piece_insert_eg(struct piece *pc)
{
    struct piece_tq *getlst = &pc->n->getlst;
    if (pc->nblocks == pc->ngot)
        BTPDQ_INSERT_TAIL(getlst, pc, entry);
    else {
        unsigned r = pc->nreqs / (pc->nblocks - pc->ngot);
        struct piece *it;
        BTPDQ_FOREACH(it, getlst, entry) {
            if ((it->nblocks == it->ngot
                 || r < it->nreqs / (it->nblocks - it->ngot))) {
                BTPDQ_INSERT_BEFORE(it, pc, entry);
                break;
            }
        }
        if (it == NULL)
            BTPDQ_INSERT_TAIL(getlst, pc, entry);
    }
}
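/*
 * Re-sort a piece within the end game get list after its counters
 * have changed.
 */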
void
dl_piece_reorder_eg(struct piece *pc)
{
    BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
    dl_piece_insert_eg(pc);
}
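/*
 * Switch the net to end game mode: forget which blocks are being
 * downloaded (clear every down_field and nbusy), rebuild the get
 * list in end game order, and hand out end game requests to the
 * peers that have something we want and may be leeched from.
 */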
static void
dl_enter_endgame(struct net *n)
{
    struct peer *p;
    struct piece *pc;
    struct piece *pcs[n->npcs_busy];
    unsigned pi;

    btpd_log(BTPD_L_POL, "Entering end game\n");
    n->endgame = 1;

    pi = 0;
    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        for (unsigned i = 0; i < pc->nblocks; i++)
            clear_bit(pc->down_field, i);
        pc->nbusy = 0;
        pcs[pi] = pc;
        pi++;
    }
    BTPDQ_INIT(&n->getlst);
    while (pi > 0) {
        pi--;
        dl_piece_insert_eg(pcs[pi]);
    }
    BTPDQ_FOREACH(p, &n->peers, p_entry) {
        assert(p->nwant == 0);
        BTPDQ_FOREACH(pc, &n->getlst, entry) {
            if (peer_has(p, pc->index))
                peer_want(p, pc->index);
        }
        if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    }
}
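/*
 * Find the busy piece with the given index, or NULL if it isn't on
 * the get list.
 */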
struct piece *
dl_find_piece(struct net *n, uint32_t index)
{
    struct piece *pc;
    BTPDQ_FOREACH(pc, &n->getlst, entry)
        if (pc->index == index)
            break;
    return pc;
}
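/*
 * A piece may be started for this peer if the peer has it, we don't
 * have it yet, and it isn't already allocated for download.
 */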
static int
dl_piece_startable(struct peer *p, uint32_t index)
{
    return peer_has(p, index) && !cm_has_piece(p->n->tp, index)
        && !has_bit(p->n->busy_field, index);
}

/*
 * Find the rarest piece the peer has that isn't already allocated
 * for download or already downloaded. If no such piece can be found
 * return ENOENT.
 *
 * Return 0 or ENOENT, index in res.
 */
static int
dl_choose_rarest(struct peer *p, uint32_t *res)
{
    uint32_t i;
    struct net *n = p->n;

    assert(n->endgame == 0);

    for (i = 0; i < n->tp->meta.npieces && !dl_piece_startable(p, i); i++)
        ;

    if (i == n->tp->meta.npieces)
        return ENOENT;

    uint32_t min_i = i;
    uint32_t min_c = 1;
    for (i++; i < n->tp->meta.npieces; i++) {
        if (dl_piece_startable(p, i)) {
            if (n->piece_count[i] == n->piece_count[min_i])
                min_c++;
            else if (n->piece_count[i] < n->piece_count[min_i]) {
                min_i = i;
                min_c = 1;
            }
        }
    }
    if (min_c > 1) {
        min_c = rand_between(1, min_c);
        for (i = min_i; min_c > 0; i++) {
            if (dl_piece_startable(p, i)
                && n->piece_count[i] == n->piece_count[min_i]) {
                min_c--;
                min_i = i;
            }
        }
    }
    *res = min_i;
    return 0;
}
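/*
 * Example of the tie-break above (illustrative numbers only): if the
 * startable pieces 3, 7 and 9 all share the lowest piece_count, the
 * first pass leaves min_i = 3 and min_c = 3; rand_between(1, 3) then
 * selects the 1st, 2nd or 3rd such piece, so each of 3, 7 and 9 is
 * chosen with equal probability.
 */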

/*
 * Called from dl_piece_assign_requests when a piece becomes full.
 * The wanted level of the peers that have this piece will be decreased.
 * This function is the only one that may trigger end game.
 */
static void
dl_on_piece_full(struct piece *pc)
{
    struct peer *p;
    BTPDQ_FOREACH(p, &pc->n->peers, p_entry) {
        if (peer_has(p, pc->index))
            peer_unwant(p, pc->index);
    }
    if (dl_should_enter_endgame(pc->n))
        dl_enter_endgame(pc->n);
}

/*
 * Allocate the piece indicated by the index for download.
 * There's a small possibility that a piece is fully downloaded
 * but hasn't been tested. If that is the case, the piece will
 * be tested and NULL will be returned. Also, we might then enter
 * end game.
 *
 * Return the piece or NULL.
 */
struct piece *
dl_new_piece(struct net *n, uint32_t index)
{
    btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
    cm_prealloc(n->tp, index);
    return piece_alloc(n, index);
}

/*
 * Called when a previously full piece loses a peer.
 * This is needed because we decreased the wanted level of
 * the peers that have this piece when it became full. Thus
 * we have to increase the wanted level again and try to
 * assign requests for this piece.
 */
void
dl_on_piece_unfull(struct piece *pc)
{
    struct net *n = pc->n;
    struct peer *p;
    assert(!piece_full(pc) && n->endgame == 0);
    BTPDQ_FOREACH(p, &n->peers, p_entry)
        if (peer_has(p, pc->index))
            peer_want(p, pc->index);
    p = BTPDQ_FIRST(&n->peers);
    while (p != NULL && !piece_full(pc)) {
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
        p = BTPDQ_NEXT(p, p_entry);
    }
}
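/* Advance next_block by one, wrapping around at the end of the piece. */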
#define INCNEXTBLOCK(pc) \
    (pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks

/*
 * Request as many blocks as possible on this piece from
 * the peer. If the piece becomes full we call dl_on_piece_full.
 *
 * Return the number of requests sent.
 */
unsigned
dl_piece_assign_requests(struct piece *pc, struct peer *p)
{
    assert(!piece_full(pc) && !peer_laden(p));
    unsigned count = 0;
    do {
        while ((has_bit(pc->have_field, pc->next_block)
                || has_bit(pc->down_field, pc->next_block)))
            INCNEXTBLOCK(pc);

        struct block *blk = &pc->blocks[pc->next_block];
        struct block_request *req = btpd_malloc(sizeof(*req));
        req->p = p;
        req->blk = blk;
        BTPDQ_INSERT_TAIL(&blk->reqs, req, blk_entry);

        peer_request(p, req);

        set_bit(pc->down_field, pc->next_block);
        pc->nbusy++;
        pc->nreqs++;
        count++;
        INCNEXTBLOCK(pc);
    } while (!piece_full(pc) && !peer_laden(p));

    if (piece_full(pc))
        dl_on_piece_full(pc);

    return count;
}

/*
 * Request as many blocks as possible from the peer. Puts
 * requests on already active pieces before starting on new
 * ones. Care must be taken since end game mode may be triggered
 * by the calls to dl_piece_assign_requests.
 *
 * Returns number of requests sent.
 *
 * XXX: should do something smart when deciding on which
 * already started piece to put requests on.
 */
unsigned
dl_assign_requests(struct peer *p)
{
    assert(!p->n->endgame && !peer_laden(p));
    struct piece *pc;
    struct net *n = p->n;
    unsigned count = 0;
    BTPDQ_FOREACH(pc, &n->getlst, entry) {
        if (piece_full(pc) || !peer_has(p, pc->index))
            continue;
        count += dl_piece_assign_requests(pc, p);
        if (n->endgame)
            break;
        if (!piece_full(pc))
            assert(peer_laden(p));
        if (peer_laden(p))
            break;
    }
    while (!peer_laden(p) && !n->endgame) {
        uint32_t index;
        if (dl_choose_rarest(p, &index) == 0) {
            pc = dl_new_piece(n, index);
            if (pc != NULL)
                count += dl_piece_assign_requests(pc, p);
        } else
            break;
    }
    return count;
}
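/*
 * Drop all requests this peer has outstanding (outside end game).
 * Requests are removed piece by piece; each affected block is
 * marked not busy again, and a piece that thereby stops being full
 * gets new requests handed out via dl_on_piece_unfull.
 */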
void
dl_unassign_requests(struct peer *p)
{
    while (p->nreqs_out > 0) {
        struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
        struct piece *pc = req->blk->pc;
        int was_full = piece_full(pc);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);

            uint32_t blki = nb_get_begin(req->blk->msg) / PIECE_BLOCKLEN;
            struct block *blk = req->blk;
            // XXX: Needs to be looked at if we introduce snubbing.
            assert(has_bit(pc->down_field, blki));
            clear_bit(pc->down_field, blki);
            pc->nbusy--;

            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;

            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }

        if (p->nreqs_out == 0)
            peer_on_no_reqs(p);

        if (was_full && !piece_full(pc))
            dl_on_piece_unfull(pc);
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
}
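/*
 * End game variant of dl_piece_assign_requests: walk the blocks of
 * the piece once, requesting every block we don't have and haven't
 * already requested from this particular peer, until the peer is
 * laden or we're back at the starting block.
 */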
static void
dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
{
    unsigned first_block = pc->next_block;
    do {
        if ((has_bit(pc->have_field, pc->next_block)
             || peer_requested(p, &pc->blocks[pc->next_block]))) {
            INCNEXTBLOCK(pc);
            continue;
        }
        struct block_request *req = btpd_calloc(1, sizeof(*req));
        req->blk = &pc->blocks[pc->next_block];
        req->p = p;
        BTPDQ_INSERT_TAIL(&pc->blocks[pc->next_block].reqs, req, blk_entry);
        pc->nreqs++;
        INCNEXTBLOCK(pc);
        peer_request(p, req);
    } while (!peer_laden(p) && pc->next_block != first_block);
}
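/*
 * Hand out end game requests to this peer for every piece it has
 * that still lacks blocks. Touched pieces are re-inserted so the
 * get list stays sorted by requests per missing block.
 */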
void
dl_assign_requests_eg(struct peer *p)
{
    assert(!peer_laden(p));
    struct net *n = p->n;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    struct piece *pc = BTPDQ_FIRST(&n->getlst);
    while (!peer_laden(p) && pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
            dl_piece_assign_requests_eg(pc, p);
            BTPDQ_REMOVE(&n->getlst, pc, entry);
            BTPDQ_INSERT_HEAD(&tmp, pc, entry);
        }
        pc = next;
    }

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}
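/*
 * End game counterpart of dl_unassign_requests: drop all of this
 * peer's outstanding requests. Blocks stay requestable by other
 * peers (there is no down_field bookkeeping to undo in end game),
 * and the affected pieces are re-inserted to keep the get list
 * sorted.
 */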
void
dl_unassign_requests_eg(struct peer *p)
{
    struct block_request *req;
    struct piece *pc;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    while (p->nreqs_out > 0) {
        req = BTPDQ_FIRST(&p->my_reqs);
        pc = req->blk->pc;
        BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
        BTPDQ_INSERT_HEAD(&tmp, pc, entry);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&req->blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;

            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
    peer_on_no_reqs(p);

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}