A clone of btpd with my configuration changes.

/*
 * The commandments:
 *
 * A peer is wanted except when it only has pieces we've already
 * downloaded or fully requested. Thus, a peer's wanted count is
 * increased for each missing or unfull piece it announces, or
 * when a piece it has becomes unfull.
 *
 * When a peer we want unchokes us, requests will primarily
 * be put on pieces we're already downloading and then on
 * possible new pieces.
 *
 * When choosing between several different new pieces to start
 * downloading, the rarest piece will be chosen.
 *
 * End game mode sets in when all missing blocks are requested.
 * In end game mode no piece is counted as full unless it's
 * downloaded.
 */
#include <fcntl.h>
#include <math.h>
#include <string.h>
#include <unistd.h>

#include <openssl/sha.h>

#include "btpd.h"
#include "stream.h"
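
/*
 * Allocate and initialize the piece with the given index. The down
 * field bitmap and the block array live in the same allocation as the
 * piece itself, while have_field points into the torrent's block
 * field. A request message is prebuilt for every block. The piece is
 * marked busy and put first on the torrent's get list.
 */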
static struct piece *
piece_alloc(struct torrent *tp, uint32_t index)
{
    assert(!has_bit(tp->busy_field, index)
        && tp->npcs_busy < tp->meta.npieces);
    struct piece *pc;
    size_t mem, field, blocks;
    unsigned nblocks;
    off_t piece_length = torrent_piece_size(tp, index);

    nblocks = (unsigned)ceil((double)piece_length / PIECE_BLOCKLEN);
    blocks = sizeof(pc->blocks[0]) * nblocks;
    field = (size_t)ceil(nblocks / 8.0);
    mem = sizeof(*pc) + field + blocks;

    pc = btpd_calloc(1, mem);
    pc->tp = tp;
    pc->down_field = (uint8_t *)(pc + 1);
    pc->have_field =
        tp->block_field +
        index * (size_t)ceil(tp->meta.piece_length / (double)(1 << 17));

    pc->index = index;
    pc->nblocks = nblocks;

    pc->nreqs = 0;
    pc->next_block = 0;

    for (unsigned i = 0; i < nblocks; i++)
        if (has_bit(pc->have_field, i))
            pc->ngot++;

    pc->blocks = (struct block *)(pc->down_field + field);
    for (unsigned i = 0; i < nblocks; i++) {
        uint32_t start = i * PIECE_BLOCKLEN;
        uint32_t len = torrent_block_size(pc, i);
        struct block *blk = &pc->blocks[i];
        blk->pc = pc;
        BTPDQ_INIT(&blk->reqs);
        blk->msg = nb_create_request(index, start, len);
        nb_hold(blk->msg);
    }

    tp->npcs_busy++;
    set_bit(tp->busy_field, index);
    BTPDQ_INSERT_HEAD(&tp->getlst, pc, entry);
    return pc;
}
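
/*
 * Release a piece: clear its busy bit, take it off the get list and
 * free any outstanding block requests together with the prebuilt
 * request messages.
 */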
void
piece_free(struct piece *pc)
{
    struct torrent *tp = pc->tp;
    assert(tp->npcs_busy > 0);
    tp->npcs_busy--;
    clear_bit(tp->busy_field, pc->index);
    BTPDQ_REMOVE(&pc->tp->getlst, pc, entry);
    for (unsigned i = 0; i < pc->nblocks; i++) {
        struct block_request *req = BTPDQ_FIRST(&pc->blocks[i].reqs);
        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, blk_entry);
            free(req);
            req = next;
        }
        nb_drop(pc->blocks[i].msg);
    }
    free(pc);
}
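
/*
 * A piece is full when every block is either downloaded or has an
 * outstanding request.
 */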
int
piece_full(struct piece *pc)
{
    return pc->ngot + pc->nbusy == pc->nblocks;
}
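
/*
 * End game should be entered when every piece is either downloaded or
 * busy, and all busy pieces are full, i.e. every missing block has
 * been requested.
 */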
static int
dl_should_enter_endgame(struct torrent *tp)
{
    int should;
    if (tp->have_npieces + tp->npcs_busy == tp->meta.npieces) {
        should = 1;
        struct piece *pc;
        BTPDQ_FOREACH(pc, &tp->getlst, entry) {
            if (!piece_full(pc)) {
                should = 0;
                break;
            }
        }
    } else
        should = 0;
    return should;
}
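
/*
 * Insert a piece into the end game get list, keeping the list sorted
 * by requests per missing block so that the least requested pieces
 * come first. Fully downloaded (but untested) pieces go last.
 */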
static void
dl_piece_insert_eg(struct piece *pc)
{
    struct piece_tq *getlst = &pc->tp->getlst;
    if (pc->nblocks == pc->ngot)
        BTPDQ_INSERT_TAIL(getlst, pc, entry);
    else {
        unsigned r = pc->nreqs / (pc->nblocks - pc->ngot);
        struct piece *it;
        BTPDQ_FOREACH(it, getlst, entry) {
            if ((it->nblocks == it->ngot
                    || r < it->nreqs / (it->nblocks - it->ngot))) {
                BTPDQ_INSERT_BEFORE(it, pc, entry);
                break;
            }
        }
        if (it == NULL)
            BTPDQ_INSERT_TAIL(getlst, pc, entry);
    }
}
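
/*
 * Re-sort a piece in the end game get list after its request count
 * has changed.
 */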
void
dl_piece_reorder_eg(struct piece *pc)
{
    BTPDQ_REMOVE(&pc->tp->getlst, pc, entry);
    dl_piece_insert_eg(pc);
}
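
/*
 * Switch the torrent to end game mode. All download state on the busy
 * pieces is reset, the get list is re-sorted in end game order, every
 * peer that has a busy piece is made to want it, and end game requests
 * are handed out to peers we may leech from.
 */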
static void
dl_enter_endgame(struct torrent *tp)
{
    struct peer *p;
    struct piece *pc;
    struct piece *pcs[tp->npcs_busy];
    unsigned pi;

    btpd_log(BTPD_L_POL, "Entering end game\n");
    tp->endgame = 1;

    pi = 0;
    BTPDQ_FOREACH(pc, &tp->getlst, entry) {
        for (unsigned i = 0; i < pc->nblocks; i++)
            clear_bit(pc->down_field, i);
        pc->nbusy = 0;
        pcs[pi] = pc;
        pi++;
    }
    BTPDQ_INIT(&tp->getlst);
    while (pi > 0) {
        pi--;
        dl_piece_insert_eg(pcs[pi]);
    }
    BTPDQ_FOREACH(p, &tp->peers, p_entry) {
        assert(p->nwant == 0);
        BTPDQ_FOREACH(pc, &tp->getlst, entry) {
            if (peer_has(p, pc->index))
                peer_want(p, pc->index);
        }
        if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
            dl_assign_requests_eg(p);
    }
}
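
/*
 * Return the piece with the given index from the get list, or NULL if
 * it isn't being downloaded.
 */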
struct piece *
dl_find_piece(struct torrent *tp, uint32_t index)
{
    struct piece *pc;
    BTPDQ_FOREACH(pc, &tp->getlst, entry)
        if (pc->index == index)
            break;
    return pc;
}
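
/*
 * Compare a computed piece hash against the expected one, either from
 * the in-memory hash table or, if that isn't loaded, by reading the
 * expected hash straight from the torrent file. Returns 0 on a match
 * (memcmp semantics).
 */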
static int
test_hash(struct torrent *tp, uint8_t *hash, unsigned long index)
{
    if (tp->meta.piece_hash != NULL)
        return memcmp(hash, tp->meta.piece_hash[index], SHA_DIGEST_LENGTH);
    else {
        char piece_hash[SHA_DIGEST_LENGTH];
        int fd;
        int bufi;
        int err;

        err = vopen(&fd, O_RDONLY, "%s/torrent", tp->relpath);
        if (err != 0)
            btpd_err("test_hash: %s\n", strerror(err));

        err = lseek(fd, tp->meta.pieces_off + index * SHA_DIGEST_LENGTH,
            SEEK_SET);
        if (err < 0)
            btpd_err("test_hash: %s\n", strerror(errno));

        bufi = 0;
        while (bufi < SHA_DIGEST_LENGTH) {
            ssize_t nread =
                read(fd, piece_hash + bufi, SHA_DIGEST_LENGTH - bufi);
            bufi += nread;
        }
        close(fd);

        return memcmp(hash, piece_hash, SHA_DIGEST_LENGTH);
    }
}
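
/*
 * Callback for bts_open_ro: opens content files read-only relative to
 * the torrent's content directory.
 */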
static int
ro_fd_cb(const char *path, int *fd, void *arg)
{
    struct torrent *tp = arg;
    return vopen(fd, O_RDONLY, "%s/content/%s", tp->relpath, path);
}
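
/*
 * Compute the SHA1 of a completed piece from disk and report the
 * result via dl_on_ok_piece or dl_on_bad_piece.
 */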
static void
torrent_test_piece(struct piece *pc)
{
    struct torrent *tp = pc->tp;
    int err;
    uint8_t hash[20];
    struct bt_stream_ro *bts;
    off_t plen = torrent_piece_size(tp, pc->index);

    if ((bts = bts_open_ro(&tp->meta, pc->index * tp->meta.piece_length,
             ro_fd_cb, tp)) == NULL)
        btpd_err("Out of memory.\n");

    if ((err = bts_sha(bts, plen, hash)) != 0)
        btpd_err("Ouch! %s\n", strerror(err));

    bts_close_ro(bts);

    if (test_hash(tp, hash, pc->index) == 0)
        dl_on_ok_piece(pc);
    else
        dl_on_bad_piece(pc);
}
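
/*
 * Called when the last block of a piece has been downloaded; the
 * piece is verified against its hash.
 */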
void
dl_on_piece(struct piece *pc)
{
    torrent_test_piece(pc);
}
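
/*
 * A piece can be started for a peer if the peer has it and we neither
 * have it nor are already busy downloading it.
 */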
static int
dl_piece_startable(struct peer *p, uint32_t index)
{
    return peer_has(p, index) && !has_bit(p->tp->piece_field, index)
        && !has_bit(p->tp->busy_field, index);
}
/*
 * Find the rarest piece the peer has that isn't already allocated
 * for download or already downloaded. If no such piece can be found
 * return ENOENT.
 *
 * Return 0 or ENOENT, index in res.
 */
static int
dl_choose_rarest(struct peer *p, uint32_t *res)
{
    uint32_t i;
    struct torrent *tp = p->tp;

    assert(tp->endgame == 0);

    for (i = 0; i < tp->meta.npieces && !dl_piece_startable(p, i); i++)
        ;

    if (i == tp->meta.npieces)
        return ENOENT;

    uint32_t min_i = i;
    uint32_t min_c = 1;
    for (i++; i < tp->meta.npieces; i++) {
        if (dl_piece_startable(p, i)) {
            if (tp->piece_count[i] == tp->piece_count[min_i])
                min_c++;
            else if (tp->piece_count[i] < tp->piece_count[min_i]) {
                min_i = i;
                min_c = 1;
            }
        }
    }
    if (min_c > 1) {
        min_c = 1 + rint((double)random() * (min_c - 1) / RAND_MAX);
        for (i = min_i; min_c > 0; i++) {
            if (dl_piece_startable(p, i)
                && tp->piece_count[i] == tp->piece_count[min_i]) {
                min_c--;
                min_i = i;
            }
        }
    }
    *res = min_i;
    return 0;
}
/*
 * Called from either dl_piece_assign_requests or dl_new_piece,
 * when a piece becomes full. The wanted level of the peers
 * that have this piece will be decreased. This function is
 * the only one that may trigger end game.
 */
static void
dl_on_piece_full(struct piece *pc)
{
    struct peer *p;
    BTPDQ_FOREACH(p, &pc->tp->peers, p_entry) {
        if (peer_has(p, pc->index))
            peer_unwant(p, pc->index);
    }
    if (dl_should_enter_endgame(pc->tp))
        dl_enter_endgame(pc->tp);
}
/*
 * Allocate the piece indicated by the index for download.
 * There's a small possibility that a piece is fully downloaded
 * but hasn't been tested. If that is the case the piece will
 * be tested and NULL will be returned. Also, we might then enter
 * end game.
 *
 * Return the piece or NULL.
 */
struct piece *
dl_new_piece(struct torrent *tp, uint32_t index)
{
    btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
    struct piece *pc = piece_alloc(tp, index);
    if (pc->ngot == pc->nblocks) {
        dl_on_piece_full(pc);
        dl_on_piece(pc);
        if (dl_should_enter_endgame(tp))
            dl_enter_endgame(tp);
        return NULL;
    } else
        return pc;
}
/*
 * Called when a previously full piece loses a peer.
 * This is needed because we have decreased the wanted
 * level for the peers that have this piece when it got
 * full. Thus we have to increase the wanted level and
 * try to assign requests for this piece.
 */
void
dl_on_piece_unfull(struct piece *pc)
{
    struct torrent *tp = pc->tp;
    struct peer *p;
    assert(!piece_full(pc) && tp->endgame == 0);
    BTPDQ_FOREACH(p, &tp->peers, p_entry)
        if (peer_has(p, pc->index))
            peer_want(p, pc->index);
    p = BTPDQ_FIRST(&tp->peers);
    while (p != NULL && !piece_full(pc)) {
        if (peer_leech_ok(p) && !peer_laden(p))
            dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
        p = BTPDQ_NEXT(p, p_entry);
    }
}
#define INCNEXTBLOCK(pc) \
    (pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks
/*
 * Request as many blocks as possible on this piece from
 * the peer. If the piece becomes full we call dl_on_piece_full.
 *
 * Return the number of requests sent.
 */
unsigned
dl_piece_assign_requests(struct piece *pc, struct peer *p)
{
    assert(!piece_full(pc) && !peer_laden(p));
    unsigned count = 0;
    do {
        while ((has_bit(pc->have_field, pc->next_block)
                || has_bit(pc->down_field, pc->next_block)))
            INCNEXTBLOCK(pc);

        struct block *blk = &pc->blocks[pc->next_block];
        struct block_request *req = btpd_malloc(sizeof(*req));
        req->p = p;
        req->blk = blk;
        BTPDQ_INSERT_TAIL(&blk->reqs, req, blk_entry);
        peer_request(p, req);

        set_bit(pc->down_field, pc->next_block);
        pc->nbusy++;
        pc->nreqs++;
        count++;
        INCNEXTBLOCK(pc);
    } while (!piece_full(pc) && !peer_laden(p));

    if (piece_full(pc))
        dl_on_piece_full(pc);

    return count;
}
/*
 * Request as many blocks as possible from the peer. Puts
 * requests on already active pieces before starting on new
 * ones. Care must be taken since end game mode may be triggered
 * by the calls to dl_piece_assign_requests.
 *
 * Returns number of requests sent.
 *
 * XXX: should do something smart when deciding on which
 *      already started piece to put requests on.
 */
unsigned
dl_assign_requests(struct peer *p)
{
    assert(!p->tp->endgame && !peer_laden(p));
    struct piece *pc;
    struct torrent *tp = p->tp;
    unsigned count = 0;
    BTPDQ_FOREACH(pc, &tp->getlst, entry) {
        if (piece_full(pc) || !peer_has(p, pc->index))
            continue;
        count += dl_piece_assign_requests(pc, p);
        if (tp->endgame)
            break;
        if (!piece_full(pc))
            assert(peer_laden(p));
        if (peer_laden(p))
            break;
    }
    while (!peer_laden(p) && !tp->endgame) {
        uint32_t index;
        if (dl_choose_rarest(p, &index) == 0) {
            pc = dl_new_piece(tp, index);
            if (pc != NULL)
                count += dl_piece_assign_requests(pc, p);
        } else
            break;
    }
    return count;
}
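
/*
 * Remove all of the peer's outstanding requests (normal mode). The
 * down field bits of the affected pieces are cleared, and a piece
 * that goes from full to unfull is reported via dl_on_piece_unfull so
 * other peers can pick it up.
 */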
void
dl_unassign_requests(struct peer *p)
{
    while (p->nreqs_out > 0) {
        struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
        struct piece *pc = req->blk->pc;
        int was_full = piece_full(pc);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            uint32_t blki = nb_get_begin(req->blk->msg) / PIECE_BLOCKLEN;
            struct block *blk = req->blk;
            // XXX: Needs to be looked at if we introduce snubbing.
            assert(has_bit(pc->down_field, blki));
            clear_bit(pc->down_field, blki);
            pc->nbusy--;
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;

            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }

        if (was_full && !piece_full(pc))
            dl_on_piece_unfull(pc);
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));
}
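
/*
 * End game request assignment for a single piece: walk the blocks
 * starting at next_block, skip blocks we already have or that this
 * peer has already requested, and queue requests until the peer is
 * laden or we have come full circle.
 */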
static void
dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
{
    unsigned first_block = pc->next_block;
    do {
        if ((has_bit(pc->have_field, pc->next_block)
                || peer_requested(p, &pc->blocks[pc->next_block]))) {
            INCNEXTBLOCK(pc);
            continue;
        }
        struct block_request *req = btpd_calloc(1, sizeof(*req));
        req->blk = &pc->blocks[pc->next_block];
        req->p = p;
        BTPDQ_INSERT_TAIL(&pc->blocks[pc->next_block].reqs, req, blk_entry);
        pc->nreqs++;
        INCNEXTBLOCK(pc);
        peer_request(p, req);
    } while (!peer_laden(p) && pc->next_block != first_block);
}
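
/*
 * Assign end game requests to a peer: request blocks on every piece
 * the peer has that isn't fully downloaded, then reinsert the touched
 * pieces so the get list stays sorted by request load.
 */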
void
dl_assign_requests_eg(struct peer *p)
{
    assert(!peer_laden(p));
    struct torrent *tp = p->tp;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    struct piece *pc = BTPDQ_FIRST(&tp->getlst);
    while (!peer_laden(p) && pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
            dl_piece_assign_requests_eg(pc, p);
            BTPDQ_REMOVE(&tp->getlst, pc, entry);
            BTPDQ_INSERT_HEAD(&tmp, pc, entry);
        }
        pc = next;
    }

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}
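
/*
 * End game version of dl_unassign_requests: drop all of the peer's
 * outstanding requests and re-sort the affected pieces in the get
 * list.
 */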
void
dl_unassign_requests_eg(struct peer *p)
{
    struct block_request *req;
    struct piece *pc;
    struct piece_tq tmp;
    BTPDQ_INIT(&tmp);

    while (p->nreqs_out > 0) {
        req = BTPDQ_FIRST(&p->my_reqs);
        pc = req->blk->pc;
        BTPDQ_REMOVE(&pc->tp->getlst, pc, entry);
        BTPDQ_INSERT_HEAD(&tmp, pc, entry);

        while (req != NULL) {
            struct block_request *next = BTPDQ_NEXT(req, p_entry);
            BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
            p->nreqs_out--;
            BTPDQ_REMOVE(&req->blk->reqs, req, blk_entry);
            free(req);
            pc->nreqs--;

            while (next != NULL && next->blk->pc != pc)
                next = BTPDQ_NEXT(next, p_entry);
            req = next;
        }
    }
    assert(BTPDQ_EMPTY(&p->my_reqs));

    pc = BTPDQ_FIRST(&tmp);
    while (pc != NULL) {
        struct piece *next = BTPDQ_NEXT(pc, entry);
        dl_piece_insert_eg(pc);
        pc = next;
    }
}