diff --git a/btpd/btpd.c b/btpd/btpd.c
index f013f90..026bb26 100644
--- a/btpd/btpd.c
+++ b/btpd/btpd.c
@@ -106,7 +106,7 @@ heartbeat_cb(int sd, short type, void *arg)
     btpd_seconds++;
 
     BTPDQ_FOREACH(tp, &m_torrents, entry)
-        cm_by_second(tp);
+        dl_by_second(tp);
 
     evtimer_add(&m_heartbeat, (& (struct timeval) { 1, 0 }));
 }
diff --git a/btpd/peer.c b/btpd/peer.c
index c81d439..ff16f92 100644
--- a/btpd/peer.c
+++ b/btpd/peer.c
@@ -26,9 +26,9 @@ peer_kill(struct peer *p)
     btpd_log(BTPD_L_CONN, "killed peer %p\n", p);
 
     if (p->flags & PF_ATTACHED)
-        cm_on_lost_peer(p);
+        dl_on_lost_peer(p);
     else
-        BTPDQ_REMOVE(&net_unattached, p, cm_entry);
+        BTPDQ_REMOVE(&net_unattached, p, p_entry);
     if (p->flags & PF_ON_READQ)
         BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
     if (p->flags & PF_ON_WRITEQ)
@@ -268,7 +268,7 @@ peer_create_common(int sd)
     event_set(&p->in_ev, p->sd, EV_READ, net_read_cb, p);
     event_add(&p->in_ev, NULL);
 
-    BTPDQ_INSERT_TAIL(&net_unattached, p, cm_entry);
+    BTPDQ_INSERT_TAIL(&net_unattached, p, p_entry);
     net_npeers++;
     return p;
 }
@@ -348,7 +348,7 @@ peer_on_shake(struct peer *p)
             peer_send(p, nb_create_bitdata(p->tp));
         }
     }
-    cm_on_new_peer(p);
+    dl_on_new_peer(p);
 }
 
 void
@@ -361,7 +361,7 @@ peer_on_choke(struct peer *p)
     if (p->nreqs_out > 0)
         peer_on_no_reqs(p);
     p->flags |= PF_P_CHOKE;
-    cm_on_choke(p);
+    dl_on_choke(p);
     struct nb_link *nl = BTPDQ_FIRST(&p->outq);
     while (nl != NULL) {
         struct nb_link *next = BTPDQ_NEXT(nl, entry);
@@ -380,7 +380,7 @@ peer_on_unchoke(struct peer *p)
         return;
     else {
         p->flags &= ~PF_P_CHOKE;
-        cm_on_unchoke(p);
+        dl_on_unchoke(p);
     }
 }
 
@@ -392,7 +392,7 @@ peer_on_interest(struct peer *p)
         return;
     else {
         p->flags |= PF_P_WANT;
-        cm_on_interest(p);
+        dl_on_interest(p);
     }
 }
 
@@ -404,7 +404,7 @@ peer_on_uninterest(struct peer *p)
         return;
     else {
         p->flags &= ~PF_P_WANT;
-        cm_on_uninterest(p);
+        dl_on_uninterest(p);
    }
 }
 
@@ -415,7 +415,7 @@ peer_on_have(struct peer *p, uint32_t index)
     if (!has_bit(p->piece_field, index)) {
         set_bit(p->piece_field, index);
         p->npieces++;
-        cm_on_piece_ann(p, index);
+        dl_on_piece_ann(p, index);
     }
 }
 
@@ -428,7 +428,7 @@ peer_on_bitfield(struct peer *p, const uint8_t *field)
     for (uint32_t i = 0; i < p->tp->meta.npieces; i++) {
         if (has_bit(p->piece_field, i)) {
             p->npieces++;
-            cm_on_piece_ann(p, i);
+            dl_on_piece_ann(p, i);
         }
     }
 }
@@ -449,7 +449,7 @@ peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
         assert(p->nreqs_out > 0);
         p->nreqs_out--;
         BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
-        cm_on_block(p, req, index, begin, length, data);
+        dl_on_block(p, req, index, begin, length, data);
         if (p->nreqs_out == 0)
             peer_on_no_reqs(p);
     } else
diff --git a/btpd/peer.h b/btpd/peer.h
index e288401..36d7fe8 100644
--- a/btpd/peer.h
+++ b/btpd/peer.h
@@ -61,7 +61,7 @@ struct peer {
         size_t off;
     } net;
 
-    BTPDQ_ENTRY(peer) cm_entry;
+    BTPDQ_ENTRY(peer) p_entry;
     BTPDQ_ENTRY(peer) rq_entry;
     BTPDQ_ENTRY(peer) wq_entry;
 
diff --git a/btpd/policy.h b/btpd/policy.h
index c039644..5adf4a1 100644
--- a/btpd/policy.h
+++ b/btpd/policy.h
@@ -11,38 +11,38 @@ void next_optimistic(struct torrent *tp, struct peer *np);
 int piece_full(struct piece *pc);
 void piece_free(struct piece *pc);
 
-void cm_on_piece_unfull(struct piece *pc);
-void cm_on_piece(struct piece *pc);
-
-struct piece *cm_new_piece(struct torrent *tp, uint32_t index);
-struct piece *cm_find_piece(struct torrent *tp, uint32_t index);
-unsigned cm_piece_assign_requests(struct piece *pc, struct peer *p);
-unsigned cm_assign_requests(struct peer *p);
-void cm_assign_requests_eg(struct peer *p);
-void cm_unassign_requests(struct peer *p);
-void cm_unassign_requests_eg(struct peer *p);
-void cm_piece_reorder_eg(struct piece *pc);
+void dl_on_piece_unfull(struct piece *pc);
+void dl_on_piece(struct piece *pc);
+
+struct piece *dl_new_piece(struct torrent *tp, uint32_t index);
+struct piece *dl_find_piece(struct torrent *tp, uint32_t index);
+unsigned dl_piece_assign_requests(struct piece *pc, struct peer *p);
+unsigned dl_assign_requests(struct peer *p);
+void dl_assign_requests_eg(struct peer *p);
+void dl_unassign_requests(struct peer *p);
+void dl_unassign_requests_eg(struct peer *p);
+void dl_piece_reorder_eg(struct piece *pc);
 
 // policy_if.c
 
-void cm_by_second(struct torrent *tp);
-
-void cm_on_new_peer(struct peer *p);
-void cm_on_lost_peer(struct peer *p);
-
-void cm_on_choke(struct peer *p);
-void cm_on_unchoke(struct peer *p);
-void cm_on_upload(struct peer *p);
-void cm_on_unupload(struct peer *p);
-void cm_on_interest(struct peer *p);
-void cm_on_uninterest(struct peer *p);
-void cm_on_download(struct peer *p);
-void cm_on_undownload(struct peer *p);
-void cm_on_piece_ann(struct peer *p, uint32_t index);
-void cm_on_block(struct peer *p, struct block_request *req,
+void dl_by_second(struct torrent *tp);
+
+void dl_on_new_peer(struct peer *p);
+void dl_on_lost_peer(struct peer *p);
+
+void dl_on_choke(struct peer *p);
+void dl_on_unchoke(struct peer *p);
+void dl_on_upload(struct peer *p);
+void dl_on_unupload(struct peer *p);
+void dl_on_interest(struct peer *p);
+void dl_on_uninterest(struct peer *p);
+void dl_on_download(struct peer *p);
+void dl_on_undownload(struct peer *p);
+void dl_on_piece_ann(struct peer *p, uint32_t index);
+void dl_on_block(struct peer *p, struct block_request *req,
     uint32_t index, uint32_t begin, uint32_t length, const char *data);
-void cm_on_ok_piece(struct piece *pc);
-void cm_on_bad_piece(struct piece *pc);
+void dl_on_ok_piece(struct piece *pc);
+void dl_on_bad_piece(struct piece *pc);
 
 #endif
diff --git a/btpd/policy_choke.c b/btpd/policy_choke.c
index dccb1ea..310b8aa 100644
--- a/btpd/policy_choke.c
+++ b/btpd/policy_choke.c
@@ -37,7 +37,7 @@ choke_alg(struct torrent *tp)
     struct peer *psort[tp->npeers];
 
     i = 0;
-    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
+    BTPDQ_FOREACH(p, &tp->peers, p_entry)
         psort[i++] = p;
 
     if (tp->have_npieces == tp->meta.npieces)
@@ -78,7 +78,7 @@ next_optimistic(struct torrent *tp, struct peer *np)
     else if (tp->optimistic == NULL)
         tp->optimistic = BTPDQ_FIRST(&tp->peers);
     else {
-        np = BTPDQ_NEXT(tp->optimistic, cm_entry);
+        np = BTPDQ_NEXT(tp->optimistic, p_entry);
         if (np != NULL)
             tp->optimistic = np;
         else
diff --git a/btpd/policy_if.c b/btpd/policy_if.c
index e660928..a550b2e 100644
--- a/btpd/policy_if.c
+++ b/btpd/policy_if.c
@@ -5,7 +5,7 @@
 #include "tracker_req.h"
 
 void
-cm_by_second(struct torrent *tp)
+dl_by_second(struct torrent *tp)
 {
     if (btpd_seconds == tp->tracker_time)
         tracker_req(tp, TR_EMPTY);
@@ -19,7 +19,7 @@ cm_by_second(struct torrent *tp)
     struct peer *p;
     int ri = btpd_seconds % RATEHISTORY;
 
-    BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
+    BTPDQ_FOREACH(p, &tp->peers, p_entry) {
         p->rate_to_me[ri] = 0;
         p->rate_from_me[ri] = 0;
     }
@@ -29,103 +29,103 @@ cm_by_second(struct torrent *tp)
  * Called when a peer announces it's got a new piece.
  *
  * If the piece is missing or unfull we increase the peer's
- * wanted level and if possible call cm_on_download.
+ * wanted level and if possible call dl_on_download.
  */
 void
-cm_on_piece_ann(struct peer *p, uint32_t index)
+dl_on_piece_ann(struct peer *p, uint32_t index)
 {
     struct torrent *tp = p->tp;
     tp->piece_count[index]++;
     if (has_bit(tp->piece_field, index))
         return;
-    struct piece *pc = cm_find_piece(tp, index);
+    struct piece *pc = dl_find_piece(tp, index);
     if (tp->endgame) {
         assert(pc != NULL);
         peer_want(p, index);
         if (!peer_chokes(p) && !peer_laden(p))
-            cm_assign_requests_eg(p);
+            dl_assign_requests_eg(p);
     } else if (pc == NULL) {
         peer_want(p, index);
         if (!peer_chokes(p) && !peer_laden(p)) {
-            pc = cm_new_piece(tp, index);
+            pc = dl_new_piece(tp, index);
             if (pc != NULL)
-                cm_piece_assign_requests(pc, p);
+                dl_piece_assign_requests(pc, p);
         }
     } else if (!piece_full(pc)) {
         peer_want(p, index);
         if (!peer_chokes(p) && !peer_laden(p))
-            cm_piece_assign_requests(pc, p);
+            dl_piece_assign_requests(pc, p);
     }
 }
 
 void
-cm_on_download(struct peer *p)
+dl_on_download(struct peer *p)
 {
     assert(peer_wanted(p));
     struct torrent *tp = p->tp;
     if (tp->endgame) {
-        cm_assign_requests_eg(p);
+        dl_assign_requests_eg(p);
     } else {
-        unsigned count = cm_assign_requests(p);
+        unsigned count = dl_assign_requests(p);
        if (count == 0 && !p->tp->endgame) // We may have entered end game.
             assert(!peer_wanted(p) || peer_laden(p));
     }
 }
 
 void
-cm_on_unchoke(struct peer *p)
+dl_on_unchoke(struct peer *p)
 {
     if (peer_wanted(p))
-        cm_on_download(p);
+        dl_on_download(p);
 }
 
 void
-cm_on_undownload(struct peer *p)
+dl_on_undownload(struct peer *p)
 {
     if (!p->tp->endgame)
-        cm_unassign_requests(p);
+        dl_unassign_requests(p);
     else
-        cm_unassign_requests_eg(p);
+        dl_unassign_requests_eg(p);
 }
 
 void
-cm_on_choke(struct peer *p)
+dl_on_choke(struct peer *p)
 {
     if (p->nreqs_out > 0)
-        cm_on_undownload(p);
+        dl_on_undownload(p);
 }
 
 void
-cm_on_upload(struct peer *p)
+dl_on_upload(struct peer *p)
 {
     choke_alg(p->tp);
 }
 
 void
-cm_on_interest(struct peer *p)
+dl_on_interest(struct peer *p)
 {
     if ((p->flags & PF_I_CHOKE) == 0)
-        cm_on_upload(p);
+        dl_on_upload(p);
 }
 
 void
-cm_on_unupload(struct peer *p)
+dl_on_unupload(struct peer *p)
 {
     choke_alg(p->tp);
 }
 
 void
-cm_on_uninterest(struct peer *p)
+dl_on_uninterest(struct peer *p)
 {
     if ((p->flags & PF_I_CHOKE) == 0)
-        cm_on_unupload(p);
+        dl_on_unupload(p);
 }
 
 /**
  * Called when a piece has been tested positively.
 */
 void
-cm_on_ok_piece(struct piece *pc)
+dl_on_ok_piece(struct piece *pc)
 {
     struct peer *p;
     struct torrent *tp = pc->tp;
@@ -137,11 +137,11 @@ cm_on_ok_piece(struct piece *pc)
     msync(tp->imem, tp->isiz, MS_ASYNC);
 
     struct net_buf *have = nb_create_have(pc->index);
-    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
+    BTPDQ_FOREACH(p, &tp->peers, p_entry)
         peer_send(p, have);
 
     if (tp->endgame)
-        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
+        BTPDQ_FOREACH(p, &tp->peers, p_entry)
             if (peer_has(p, pc->index))
                 peer_unwant(p, pc->index);
 
@@ -151,7 +151,7 @@ cm_on_ok_piece(struct piece *pc)
     if (torrent_has_all(tp)) {
         btpd_log(BTPD_L_BTPD, "Finished: %s.\n", tp->relpath);
         tracker_req(tp, TR_COMPLETED);
-        BTPDQ_FOREACH(p, &tp->peers, cm_entry)
+        BTPDQ_FOREACH(p, &tp->peers, p_entry)
             assert(p->nwant == 0);
     }
 }
@@ -160,7 +160,7 @@
 * Called when a piece has been tested negatively.
 */
 void
-cm_on_bad_piece(struct piece *pc)
+dl_on_bad_piece(struct piece *pc)
 {
     struct torrent *tp = pc->tp;
 
@@ -177,54 +177,54 @@ cm_on_bad_piece(struct piece *pc)
 
     if (tp->endgame) {
         struct peer *p;
-        BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
+        BTPDQ_FOREACH(p, &tp->peers, p_entry) {
             if (peer_has(p, pc->index) && peer_leech_ok(p) && !peer_laden(p))
-                cm_assign_requests_eg(p);
+                dl_assign_requests_eg(p);
         }
     } else
-        cm_on_piece_unfull(pc); // XXX: May get bad data again.
+        dl_on_piece_unfull(pc); // XXX: May get bad data again.
 }
 
 void
-cm_on_new_peer(struct peer *p)
+dl_on_new_peer(struct peer *p)
 {
     struct torrent *tp = p->tp;
 
     tp->npeers++;
     p->flags |= PF_ATTACHED;
-    BTPDQ_REMOVE(&net_unattached, p, cm_entry);
+    BTPDQ_REMOVE(&net_unattached, p, p_entry);
 
     if (tp->npeers == 1) {
-        BTPDQ_INSERT_HEAD(&tp->peers, p, cm_entry);
+        BTPDQ_INSERT_HEAD(&tp->peers, p, p_entry);
         next_optimistic(tp, p);
     } else {
         if (random() > RAND_MAX / 3)
-            BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, p, cm_entry);
+            BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, p, p_entry);
         else
-            BTPDQ_INSERT_TAIL(&tp->peers, p, cm_entry);
+            BTPDQ_INSERT_TAIL(&tp->peers, p, p_entry);
     }
 }
 
 void
-cm_on_lost_peer(struct peer *p)
+dl_on_lost_peer(struct peer *p)
 {
     struct torrent *tp = p->tp;
 
     tp->npeers--;
     p->flags &= ~PF_ATTACHED;
     if (tp->npeers == 0) {
-        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
+        BTPDQ_REMOVE(&tp->peers, p, p_entry);
         tp->optimistic = NULL;
         tp->choke_time = tp->opt_time = 0;
     } else if (tp->optimistic == p) {
-        struct peer *next = BTPDQ_NEXT(p, cm_entry);
-        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
+        struct peer *next = BTPDQ_NEXT(p, p_entry);
+        BTPDQ_REMOVE(&tp->peers, p, p_entry);
         next_optimistic(tp, next);
     } else if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
-        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
-        cm_on_unupload(p);
+        BTPDQ_REMOVE(&tp->peers, p, p_entry);
+        dl_on_unupload(p);
     } else {
-        BTPDQ_REMOVE(&tp->peers, p, cm_entry);
+        BTPDQ_REMOVE(&tp->peers, p, p_entry);
     }
 
     for (uint32_t i = 0; i < tp->meta.npieces; i++)
@@ -232,20 +232,20 @@ cm_on_lost_peer(struct peer *p)
             tp->piece_count[i]--;
 
     if (p->nreqs_out > 0)
-        cm_on_undownload(p);
+        dl_on_undownload(p);
 #if 0
     struct piece *pc = BTPDQ_FIRST(&tp->getlst);
     while (pc != NULL) {
         struct piece *next = BTPDQ_NEXT(pc, entry);
         if (peer_has(p, pc->index) && tp->piece_count[pc->index] == 0)
-            cm_on_peerless_piece(pc);
+            dl_on_peerless_piece(pc);
         pc = next;
     }
 #endif
 }
 
 void
-cm_on_block(struct peer *p, struct block_request *req,
+dl_on_block(struct peer *p, struct block_request *req,
     uint32_t index, uint32_t begin, uint32_t length, const char *data)
 {
     struct torrent *tp = p->tp;
@@ -268,18 +268,18 @@ cm_on_block(struct peer *p, struct block_request *req,
             pc->nreqs--;
         }
         nb_drop(cancel);
-        cm_piece_reorder_eg(pc);
+        dl_piece_reorder_eg(pc);
         req = BTPDQ_FIRST(&blk->reqs);
         while (req != NULL) {
             struct block_request *next = BTPDQ_NEXT(req, blk_entry);
             if (peer_leech_ok(req->p) && !peer_laden(req->p))
-                cm_assign_requests_eg(req->p);
+                dl_assign_requests_eg(req->p);
             free(req);
             req = next;
         }
         BTPDQ_INIT(&blk->reqs);
         if (pc->ngot == pc->nblocks)
-            cm_on_piece(pc);
+            dl_on_piece(pc);
     } else {
         BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
         free(req);
@@ -288,8 +288,8 @@ cm_on_block(struct peer *p, struct block_request *req,
         clear_bit(pc->down_field, begin / PIECE_BLOCKLEN);
         pc->nbusy--;
         if (pc->ngot == pc->nblocks)
-            cm_on_piece(pc);
+            dl_on_piece(pc);
         if (peer_leech_ok(p) && !peer_laden(p))
-            cm_assign_requests(p);
+            dl_assign_requests(p);
     }
 }
diff --git a/btpd/policy_subr.c b/btpd/policy_subr.c
index 5ef26fe..f0429f0 100644
--- a/btpd/policy_subr.c
+++ b/btpd/policy_subr.c
@@ -105,7 +105,7 @@ piece_full(struct piece *pc)
 }
 
 static int
-cm_should_enter_endgame(struct torrent *tp)
+dl_should_enter_endgame(struct torrent *tp)
 {
     int should;
     if (tp->have_npieces + tp->npcs_busy == tp->meta.npieces) {
@@ -123,7 +123,7 @@
 }
 
 static void
-cm_piece_insert_eg(struct piece *pc)
+dl_piece_insert_eg(struct piece *pc)
 {
     struct piece_tq *getlst = &pc->tp->getlst;
     if (pc->nblocks == pc->ngot)
@@ -144,14 +144,14 @@
 }
 
 void
-cm_piece_reorder_eg(struct piece *pc)
+dl_piece_reorder_eg(struct piece *pc)
 {
     BTPDQ_REMOVE(&pc->tp->getlst, pc, entry);
-    cm_piece_insert_eg(pc);
+    dl_piece_insert_eg(pc);
 }
 
 static void
-cm_enter_endgame(struct torrent *tp)
+dl_enter_endgame(struct torrent *tp)
 {
     struct peer *p;
     struct piece *pc;
@@ -172,21 +172,21 @@ cm_enter_endgame(struct torrent *tp)
     BTPDQ_INIT(&tp->getlst);
     while (pi > 0) {
         pi--;
-        cm_piece_insert_eg(pcs[pi]);
+        dl_piece_insert_eg(pcs[pi]);
     }
-    BTPDQ_FOREACH(p, &tp->peers, cm_entry) {
+    BTPDQ_FOREACH(p, &tp->peers, p_entry) {
         assert(p->nwant == 0);
         BTPDQ_FOREACH(pc, &tp->getlst, entry) {
             if (peer_has(p, pc->index))
                 peer_want(p, pc->index);
         }
         if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
-            cm_assign_requests_eg(p);
+            dl_assign_requests_eg(p);
     }
 }
 
 struct piece *
-cm_find_piece(struct torrent *tp, uint32_t index)
+dl_find_piece(struct torrent *tp, uint32_t index)
 {
     struct piece *pc;
     BTPDQ_FOREACH(pc, &tp->getlst, entry)
@@ -253,19 +253,19 @@ torrent_test_piece(struct piece *pc)
     bts_close_ro(bts);
 
     if (test_hash(tp, hash, pc->index) == 0)
-        cm_on_ok_piece(pc);
+        dl_on_ok_piece(pc);
     else
-        cm_on_bad_piece(pc);
+        dl_on_bad_piece(pc);
 }
 
 void
-cm_on_piece(struct piece *pc)
+dl_on_piece(struct piece *pc)
 {
     torrent_test_piece(pc);
 }
 
 static int
-cm_piece_startable(struct peer *p, uint32_t index)
+dl_piece_startable(struct peer *p, uint32_t index)
 {
     return peer_has(p, index) && !has_bit(p->tp->piece_field, index)
         && !has_bit(p->tp->busy_field, index);
@@ -279,14 +279,14 @@ cm_piece_startable(struct peer *p, uint32_t index)
 * Return 0 or ENOENT, index in res.
 */
 static int
-cm_choose_rarest(struct peer *p, uint32_t *res)
+dl_choose_rarest(struct peer *p, uint32_t *res)
 {
     uint32_t i;
     struct torrent *tp = p->tp;
 
     assert(tp->endgame == 0);
 
-    for (i = 0; i < tp->meta.npieces && !cm_piece_startable(p, i); i++)
+    for (i = 0; i < tp->meta.npieces && !dl_piece_startable(p, i); i++)
         ;
 
     if (i == tp->meta.npieces)
@@ -295,7 +295,7 @@ cm_choose_rarest(struct peer *p, uint32_t *res)
     uint32_t min_i = i;
     uint32_t min_c = 1;
     for(i++; i < tp->meta.npieces; i++) {
-        if (cm_piece_startable(p, i)) {
+        if (dl_piece_startable(p, i)) {
             if (tp->piece_count[i] == tp->piece_count[min_i])
                 min_c++;
             else if (tp->piece_count[i] < tp->piece_count[min_i]) {
@@ -307,7 +307,7 @@
     if (min_c > 1) {
         min_c = 1 + rint((double)random() * (min_c - 1) / RAND_MAX);
         for (i = min_i; min_c > 0; i++) {
-            if (cm_piece_startable(p, i)
+            if (dl_piece_startable(p, i)
                 && tp->piece_count[i] == tp->piece_count[min_i]) {
                 min_c--;
                 min_i = i;
@@ -319,21 +319,21 @@
 }
 
 /*
- * Called from either cm_piece_assign_requests or cm_new_piece,
+ * Called from either dl_piece_assign_requests or dl_new_piece,
 * when a pice becomes full. The wanted level of the peers
 * that has this piece will be decreased. This function is
 * the only one that may trigger end game.
 */
 static void
-cm_on_piece_full(struct piece *pc)
+dl_on_piece_full(struct piece *pc)
 {
     struct peer *p;
-    BTPDQ_FOREACH(p, &pc->tp->peers, cm_entry) {
+    BTPDQ_FOREACH(p, &pc->tp->peers, p_entry) {
         if (peer_has(p, pc->index))
             peer_unwant(p, pc->index);
     }
-    if (cm_should_enter_endgame(pc->tp))
-        cm_enter_endgame(pc->tp);
+    if (dl_should_enter_endgame(pc->tp))
+        dl_enter_endgame(pc->tp);
 }
 
 /*
@@ -346,15 +346,15 @@ cm_on_piece_full(struct piece *pc)
 * Return the piece or NULL.
 */
 struct piece *
-cm_new_piece(struct torrent *tp, uint32_t index)
+dl_new_piece(struct torrent *tp, uint32_t index)
 {
     btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
     struct piece *pc = piece_alloc(tp, index);
     if (pc->ngot == pc->nblocks) {
-        cm_on_piece_full(pc);
-        cm_on_piece(pc);
-        if (cm_should_enter_endgame(tp))
-            cm_enter_endgame(tp);
+        dl_on_piece_full(pc);
+        dl_on_piece(pc);
+        if (dl_should_enter_endgame(tp))
+            dl_enter_endgame(tp);
         return NULL;
     } else
         return pc;
@@ -368,19 +368,19 @@
 * try to assign requests for this piece.
 */
 void
-cm_on_piece_unfull(struct piece *pc)
+dl_on_piece_unfull(struct piece *pc)
 {
     struct torrent *tp = pc->tp;
     struct peer *p;
 
     assert(!piece_full(pc) && tp->endgame == 0);
-    BTPDQ_FOREACH(p, &tp->peers, cm_entry)
+    BTPDQ_FOREACH(p, &tp->peers, p_entry)
         if (peer_has(p, pc->index))
             peer_want(p, pc->index);
     p = BTPDQ_FIRST(&tp->peers);
     while (p != NULL && !piece_full(pc)) {
         if (peer_leech_ok(p) && !peer_laden(p))
-            cm_piece_assign_requests(pc, p); // Cannot provoke end game here.
-        p = BTPDQ_NEXT(p, cm_entry);
+            dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
+        p = BTPDQ_NEXT(p, p_entry);
     }
 }
@@ -390,12 +390,12 @@
 
 /*
 * Request as many blocks as possible on this piece from
- * the peer. If the piece becomes full we call cm_on_piece_full.
+ * the peer. If the piece becomes full we call dl_on_piece_full.
 *
 * Return the number of requests sent.
 */
 unsigned
-cm_piece_assign_requests(struct piece *pc, struct peer *p)
+dl_piece_assign_requests(struct piece *pc, struct peer *p)
 {
     assert(!piece_full(pc) && !peer_laden(p));
     unsigned count = 0;
@@ -420,7 +420,7 @@ cm_piece_assign_requests(struct piece *pc, struct peer *p)
     } while (!piece_full(pc) && !peer_laden(p));
 
     if (piece_full(pc))
-        cm_on_piece_full(pc);
+        dl_on_piece_full(pc);
 
     return count;
 }
@@ -429,7 +429,7 @@ cm_piece_assign_requests(struct piece *pc, struct peer *p)
 * Request as many blocks as possible from the peer. Puts
 * requests on already active pieces before starting on new
 * ones. Care must be taken since end game mode may be triggered
- * by the calls to cm_piece_assign_requests.
+ * by the calls to dl_piece_assign_requests.
 *
 * Returns number of requests sent.
 *
@@ -437,7 +437,7 @@ cm_piece_assign_requests(struct piece *pc, struct peer *p)
 * already started piece to put requests on.
 */
 unsigned
-cm_assign_requests(struct peer *p)
+dl_assign_requests(struct peer *p)
 {
     assert(!p->tp->endgame && !peer_laden(p));
     struct piece *pc;
@@ -446,7 +446,7 @@ cm_assign_requests(struct peer *p)
     BTPDQ_FOREACH(pc, &tp->getlst, entry) {
         if (piece_full(pc) || !peer_has(p, pc->index))
             continue;
-        count += cm_piece_assign_requests(pc, p);
+        count += dl_piece_assign_requests(pc, p);
         if (tp->endgame)
             break;
         if (!piece_full(pc))
@@ -456,10 +456,10 @@ cm_assign_requests(struct peer *p)
     }
     while (!peer_laden(p) && !tp->endgame) {
         uint32_t index;
-        if (cm_choose_rarest(p, &index) == 0) {
-            pc = cm_new_piece(tp, index);
+        if (dl_choose_rarest(p, &index) == 0) {
+            pc = dl_new_piece(tp, index);
             if (pc != NULL)
-                count += cm_piece_assign_requests(pc, p);
+                count += dl_piece_assign_requests(pc, p);
         } else
             break;
     }
@@ -467,7 +467,7 @@
 }
 
 void
-cm_unassign_requests(struct peer *p)
+dl_unassign_requests(struct peer *p)
 {
     while (p->nreqs_out > 0) {
         struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
@@ -495,13 +495,13 @@
         }
 
         if (was_full && !piece_full(pc))
-            cm_on_piece_unfull(pc);
+            dl_on_piece_unfull(pc);
     }
     assert(BTPDQ_EMPTY(&p->my_reqs));
 }
 
 static void
-cm_piece_assign_requests_eg(struct piece *pc, struct peer *p)
+dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
 {
     unsigned first_block = pc->next_block;
     do {
@@ -521,7 +521,7 @@
 }
 
 void
-cm_assign_requests_eg(struct peer *p)
+dl_assign_requests_eg(struct peer *p)
 {
     assert(!peer_laden(p));
     struct torrent *tp = p->tp;
@@ -532,7 +532,7 @@ cm_assign_requests_eg(struct peer *p)
     while (!peer_laden(p) && pc != NULL) {
         struct piece *next = BTPDQ_NEXT(pc, entry);
         if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
-            cm_piece_assign_requests_eg(pc, p);
+            dl_piece_assign_requests_eg(pc, p);
             BTPDQ_REMOVE(&tp->getlst, pc, entry);
             BTPDQ_INSERT_HEAD(&tmp, pc, entry);
         }
@@ -542,13 +542,13 @@ cm_assign_requests_eg(struct peer *p)
     pc = BTPDQ_FIRST(&tmp);
     while (pc != NULL) {
         struct piece *next = BTPDQ_NEXT(pc, entry);
-        cm_piece_insert_eg(pc);
+        dl_piece_insert_eg(pc);
         pc = next;
     }
 }
 
 void
-cm_unassign_requests_eg(struct peer *p)
+dl_unassign_requests_eg(struct peer *p)
 {
     struct block_request *req;
     struct piece *pc;
@@ -580,7 +580,7 @@ cm_unassign_requests_eg(struct peer *p)
     pc = BTPDQ_FIRST(&tmp);
     while (pc != NULL) {
         struct piece *next = BTPDQ_NEXT(pc, entry);
-        cm_piece_insert_eg(pc);
+        dl_piece_insert_eg(pc);
         pc = next;
     }
 }
diff --git a/btpd/torrent.c b/btpd/torrent.c
index 0737f6b..f206e05 100644
--- a/btpd/torrent.c
+++ b/btpd/torrent.c
@@ -158,16 +158,16 @@ torrent_unload(struct torrent *tp)
 
     peer = BTPDQ_FIRST(&tp->peers);
     while (peer != NULL) {
-        struct peer *next = BTPDQ_NEXT(peer, cm_entry);
-        BTPDQ_REMOVE(&tp->peers, peer, cm_entry);
-        BTPDQ_INSERT_TAIL(&net_unattached, peer, cm_entry);
+        struct peer *next = BTPDQ_NEXT(peer, p_entry);
+        BTPDQ_REMOVE(&tp->peers, peer, p_entry);
+        BTPDQ_INSERT_TAIL(&net_unattached, peer, p_entry);
         peer->flags &= ~PF_ATTACHED;
         peer = next;
     }
 
     peer = BTPDQ_FIRST(&net_unattached);
     while (peer != NULL) {
-        struct peer *next = BTPDQ_NEXT(peer, cm_entry);
+        struct peer *next = BTPDQ_NEXT(peer, p_entry);
         if (peer->tp == tp)
             peer_kill(peer);
         peer = next;
@@ -237,7 +237,7 @@ torrent_has_peer(struct torrent *tp, const uint8_t *id)
             has = 1;
             break;
         }
-        p = BTPDQ_NEXT(p, cm_entry);
+        p = BTPDQ_NEXT(p, p_entry);
     }
     return has;
 }
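
Background for readers unfamiliar with the list plumbing the member rename touches: cm_entry -> p_entry shows up at every BTPDQ_FOREACH/REMOVE/INSERT call site because these macros take the name of the link member they should walk. The BTPDQ_* macros appear to mirror the BSD <sys/queue.h> TAILQ_* interface, and a peer carries several such link members (p_entry plus rq_entry and wq_entry) so the same struct can sit on several lists at once. The sketch below is not btpd code; it uses the standard TAILQ macros and made-up names (struct node, main_list, io_list) only to illustrate that pattern.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    /* One object that can be linked on two independent lists at once,
     * the way struct peer carries p_entry, rq_entry and wq_entry. */
    struct node {
        int id;
        TAILQ_ENTRY(node) main_entry;   /* link used by main_list (cf. p_entry) */
        TAILQ_ENTRY(node) io_entry;     /* link used by io_list (cf. rq_entry/wq_entry) */
    };

    TAILQ_HEAD(node_list, node);

    int
    main(void)
    {
        struct node_list main_list = TAILQ_HEAD_INITIALIZER(main_list);
        struct node_list io_list = TAILQ_HEAD_INITIALIZER(io_list);
        struct node *n, *next;

        for (int i = 0; i < 3; i++) {
            n = calloc(1, sizeof(*n));
            n->id = i;
            TAILQ_INSERT_TAIL(&main_list, n, main_entry);
            if (i != 1)
                TAILQ_INSERT_TAIL(&io_list, n, io_entry); /* only some nodes queue for I/O */
        }

        /* Each traversal names the link member it walks, which is why a
         * member rename has to be applied at every macro call site. */
        TAILQ_FOREACH(n, &main_list, main_entry)
            printf("main: %d\n", n->id);
        TAILQ_FOREACH(n, &io_list, io_entry)
            printf("io: %d\n", n->id);

        /* Removing a node from one list leaves its other links untouched. */
        n = TAILQ_FIRST(&io_list);
        TAILQ_REMOVE(&io_list, n, io_entry);

        for (n = TAILQ_FIRST(&main_list); n != NULL; n = next) {
            next = TAILQ_NEXT(n, main_entry);
            free(n);
        }
        return 0;
    }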