and net_del_torrent does the opposite (surprise!). * Some code shuffling has been done to separate net_ and dl_ from torrent_, but there's still much to be done. * Removed a couple of dead vars from struct torrent.
@@ -4,6 +4,30 @@ | |||
#include "btpd.h" | |||
#include "tracker_req.h" | |||
/*
 * Begin downloading for the given torrent.
 *
 * NOTE(review): currently a no-op placeholder — presumably the
 * download-side setup still lives elsewhere and will migrate here
 * as the net_/dl_/torrent_ separation continues; confirm intent.
 */
void
dl_start(struct torrent *tp)
{
}
void | |||
dl_stop(struct torrent *tp) | |||
{ | |||
struct peer *peer; | |||
struct piece *piece; | |||
peer = BTPDQ_FIRST(&tp->peers); | |||
while (peer != NULL) { | |||
struct peer *next = BTPDQ_NEXT(peer, p_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, p_entry); | |||
BTPDQ_INSERT_TAIL(&net_unattached, peer, p_entry); | |||
peer->flags &= ~PF_ATTACHED; | |||
peer = next; | |||
} | |||
while ((piece = BTPDQ_FIRST(&tp->getlst)) != NULL) | |||
piece_free(piece); | |||
} | |||
/* | |||
* Called when a peer announces it's got a new piece. | |||
* | |||
@@ -146,7 +170,6 @@ dl_on_new_peer(struct peer *p) | |||
p->flags |= PF_ATTACHED; | |||
BTPDQ_REMOVE(&net_unattached, p, p_entry); | |||
BTPDQ_INSERT_HEAD(&tp->peers, p, p_entry); | |||
ul_on_new_peer(p); | |||
} | |||
void | |||
@@ -158,8 +181,6 @@ dl_on_lost_peer(struct peer *p) | |||
tp->npeers--; | |||
p->flags &= ~PF_ATTACHED; | |||
ul_on_lost_peer(p); | |||
for (uint32_t i = 0; i < tp->meta.npieces; i++) | |||
if (peer_has(p, i)) | |||
tp->piece_count[i]--; | |||
@@ -20,6 +20,9 @@ void dl_piece_reorder_eg(struct piece *pc); | |||
// download.c | |||
void dl_start(struct torrent *tp); | |||
void dl_stop(struct torrent *tp); | |||
void dl_on_new_peer(struct peer *p); | |||
void dl_on_lost_peer(struct peer *p); | |||
@@ -26,12 +26,42 @@ static unsigned long m_bw_bytes_out; | |||
static struct event m_net_incoming; | |||
static unsigned m_ntorrents; | |||
static struct torrent_tq m_torrents = BTPDQ_HEAD_INITIALIZER(m_torrents); | |||
unsigned net_npeers; | |||
struct peer_tq net_bw_readq = BTPDQ_HEAD_INITIALIZER(net_bw_readq); | |||
struct peer_tq net_bw_writeq = BTPDQ_HEAD_INITIALIZER(net_bw_writeq); | |||
struct peer_tq net_unattached = BTPDQ_HEAD_INITIALIZER(net_unattached); | |||
void | |||
net_add_torrent(struct torrent *tp) | |||
{ | |||
BTPDQ_INSERT_HEAD(&m_torrents, tp, net_entry); | |||
m_ntorrents++; | |||
dl_start(tp); | |||
} | |||
void | |||
net_del_torrent(struct torrent *tp) | |||
{ | |||
assert(m_ntorrents > 0); | |||
m_ntorrents--; | |||
BTPDQ_REMOVE(&m_torrents, tp, net_entry); | |||
ul_on_lost_torrent(tp); | |||
dl_stop(tp); | |||
struct peer *p = BTPDQ_FIRST(&net_unattached); | |||
while (p != NULL) { | |||
struct peer *next = BTPDQ_NEXT(p, p_entry); | |||
if (p->tp == tp) | |||
peer_kill(p); | |||
p = next; | |||
} | |||
} | |||
void | |||
net_write32(void *buf, uint32_t num) | |||
{ | |||
@@ -236,7 +266,10 @@ net_state(struct peer *p, const char *buf) | |||
break; | |||
case SHAKE_INFO: | |||
if (p->flags & PF_INCOMING) { | |||
struct torrent *tp = btpd_get_torrent(buf); | |||
struct torrent *tp; | |||
BTPDQ_FOREACH(tp, &m_torrents, net_entry) | |||
if (bcmp(buf, tp->meta.info_hash, 20) == 0) | |||
break; | |||
if (tp == NULL) | |||
goto bad; | |||
p->tp = tp; | |||
@@ -449,7 +482,7 @@ compute_rate_sub(long rate) | |||
static void | |||
compute_peer_rates(void) { | |||
struct torrent *tp; | |||
BTPDQ_FOREACH(tp, btpd_get_torrents(), entry) { | |||
BTPDQ_FOREACH(tp, &m_torrents, net_entry) { | |||
struct peer *p; | |||
BTPDQ_FOREACH(p, &tp->peers, p_entry) { | |||
if (p->count_up > 0 || peer_active_up(p)) { | |||
@@ -31,7 +31,9 @@ enum net_state { | |||
void net_set_state(struct peer *p, enum net_state state, size_t size); | |||
void net_init(void); | |||
void net_bw_cb(int sd, short type, void *arg); | |||
void net_add_torrent(struct torrent *tp); | |||
void net_del_torrent(struct torrent *tp); | |||
void net_read_cb(int sd, short type, void *arg); | |||
void net_write_cb(int sd, short type, void *arg); | |||
@@ -16,9 +16,10 @@ peer_kill(struct peer *p) | |||
btpd_log(BTPD_L_CONN, "killed peer %p\n", p); | |||
if (p->flags & PF_ATTACHED) | |||
if (p->flags & PF_ATTACHED) { | |||
ul_on_lost_peer(p); | |||
dl_on_lost_peer(p); | |||
else | |||
} else | |||
BTPDQ_REMOVE(&net_unattached, p, p_entry); | |||
if (p->flags & PF_ON_READQ) | |||
BTPDQ_REMOVE(&net_bw_readq, p, rq_entry); | |||
@@ -339,6 +340,7 @@ peer_on_shake(struct peer *p) | |||
peer_send(p, nb_create_bitdata(p->tp)); | |||
} | |||
} | |||
ul_on_new_peer(p); | |||
dl_on_new_peer(p); | |||
} | |||
@@ -63,6 +63,7 @@ torrent_load3(const char *file, struct metainfo *mi, char *mem, size_t memsiz) | |||
free(mi); | |||
btpd_add_torrent(tp); | |||
net_add_torrent(tp); | |||
tracker_req(tp, TR_STARTED); | |||
@@ -149,32 +150,11 @@ torrent_load(const char *name) | |||
void | |||
torrent_unload(struct torrent *tp) | |||
{ | |||
struct peer *peer; | |||
struct piece *piece; | |||
btpd_log(BTPD_L_BTPD, "Unloading %s.\n", tp->relpath); | |||
tracker_req(tp, TR_STOPPED); | |||
peer = BTPDQ_FIRST(&tp->peers); | |||
while (peer != NULL) { | |||
struct peer *next = BTPDQ_NEXT(peer, p_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, p_entry); | |||
BTPDQ_INSERT_TAIL(&net_unattached, peer, p_entry); | |||
peer->flags &= ~PF_ATTACHED; | |||
peer = next; | |||
} | |||
net_del_torrent(tp); | |||
peer = BTPDQ_FIRST(&net_unattached); | |||
while (peer != NULL) { | |||
struct peer *next = BTPDQ_NEXT(peer, p_entry); | |||
if (peer->tp == tp) | |||
peer_kill(peer); | |||
peer = next; | |||
} | |||
while ((piece = BTPDQ_FIRST(&tp->getlst)) != NULL) | |||
piece_free(piece); | |||
tracker_req(tp, TR_STOPPED); | |||
free(tp->piece_count); | |||
free(tp->busy_field); | |||
@@ -36,6 +36,7 @@ struct torrent { | |||
struct metainfo meta; | |||
BTPDQ_ENTRY(torrent) entry; | |||
BTPDQ_ENTRY(torrent) net_entry; | |||
void *imem; | |||
size_t isiz; | |||
@@ -52,9 +53,6 @@ struct torrent { | |||
uint64_t uploaded, downloaded; | |||
short ndown; | |||
struct peer *optimistic; | |||
unsigned npeers; | |||
struct peer_tq peers; | |||