which may happen if some other header includes sys/queue.h.
@@ -111,13 +111,13 @@ btpd_init(void) | |||
btpd.logmask = BTPD_L_BTPD | BTPD_L_ERROR; | |||
TAILQ_INIT(&btpd.kids); | |||
BTPDQ_INIT(&btpd.kids); | |||
btpd.ntorrents = 0; | |||
TAILQ_INIT(&btpd.cm_list); | |||
BTPDQ_INIT(&btpd.cm_list); | |||
TAILQ_INIT(&btpd.readq); | |||
TAILQ_INIT(&btpd.writeq); | |||
BTPDQ_INIT(&btpd.readq); | |||
BTPDQ_INIT(&btpd.writeq); | |||
btpd.port = 6881; | |||
@@ -145,9 +145,9 @@ btpd_shutdown(void) | |||
{ | |||
struct torrent *tp; | |||
tp = TAILQ_FIRST(&btpd.cm_list); | |||
tp = BTPDQ_FIRST(&btpd.cm_list); | |||
while (tp != NULL) { | |||
struct torrent *next = TAILQ_NEXT(tp, entry); | |||
struct torrent *next = BTPDQ_NEXT(tp, entry); | |||
torrent_unload(tp); | |||
tp = next; | |||
} | |||
@@ -170,11 +170,11 @@ child_cb(int signal, short type, void *arg) | |||
while ((pid = waitpid(-1, &status, WNOHANG)) > 0) { | |||
if (WIFEXITED(status) || WIFSIGNALED(status)) { | |||
struct child *kid = TAILQ_FIRST(&btpd.kids); | |||
struct child *kid = BTPDQ_FIRST(&btpd.kids); | |||
while (kid != NULL && kid->pid != pid) | |||
kid = TAILQ_NEXT(kid, entry); | |||
kid = BTPDQ_NEXT(kid, entry); | |||
assert(kid != NULL); | |||
TAILQ_REMOVE(&btpd.kids, kid, entry); | |||
BTPDQ_REMOVE(&btpd.kids, kid, entry); | |||
kid->child_done(kid); | |||
} | |||
} | |||
@@ -189,7 +189,7 @@ heartbeat_cb(int sd, short type, void *arg) | |||
btpd.seconds++; | |||
TAILQ_FOREACH(tp, &btpd.cm_list, entry) | |||
BTPDQ_FOREACH(tp, &btpd.cm_list, entry) | |||
cm_by_second(tp); | |||
net_by_second(); | |||
@@ -28,10 +28,10 @@ struct child { | |||
pid_t pid; | |||
void *data; | |||
void (*child_done)(struct child *child); | |||
TAILQ_ENTRY(child) entry; | |||
BTPDQ_ENTRY(child) entry; | |||
}; | |||
TAILQ_HEAD(child_tq, child); | |||
BTPDQ_HEAD(child_tq, child); | |||
struct btpd { | |||
uint8_t peer_id[20]; | |||
@@ -36,7 +36,7 @@ cmd_stat(int argc, const char *args, FILE *fp) | |||
errdie(buf_print(&iob, "9:ntorrentsi%ue", btpd.ntorrents)); | |||
errdie(buf_print(&iob, "7:secondsi%lue", btpd.seconds)); | |||
errdie(buf_swrite(&iob, "8:torrentsl")); | |||
TAILQ_FOREACH(tp, &btpd.cm_list, entry) { | |||
BTPDQ_FOREACH(tp, &btpd.cm_list, entry) { | |||
uint32_t seen_npieces = 0; | |||
for (uint32_t i = 0; i < tp->meta.npieces; i++) | |||
if (tp->piece_count[i] > 0) | |||
@@ -33,7 +33,7 @@ net_read_cb(int sd, short type, void *arg) | |||
btpd.ibw_left -= p->reader->read(p, btpd.ibw_left); | |||
} else { | |||
p->flags |= PF_ON_READQ; | |||
TAILQ_INSERT_TAIL(&btpd.readq, p, rq_entry); | |||
BTPDQ_INSERT_TAIL(&btpd.readq, p, rq_entry); | |||
} | |||
} | |||
@@ -47,7 +47,7 @@ net_write_cb(int sd, short type, void *arg) | |||
btpd.obw_left -= net_write(p, btpd.obw_left); | |||
} else { | |||
p->flags |= PF_ON_WRITEQ; | |||
TAILQ_INSERT_TAIL(&btpd.writeq, p, wq_entry); | |||
BTPDQ_INSERT_TAIL(&btpd.writeq, p, wq_entry); | |||
} | |||
} | |||
@@ -92,21 +92,21 @@ net_unsend_piece(struct peer *p, struct piece_req *req) | |||
{ | |||
struct iob_link *piece; | |||
TAILQ_REMOVE(&p->p_reqs, req, entry); | |||
BTPDQ_REMOVE(&p->p_reqs, req, entry); | |||
piece = TAILQ_NEXT(req->head, entry); | |||
TAILQ_REMOVE(&p->outq, piece, entry); | |||
piece = BTPDQ_NEXT(req->head, entry); | |||
BTPDQ_REMOVE(&p->outq, piece, entry); | |||
piece->kill_buf(&piece->iob); | |||
free(piece); | |||
TAILQ_REMOVE(&p->outq, req->head, entry); | |||
BTPDQ_REMOVE(&p->outq, req->head, entry); | |||
req->head->kill_buf(&req->head->iob); | |||
free(req->head); | |||
free(req); | |||
if (TAILQ_EMPTY(&p->outq)) { | |||
if (BTPDQ_EMPTY(&p->outq)) { | |||
if (p->flags & PF_ON_WRITEQ) { | |||
TAILQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
BTPDQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
p->flags &= ~PF_ON_WRITEQ; | |||
} else | |||
event_del(&p->out_ev); | |||
@@ -135,7 +135,7 @@ net_write(struct peer *p, unsigned long wmax) | |||
limited = wmax > 0; | |||
niov = 0; | |||
assert((iol = TAILQ_FIRST(&p->outq)) != NULL); | |||
assert((iol = BTPDQ_FIRST(&p->outq)) != NULL); | |||
while (niov < NIOV && iol != NULL | |||
&& (!limited || (limited && wmax > 0))) { | |||
iov[niov].iov_base = iol->iob.buf + iol->iob.buf_off; | |||
@@ -146,7 +146,7 @@ net_write(struct peer *p, unsigned long wmax) | |||
wmax -= iov[niov].iov_len; | |||
} | |||
niov++; | |||
iol = TAILQ_NEXT(iol, entry); | |||
iol = BTPDQ_NEXT(iol, entry); | |||
} | |||
again: | |||
@@ -167,29 +167,29 @@ again: | |||
bcount = nwritten; | |||
p->rate_from_me[btpd.seconds % RATEHISTORY] += nwritten; | |||
req = TAILQ_FIRST(&p->p_reqs); | |||
iol = TAILQ_FIRST(&p->outq); | |||
req = BTPDQ_FIRST(&p->p_reqs); | |||
iol = BTPDQ_FIRST(&p->outq); | |||
while (bcount > 0) { | |||
if (req != NULL && req->head == iol) { | |||
struct iob_link *piece = TAILQ_NEXT(req->head, entry); | |||
struct piece_req *next = TAILQ_NEXT(req, entry); | |||
TAILQ_REMOVE(&p->p_reqs, req, entry); | |||
struct iob_link *piece = BTPDQ_NEXT(req->head, entry); | |||
struct piece_req *next = BTPDQ_NEXT(req, entry); | |||
BTPDQ_REMOVE(&p->p_reqs, req, entry); | |||
free(req); | |||
req = next; | |||
p->tp->uploaded += piece->iob.buf_len; | |||
} | |||
if (bcount >= iol->iob.buf_len - iol->iob.buf_off) { | |||
bcount -= iol->iob.buf_len - iol->iob.buf_off; | |||
TAILQ_REMOVE(&p->outq, iol, entry); | |||
BTPDQ_REMOVE(&p->outq, iol, entry); | |||
iol->kill_buf(&iol->iob); | |||
free(iol); | |||
iol = TAILQ_FIRST(&p->outq); | |||
iol = BTPDQ_FIRST(&p->outq); | |||
} else { | |||
iol->iob.buf_off += bcount; | |||
bcount = 0; | |||
} | |||
} | |||
if (!TAILQ_EMPTY(&p->outq)) | |||
if (!BTPDQ_EMPTY(&p->outq)) | |||
event_add(&p->out_ev, NULL); | |||
else if (p->flags & PF_WRITE_CLOSE) { | |||
btpd_log(BTPD_L_CONN, "Closed because of write flag.\n"); | |||
@@ -202,9 +202,9 @@ again: | |||
void | |||
net_send(struct peer *p, struct iob_link *iol) | |||
{ | |||
if (TAILQ_EMPTY(&p->outq)) | |||
if (BTPDQ_EMPTY(&p->outq)) | |||
event_add(&p->out_ev, NULL); | |||
TAILQ_INSERT_TAIL(&p->outq, iol, entry); | |||
BTPDQ_INSERT_TAIL(&p->outq, iol, entry); | |||
} | |||
void | |||
@@ -243,7 +243,7 @@ net_send_piece(struct peer *p, uint32_t index, uint32_t begin, | |||
req->begin = begin; | |||
req->length = blen; | |||
req->head = head; | |||
TAILQ_INSERT_TAIL(&p->p_reqs, req, entry); | |||
BTPDQ_INSERT_TAIL(&p->p_reqs, req, entry); | |||
} | |||
void | |||
@@ -368,7 +368,7 @@ net_read(struct peer *p, char *buf, size_t len) | |||
} | |||
} else if (nread == 0) { | |||
btpd_log(BTPD_L_CONN, "conn closed by other side.\n"); | |||
if (!TAILQ_EMPTY(&p->outq)) | |||
if (!BTPDQ_EMPTY(&p->outq)) | |||
p->flags |= PF_WRITE_CLOSE; | |||
else | |||
peer_kill(p); | |||
@@ -437,7 +437,7 @@ read_piece(struct peer *p, unsigned long rmax) | |||
p->rate_to_me[btpd.seconds % RATEHISTORY] += nread; | |||
p->tp->downloaded += nread; | |||
if (rd->iob.buf_off == rd->iob.buf_len) { | |||
struct piece_req *req = TAILQ_FIRST(&p->my_reqs); | |||
struct piece_req *req = BTPDQ_FIRST(&p->my_reqs); | |||
if (req != NULL && | |||
req->index == rd->index && | |||
req->begin == rd->begin && | |||
@@ -619,7 +619,7 @@ net_generic_read(struct peer *p, unsigned long rmax) | |||
uint32_t begin = net_read32(buf + off + 9); | |||
uint32_t length = msg_len - 9; | |||
#if 0 | |||
struct piece_req *req = TAILQ_FIRST(&p->my_reqs); | |||
struct piece_req *req = BTPDQ_FIRST(&p->my_reqs); | |||
if (req == NULL) | |||
goto bad_data; | |||
if (!(index == req->index && | |||
@@ -631,7 +631,7 @@ net_generic_read(struct peer *p, unsigned long rmax) | |||
off_t cbegin = index * p->tp->meta.piece_length + begin; | |||
p->tp->downloaded += length; | |||
p->rate_to_me[btpd.seconds % RATEHISTORY] += length; | |||
struct piece_req *req = TAILQ_FIRST(&p->my_reqs); | |||
struct piece_req *req = BTPDQ_FIRST(&p->my_reqs); | |||
if (req != NULL && | |||
req->index == index && | |||
req->begin == begin && | |||
@@ -677,7 +677,7 @@ net_generic_read(struct peer *p, unsigned long rmax) | |||
btpd_log(BTPD_L_MSG, "cancel: %u, %u, %u\n", | |||
index, begin, length); | |||
req = TAILQ_FIRST(&p->p_reqs); | |||
req = BTPDQ_FIRST(&p->p_reqs); | |||
while (req != NULL) { | |||
if (req->index == index && | |||
req->begin == begin && | |||
@@ -686,7 +686,7 @@ net_generic_read(struct peer *p, unsigned long rmax) | |||
net_unsend_piece(p, req); | |||
break; | |||
} | |||
req = TAILQ_NEXT(req, entry); | |||
req = BTPDQ_NEXT(req, entry); | |||
} | |||
} else | |||
got_part = 1; | |||
@@ -771,12 +771,12 @@ net_shake_read(struct peer *p, unsigned long rmax) | |||
else if (hs->incoming) { | |||
struct torrent *tp = torrent_get_by_hash(in->buf + 28); | |||
#if 0 | |||
tp = TAILQ_FIRST(&btpd.cm_list); | |||
tp = BTPDQ_FIRST(&btpd.cm_list); | |||
while (tp != NULL) { | |||
if (bcmp(in->buf + 28, tp->meta.info_hash, 20) == 0) | |||
break; | |||
else | |||
tp = TAILQ_NEXT(tp, entry); | |||
tp = BTPDQ_NEXT(tp, entry); | |||
} | |||
#endif | |||
if (tp != NULL) { | |||
@@ -920,8 +920,8 @@ net_by_second(void) | |||
struct torrent *tp; | |||
int ri = btpd.seconds % RATEHISTORY; | |||
TAILQ_FOREACH(tp, &btpd.cm_list, entry) { | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) { | |||
BTPDQ_FOREACH(tp, &btpd.cm_list, entry) { | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) { | |||
p->rate_to_me[ri] = 0; | |||
p->rate_from_me[ri] = 0; | |||
} | |||
@@ -931,28 +931,28 @@ net_by_second(void) | |||
btpd.ibw_left = btpd.ibwlim; | |||
if (btpd.ibwlim > 0) { | |||
while ((p = TAILQ_FIRST(&btpd.readq)) != NULL && btpd.ibw_left > 0) { | |||
TAILQ_REMOVE(&btpd.readq, p, rq_entry); | |||
while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL && btpd.ibw_left > 0) { | |||
BTPDQ_REMOVE(&btpd.readq, p, rq_entry); | |||
p->flags &= ~PF_ON_READQ; | |||
btpd.ibw_left -= p->reader->read(p, btpd.ibw_left); | |||
} | |||
} else { | |||
while ((p = TAILQ_FIRST(&btpd.readq)) != NULL) { | |||
TAILQ_REMOVE(&btpd.readq, p, rq_entry); | |||
while ((p = BTPDQ_FIRST(&btpd.readq)) != NULL) { | |||
BTPDQ_REMOVE(&btpd.readq, p, rq_entry); | |||
p->flags &= ~PF_ON_READQ; | |||
p->reader->read(p, 0); | |||
} | |||
} | |||
if (btpd.obwlim) { | |||
while ((p = TAILQ_FIRST(&btpd.writeq)) != NULL && btpd.obw_left > 0) { | |||
TAILQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL && btpd.obw_left > 0) { | |||
BTPDQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
p->flags &= ~PF_ON_WRITEQ; | |||
btpd.obw_left -= net_write(p, btpd.obw_left); | |||
} | |||
} else { | |||
while ((p = TAILQ_FIRST(&btpd.writeq)) != NULL) { | |||
TAILQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
while ((p = BTPDQ_FIRST(&btpd.writeq)) != NULL) { | |||
BTPDQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
p->flags &= ~PF_ON_WRITEQ; | |||
net_write(p, 0); | |||
} | |||
@@ -12,12 +12,12 @@ | |||
#define MSG_CANCEL 8 | |||
struct iob_link { | |||
TAILQ_ENTRY(iob_link) entry; | |||
BTPDQ_ENTRY(iob_link) entry; | |||
void (*kill_buf)(struct io_buffer *); | |||
struct io_buffer iob; | |||
}; | |||
TAILQ_HEAD(io_tq, iob_link); | |||
BTPDQ_HEAD(io_tq, iob_link); | |||
struct peer; | |||
@@ -67,10 +67,10 @@ struct generic_reader { | |||
struct piece_req { | |||
uint32_t index, begin, length; | |||
struct iob_link *head; /* Pointer to outgoing piece. */ | |||
TAILQ_ENTRY(piece_req) entry; | |||
BTPDQ_ENTRY(piece_req) entry; | |||
}; | |||
TAILQ_HEAD(piece_req_tq, piece_req); | |||
BTPDQ_HEAD(piece_req_tq, piece_req); | |||
void net_connection_cb(int sd, short type, void *arg); | |||
void net_by_second(void); | |||
@@ -24,30 +24,30 @@ peer_kill(struct peer *p) | |||
if (p->flags & PF_ATTACHED) | |||
cm_on_lost_peer(p); | |||
if (p->flags & PF_ON_READQ) | |||
TAILQ_REMOVE(&btpd.readq, p, rq_entry); | |||
BTPDQ_REMOVE(&btpd.readq, p, rq_entry); | |||
if (p->flags & PF_ON_WRITEQ) | |||
TAILQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
BTPDQ_REMOVE(&btpd.writeq, p, wq_entry); | |||
close(p->sd); | |||
event_del(&p->in_ev); | |||
event_del(&p->out_ev); | |||
iol = TAILQ_FIRST(&p->outq); | |||
iol = BTPDQ_FIRST(&p->outq); | |||
while (iol != NULL) { | |||
struct iob_link *next = TAILQ_NEXT(iol, entry); | |||
struct iob_link *next = BTPDQ_NEXT(iol, entry); | |||
iol->kill_buf(&iol->iob); | |||
free(iol); | |||
iol = next; | |||
} | |||
req = TAILQ_FIRST(&p->p_reqs); | |||
req = BTPDQ_FIRST(&p->p_reqs); | |||
while (req != NULL) { | |||
struct piece_req *next = TAILQ_NEXT(req, entry); | |||
struct piece_req *next = BTPDQ_NEXT(req, entry); | |||
free(req); | |||
req = next; | |||
} | |||
req = TAILQ_FIRST(&p->my_reqs); | |||
req = BTPDQ_FIRST(&p->my_reqs); | |||
while (req != NULL) { | |||
struct piece_req *next = TAILQ_NEXT(req, entry); | |||
struct piece_req *next = BTPDQ_NEXT(req, entry); | |||
free(req); | |||
req = next; | |||
} | |||
@@ -66,7 +66,7 @@ peer_request(struct peer *p, uint32_t index, uint32_t begin, uint32_t len) | |||
req->index = index; | |||
req->begin = begin; | |||
req->length = len; | |||
TAILQ_INSERT_TAIL(&p->my_reqs, req, entry); | |||
BTPDQ_INSERT_TAIL(&p->my_reqs, req, entry); | |||
net_send_request(p, req); | |||
} | |||
@@ -75,15 +75,15 @@ peer_cancel(struct peer *p, uint32_t index, uint32_t begin, uint32_t len) | |||
{ | |||
struct piece_req *req; | |||
again: | |||
req = TAILQ_FIRST(&p->my_reqs); | |||
req = BTPDQ_FIRST(&p->my_reqs); | |||
while (req != NULL && | |||
!(index == req->index && | |||
begin == req->begin && | |||
len == req->length)) | |||
req = TAILQ_NEXT(req, entry); | |||
req = BTPDQ_NEXT(req, entry); | |||
if (req != NULL) { | |||
net_send_cancel(p, req); | |||
TAILQ_REMOVE(&p->my_reqs, req, entry); | |||
BTPDQ_REMOVE(&p->my_reqs, req, entry); | |||
free(req); | |||
goto again; | |||
} | |||
@@ -107,7 +107,7 @@ peer_choke(struct peer *p) | |||
{ | |||
struct piece_req *req; | |||
while ((req = TAILQ_FIRST(&p->p_reqs)) != NULL) | |||
while ((req = BTPDQ_FIRST(&p->p_reqs)) != NULL) | |||
net_unsend_piece(p, req); | |||
p->flags |= PF_I_CHOKE; | |||
@@ -141,9 +141,9 @@ peer_create_common(int sd) | |||
p->sd = sd; | |||
p->flags = PF_I_CHOKE | PF_P_CHOKE; | |||
TAILQ_INIT(&p->p_reqs); | |||
TAILQ_INIT(&p->my_reqs); | |||
TAILQ_INIT(&p->outq); | |||
BTPDQ_INIT(&p->p_reqs); | |||
BTPDQ_INIT(&p->my_reqs); | |||
BTPDQ_INIT(&p->outq); | |||
event_set(&p->out_ev, p->sd, EV_WRITE, net_write_cb, p); | |||
event_set(&p->in_ev, p->sd, EV_READ, net_read_cb, p); | |||
@@ -35,13 +35,13 @@ struct peer { | |||
unsigned long rate_to_me[RATEHISTORY]; | |||
unsigned long rate_from_me[RATEHISTORY]; | |||
TAILQ_ENTRY(peer) cm_entry; | |||
BTPDQ_ENTRY(peer) cm_entry; | |||
TAILQ_ENTRY(peer) rq_entry; | |||
TAILQ_ENTRY(peer) wq_entry; | |||
BTPDQ_ENTRY(peer) rq_entry; | |||
BTPDQ_ENTRY(peer) wq_entry; | |||
}; | |||
TAILQ_HEAD(peer_tq, peer); | |||
BTPDQ_HEAD(peer_tq, peer); | |||
void peer_unchoke(struct peer *p); | |||
void peer_choke(struct peer *p); | |||
@@ -43,7 +43,7 @@ static void | |||
cm_assign_requests_eg(struct peer *peer) | |||
{ | |||
struct piece *piece; | |||
TAILQ_FOREACH(piece, &peer->tp->getlst, entry) { | |||
BTPDQ_FOREACH(piece, &peer->tp->getlst, entry) { | |||
if (has_bit(peer->piece_field, piece->index)) { | |||
peer_want(peer, piece->index); | |||
if ((peer->flags & PF_P_CHOKE) == 0) | |||
@@ -55,13 +55,13 @@ cm_assign_requests_eg(struct peer *peer) | |||
static void | |||
cm_unassign_requests_eg(struct peer *peer) | |||
{ | |||
struct piece_req *req = TAILQ_FIRST(&peer->my_reqs); | |||
struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs); | |||
while (req != NULL) { | |||
struct piece_req *next = TAILQ_NEXT(req, entry); | |||
struct piece_req *next = BTPDQ_NEXT(req, entry); | |||
free(req); | |||
req = next; | |||
} | |||
TAILQ_INIT(&peer->my_reqs); | |||
BTPDQ_INIT(&peer->my_reqs); | |||
} | |||
static void | |||
@@ -70,7 +70,7 @@ cm_enter_endgame(struct torrent *tp) | |||
struct peer *peer; | |||
btpd_log(BTPD_L_POL, "Entering end game\n"); | |||
tp->endgame = 1; | |||
TAILQ_FOREACH(peer, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(peer, &tp->peers, cm_entry) | |||
cm_assign_requests_eg(peer); | |||
} | |||
@@ -85,13 +85,13 @@ cm_should_schedule(struct torrent *tp) | |||
{ | |||
if (!tp->endgame) { | |||
int should = 1; | |||
struct piece *p = TAILQ_FIRST(&tp->getlst); | |||
struct piece *p = BTPDQ_FIRST(&tp->getlst); | |||
while (p != NULL) { | |||
if (!piece_full(p)) { | |||
should = 0; | |||
break; | |||
} | |||
p = TAILQ_NEXT(p, entry); | |||
p = BTPDQ_NEXT(p, entry); | |||
} | |||
return should; | |||
} else | |||
@@ -105,7 +105,7 @@ cm_on_peerless_piece(struct torrent *tp, struct piece *piece) | |||
assert(tp->piece_count[piece->index] == 0); | |||
btpd_log(BTPD_L_POL, "peerless piece %u\n", piece->index); | |||
msync(tp->imem, tp->isiz, MS_ASYNC); | |||
TAILQ_REMOVE(&tp->getlst, piece, entry); | |||
BTPDQ_REMOVE(&tp->getlst, piece, entry); | |||
free(piece); | |||
if (cm_should_schedule(tp)) | |||
cm_schedule_piece(tp); | |||
@@ -150,7 +150,7 @@ choke_alg(struct torrent *tp) | |||
psort = (struct peer **)btpd_malloc(tp->npeers * sizeof(p)); | |||
i = 0; | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) | |||
psort[i++] = p; | |||
if (tp->have_npieces == tp->meta.npieces) | |||
@@ -190,13 +190,13 @@ next_optimistic(struct torrent *tp, struct peer *np) | |||
if (np != NULL) | |||
tp->optimistic = np; | |||
else if (tp->optimistic == NULL) | |||
tp->optimistic = TAILQ_FIRST(&tp->peers); | |||
tp->optimistic = BTPDQ_FIRST(&tp->peers); | |||
else { | |||
np = TAILQ_NEXT(tp->optimistic, cm_entry); | |||
np = BTPDQ_NEXT(tp->optimistic, cm_entry); | |||
if (np != NULL) | |||
tp->optimistic = np; | |||
else | |||
tp->optimistic = TAILQ_FIRST(&tp->peers); | |||
tp->optimistic = BTPDQ_FIRST(&tp->peers); | |||
} | |||
assert(tp->optimistic != NULL); | |||
choke_alg(tp); | |||
@@ -257,9 +257,9 @@ cm_on_piece_ann(struct peer *peer, uint32_t piece) | |||
if (has_bit(tp->piece_field, piece)) | |||
return; | |||
p = TAILQ_FIRST(&tp->getlst); | |||
p = BTPDQ_FIRST(&tp->getlst); | |||
while (p != NULL && p->index != piece) | |||
p = TAILQ_NEXT(p, entry); | |||
p = BTPDQ_NEXT(p, entry); | |||
if (p != NULL && tp->endgame) { | |||
peer_want(peer, p->index); | |||
@@ -267,7 +267,7 @@ cm_on_piece_ann(struct peer *peer, uint32_t piece) | |||
cm_on_download(peer); | |||
} else if (p != NULL && !piece_full(p)) { | |||
peer_want(peer, p->index); | |||
if ((peer->flags & PF_P_CHOKE) == 0 && TAILQ_EMPTY(&peer->my_reqs)) | |||
if ((peer->flags & PF_P_CHOKE) == 0 && BTPDQ_EMPTY(&peer->my_reqs)) | |||
cm_on_download(peer); | |||
} else if (p == NULL && cm_should_schedule(tp)) | |||
cm_schedule_piece(tp); | |||
@@ -282,18 +282,18 @@ cm_on_lost_peer(struct peer *peer) | |||
tp->npeers--; | |||
peer->flags &= ~PF_ATTACHED; | |||
if (tp->npeers == 0) { | |||
TAILQ_REMOVE(&tp->peers, peer, cm_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, cm_entry); | |||
tp->optimistic = NULL; | |||
tp->choke_time = tp->opt_time = 0; | |||
} else if (tp->optimistic == peer) { | |||
struct peer *next = TAILQ_NEXT(peer, cm_entry); | |||
TAILQ_REMOVE(&tp->peers, peer, cm_entry); | |||
struct peer *next = BTPDQ_NEXT(peer, cm_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, cm_entry); | |||
next_optimistic(peer->tp, next); | |||
} else if ((peer->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) { | |||
TAILQ_REMOVE(&tp->peers, peer, cm_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, cm_entry); | |||
cm_on_unupload(peer); | |||
} else { | |||
TAILQ_REMOVE(&tp->peers, peer, cm_entry); | |||
BTPDQ_REMOVE(&tp->peers, peer, cm_entry); | |||
} | |||
for (size_t i = 0; i < peer->tp->meta.npieces; i++) | |||
@@ -303,8 +303,8 @@ cm_on_lost_peer(struct peer *peer) | |||
if ((peer->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT) | |||
cm_on_undownload(peer); | |||
for (piece = TAILQ_FIRST(&tp->getlst); piece; | |||
piece = TAILQ_NEXT(piece, entry)) { | |||
for (piece = BTPDQ_FIRST(&tp->getlst); piece; | |||
piece = BTPDQ_NEXT(piece, entry)) { | |||
if (has_bit(peer->piece_field, piece->index) && | |||
tp->piece_count[piece->index] == 0) | |||
cm_on_peerless_piece(tp, piece); | |||
@@ -320,13 +320,13 @@ cm_on_new_peer(struct peer *peer) | |||
peer->flags |= PF_ATTACHED; | |||
if (tp->npeers == 1) { | |||
TAILQ_INSERT_HEAD(&tp->peers, peer, cm_entry); | |||
BTPDQ_INSERT_HEAD(&tp->peers, peer, cm_entry); | |||
next_optimistic(peer->tp, peer); | |||
} else { | |||
if (random() > RAND_MAX / 3) | |||
TAILQ_INSERT_AFTER(&tp->peers, tp->optimistic, peer, cm_entry); | |||
BTPDQ_INSERT_AFTER(&tp->peers, tp->optimistic, peer, cm_entry); | |||
else | |||
TAILQ_INSERT_TAIL(&tp->peers, peer, cm_entry); | |||
BTPDQ_INSERT_TAIL(&tp->peers, peer, cm_entry); | |||
} | |||
} | |||
@@ -336,7 +336,7 @@ missing_piece(struct torrent *tp, uint32_t index) | |||
struct piece *p; | |||
if (has_bit(tp->piece_field, index)) | |||
return 0; | |||
TAILQ_FOREACH(p, &tp->getlst, entry) | |||
BTPDQ_FOREACH(p, &tp->getlst, entry) | |||
if (p->index == index) | |||
return 0; | |||
return 1; | |||
@@ -380,17 +380,17 @@ activate_piece_peers(struct torrent *tp, struct piece *piece) | |||
{ | |||
struct peer *peer; | |||
assert(!piece_full(piece) && tp->endgame == 0); | |||
TAILQ_FOREACH(peer, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(peer, &tp->peers, cm_entry) | |||
if (has_bit(peer->piece_field, piece->index)) | |||
peer_want(peer, piece->index); | |||
peer = TAILQ_FIRST(&tp->peers); | |||
peer = BTPDQ_FIRST(&tp->peers); | |||
while (peer != NULL && !piece_full(piece)) { | |||
if ((peer->flags & (PF_P_CHOKE|PF_I_WANT)) == PF_I_WANT && | |||
TAILQ_EMPTY(&peer->my_reqs)) { | |||
BTPDQ_EMPTY(&peer->my_reqs)) { | |||
// | |||
cm_on_download(peer); | |||
} | |||
peer = TAILQ_NEXT(peer, cm_entry); | |||
peer = BTPDQ_NEXT(peer, cm_entry); | |||
} | |||
} | |||
@@ -444,7 +444,7 @@ cm_schedule_piece(struct torrent *tp) | |||
btpd_log(BTPD_L_POL, "scheduled piece: %u.\n", min_i); | |||
piece = alloc_piece(tp, min_i); | |||
TAILQ_INSERT_HEAD(&tp->getlst, piece, entry); | |||
BTPDQ_INSERT_HEAD(&tp->getlst, piece, entry); | |||
if (piece->ngot == piece->nblocks) { | |||
cm_on_piece(tp, piece); | |||
if (cm_should_schedule(tp)) | |||
@@ -466,7 +466,7 @@ cm_on_piece_full(struct torrent *tp, struct piece *piece) | |||
if (cm_should_schedule(tp)) | |||
cm_schedule_piece(tp); | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) { | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) { | |||
if (has_bit(p->piece_field, piece->index)) | |||
peer_unwant(p, piece->index); | |||
} | |||
@@ -479,11 +479,11 @@ cm_assign_request(struct peer *peer) | |||
unsigned i; | |||
uint32_t start, len; | |||
piece = TAILQ_FIRST(&peer->tp->getlst); | |||
piece = BTPDQ_FIRST(&peer->tp->getlst); | |||
while (piece != NULL) { | |||
if (!piece_full(piece) && has_bit(peer->piece_field, piece->index)) | |||
break; | |||
piece = TAILQ_NEXT(piece, entry); | |||
piece = BTPDQ_NEXT(piece, entry); | |||
} | |||
if (piece == NULL) | |||
@@ -531,20 +531,20 @@ void | |||
cm_unassign_requests(struct peer *peer) | |||
{ | |||
struct torrent *tp = peer->tp; | |||
struct piece *piece = TAILQ_FIRST(&tp->getlst); | |||
struct piece *piece = BTPDQ_FIRST(&tp->getlst); | |||
while (piece != NULL) { | |||
int was_full = piece_full(piece); | |||
struct piece_req *req = TAILQ_FIRST(&peer->my_reqs); | |||
struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs); | |||
while (req != NULL) { | |||
struct piece_req *next = TAILQ_NEXT(req, entry); | |||
struct piece_req *next = BTPDQ_NEXT(req, entry); | |||
if (piece->index == req->index) { | |||
assert(has_bit(piece->down_field, req->begin / BLOCKLEN)); | |||
clear_bit(piece->down_field, req->begin / BLOCKLEN); | |||
piece->nbusy--; | |||
TAILQ_REMOVE(&peer->my_reqs, req, entry); | |||
BTPDQ_REMOVE(&peer->my_reqs, req, entry); | |||
free(req); | |||
} | |||
@@ -554,10 +554,10 @@ cm_unassign_requests(struct peer *peer) | |||
if (was_full && !piece_full(piece)) | |||
cm_on_piece_unfull(tp, piece); | |||
piece = TAILQ_NEXT(piece, entry); | |||
piece = BTPDQ_NEXT(piece, entry); | |||
} | |||
assert(TAILQ_EMPTY(&peer->my_reqs)); | |||
assert(BTPDQ_EMPTY(&peer->my_reqs)); | |||
} | |||
static int | |||
@@ -631,12 +631,12 @@ cm_on_piece(struct torrent *tp, struct piece *piece) | |||
tracker_req(tp, TR_COMPLETED); | |||
} | |||
msync(tp->imem, tp->isiz, MS_ASYNC); | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) | |||
peer_have(p, piece->index); | |||
if (tp->endgame) | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) | |||
peer_unwant(p, piece->index); | |||
TAILQ_REMOVE(&tp->getlst, piece, entry); | |||
BTPDQ_REMOVE(&tp->getlst, piece, entry); | |||
free(piece); | |||
} else if (tp->endgame) { | |||
struct peer *p; | |||
@@ -645,7 +645,7 @@ cm_on_piece(struct torrent *tp, struct piece *piece) | |||
for (unsigned i = 0; i < piece->nblocks; i++) | |||
clear_bit(piece->have_field, i); | |||
piece->ngot = 0; | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) | |||
if (has_bit(p->piece_field, piece->index) && | |||
(p->flags & PF_P_CHOKE) == 0) { | |||
// | |||
@@ -659,7 +659,7 @@ cm_on_piece(struct torrent *tp, struct piece *piece) | |||
assert(!has_bit(piece->down_field, i)); | |||
} | |||
msync(tp->imem, tp->isiz, MS_ASYNC); | |||
TAILQ_REMOVE(&tp->getlst, piece, entry); | |||
BTPDQ_REMOVE(&tp->getlst, piece, entry); | |||
free(piece); | |||
if (cm_should_schedule(tp)) | |||
cm_schedule_piece(tp); | |||
@@ -670,11 +670,11 @@ void | |||
cm_on_block(struct peer *peer) | |||
{ | |||
struct torrent *tp = peer->tp; | |||
struct piece_req *req = TAILQ_FIRST(&peer->my_reqs); | |||
struct piece *piece = TAILQ_FIRST(&tp->getlst); | |||
struct piece_req *req = BTPDQ_FIRST(&peer->my_reqs); | |||
struct piece *piece = BTPDQ_FIRST(&tp->getlst); | |||
unsigned block = req->begin / BLOCKLEN; | |||
while (piece != NULL && piece->index != req->index) | |||
piece = TAILQ_NEXT(piece, entry); | |||
piece = BTPDQ_NEXT(piece, entry); | |||
set_bit(piece->have_field, block); | |||
clear_bit(piece->down_field, block); | |||
piece->ngot++; | |||
@@ -685,10 +685,10 @@ cm_on_block(struct peer *peer) | |||
uint32_t length = req->length; | |||
struct peer *p; | |||
TAILQ_REMOVE(&peer->my_reqs, req, entry); | |||
BTPDQ_REMOVE(&peer->my_reqs, req, entry); | |||
free(req); | |||
TAILQ_FOREACH(p, &tp->peers, cm_entry) { | |||
BTPDQ_FOREACH(p, &tp->peers, cm_entry) { | |||
if (has_bit(p->piece_field, index) && | |||
(peer->flags & PF_P_CHOKE) == 0) | |||
peer_cancel(p, index, begin, length); | |||
@@ -696,7 +696,7 @@ cm_on_block(struct peer *peer) | |||
if (piece->ngot == piece->nblocks) | |||
cm_on_piece(tp, piece); | |||
} else { | |||
TAILQ_REMOVE(&peer->my_reqs, req, entry); | |||
BTPDQ_REMOVE(&peer->my_reqs, req, entry); | |||
free(req); | |||
if (piece->ngot == piece->nblocks) | |||
cm_on_piece(tp, piece); | |||
@@ -9,73 +9,73 @@ | |||
/* | |||
* Tail queue declarations. | |||
*/ | |||
#define TAILQ_HEAD(name, type) \ | |||
#define BTPDQ_HEAD(name, type) \ | |||
struct name { \ | |||
struct type *tqh_first; /* first element */ \ | |||
struct type **tqh_last; /* addr of last next element */ \ | |||
} | |||
#define TAILQ_HEAD_INITIALIZER(head) \ | |||
#define BTPDQ_HEAD_INITIALIZER(head) \ | |||
{ NULL, &(head).tqh_first } | |||
#define TAILQ_ENTRY(type) \ | |||
#define BTPDQ_ENTRY(type) \ | |||
struct { \ | |||
struct type *tqe_next; /* next element */ \ | |||
struct type **tqe_prev; /* address of previous next element */ \ | |||
} | |||
#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL) | |||
#define BTPDQ_EMPTY(head) ((head)->tqh_first == NULL) | |||
#define TAILQ_FIRST(head) ((head)->tqh_first) | |||
#define BTPDQ_FIRST(head) ((head)->tqh_first) | |||
#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) | |||
#define BTPDQ_NEXT(elm, field) ((elm)->field.tqe_next) | |||
#define TAILQ_FOREACH(var, head, field) \ | |||
for ((var) = TAILQ_FIRST((head)); \ | |||
#define BTPDQ_FOREACH(var, head, field) \ | |||
for ((var) = BTPDQ_FIRST((head)); \ | |||
(var); \ | |||
(var) = TAILQ_NEXT((var), field)) | |||
(var) = BTPDQ_NEXT((var), field)) | |||
#define TAILQ_INIT(head) do { \ | |||
TAILQ_FIRST((head)) = NULL; \ | |||
(head)->tqh_last = &TAILQ_FIRST((head)); \ | |||
#define BTPDQ_INIT(head) do { \ | |||
BTPDQ_FIRST((head)) = NULL; \ | |||
(head)->tqh_last = &BTPDQ_FIRST((head)); \ | |||
} while (0) | |||
#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ | |||
if ((TAILQ_NEXT((elm), field) = TAILQ_NEXT((listelm), field)) != NULL)\ | |||
TAILQ_NEXT((elm), field)->field.tqe_prev = \ | |||
&TAILQ_NEXT((elm), field); \ | |||
#define BTPDQ_INSERT_AFTER(head, listelm, elm, field) do { \ | |||
if ((BTPDQ_NEXT((elm), field) = BTPDQ_NEXT((listelm), field)) != NULL)\ | |||
BTPDQ_NEXT((elm), field)->field.tqe_prev = \ | |||
&BTPDQ_NEXT((elm), field); \ | |||
else { \ | |||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \ | |||
(head)->tqh_last = &BTPDQ_NEXT((elm), field); \ | |||
} \ | |||
TAILQ_NEXT((listelm), field) = (elm); \ | |||
(elm)->field.tqe_prev = &TAILQ_NEXT((listelm), field); \ | |||
BTPDQ_NEXT((listelm), field) = (elm); \ | |||
(elm)->field.tqe_prev = &BTPDQ_NEXT((listelm), field); \ | |||
} while (0) | |||
#define TAILQ_INSERT_HEAD(head, elm, field) do { \ | |||
if ((TAILQ_NEXT((elm), field) = TAILQ_FIRST((head))) != NULL) \ | |||
TAILQ_FIRST((head))->field.tqe_prev = \ | |||
&TAILQ_NEXT((elm), field); \ | |||
#define BTPDQ_INSERT_HEAD(head, elm, field) do { \ | |||
if ((BTPDQ_NEXT((elm), field) = BTPDQ_FIRST((head))) != NULL) \ | |||
BTPDQ_FIRST((head))->field.tqe_prev = \ | |||
&BTPDQ_NEXT((elm), field); \ | |||
else \ | |||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \ | |||
TAILQ_FIRST((head)) = (elm); \ | |||
(elm)->field.tqe_prev = &TAILQ_FIRST((head)); \ | |||
(head)->tqh_last = &BTPDQ_NEXT((elm), field); \ | |||
BTPDQ_FIRST((head)) = (elm); \ | |||
(elm)->field.tqe_prev = &BTPDQ_FIRST((head)); \ | |||
} while (0) | |||
#define TAILQ_INSERT_TAIL(head, elm, field) do { \ | |||
TAILQ_NEXT((elm), field) = NULL; \ | |||
#define BTPDQ_INSERT_TAIL(head, elm, field) do { \ | |||
BTPDQ_NEXT((elm), field) = NULL; \ | |||
(elm)->field.tqe_prev = (head)->tqh_last; \ | |||
*(head)->tqh_last = (elm); \ | |||
(head)->tqh_last = &TAILQ_NEXT((elm), field); \ | |||
(head)->tqh_last = &BTPDQ_NEXT((elm), field); \ | |||
} while (0) | |||
#define TAILQ_REMOVE(head, elm, field) do { \ | |||
if ((TAILQ_NEXT((elm), field)) != NULL) \ | |||
TAILQ_NEXT((elm), field)->field.tqe_prev = \ | |||
#define BTPDQ_REMOVE(head, elm, field) do { \ | |||
if ((BTPDQ_NEXT((elm), field)) != NULL) \ | |||
BTPDQ_NEXT((elm), field)->field.tqe_prev = \ | |||
(elm)->field.tqe_prev; \ | |||
else { \ | |||
(head)->tqh_last = (elm)->field.tqe_prev; \ | |||
} \ | |||
*(elm)->field.tqe_prev = TAILQ_NEXT((elm), field); \ | |||
*(elm)->field.tqe_prev = BTPDQ_NEXT((elm), field); \ | |||
} while (0) | |||
#endif |
@@ -44,8 +44,8 @@ torrent_load3(const char *file, struct metainfo *mi, char *mem, size_t memsiz) | |||
tp->piece_count = btpd_calloc(mi->npieces, sizeof(tp->piece_count[0])); | |||
TAILQ_INIT(&tp->peers); | |||
TAILQ_INIT(&tp->getlst); | |||
BTPDQ_INIT(&tp->peers); | |||
BTPDQ_INIT(&tp->getlst); | |||
tp->imem = mem; | |||
tp->isiz = memsiz; | |||
@@ -61,7 +61,7 @@ torrent_load3(const char *file, struct metainfo *mi, char *mem, size_t memsiz) | |||
tp->meta = *mi; | |||
free(mi); | |||
TAILQ_INSERT_TAIL(&btpd.cm_list, tp, entry); | |||
BTPDQ_INSERT_TAIL(&btpd.cm_list, tp, entry); | |||
tracker_req(tp, TR_STARTED); | |||
btpd.ntorrents++; | |||
@@ -153,17 +153,17 @@ torrent_unload(struct torrent *tp) | |||
tracker_req(tp, TR_STOPPED); | |||
peer = TAILQ_FIRST(&tp->peers); | |||
peer = BTPDQ_FIRST(&tp->peers); | |||
while (peer != NULL) { | |||
struct peer *next = TAILQ_NEXT(peer, cm_entry); | |||
struct peer *next = BTPDQ_NEXT(peer, cm_entry); | |||
peer->flags &= ~PF_ATTACHED; | |||
peer_kill(peer); | |||
peer = next; | |||
} | |||
piece = TAILQ_FIRST(&tp->getlst); | |||
piece = BTPDQ_FIRST(&tp->getlst); | |||
while (piece != NULL) { | |||
struct piece *next = TAILQ_NEXT(piece, entry); | |||
struct piece *next = BTPDQ_NEXT(piece, entry); | |||
free(piece); | |||
piece = next; | |||
} | |||
@@ -174,7 +174,7 @@ torrent_unload(struct torrent *tp) | |||
munmap(tp->imem, tp->isiz); | |||
TAILQ_REMOVE(&btpd.cm_list, tp, entry); | |||
BTPDQ_REMOVE(&btpd.cm_list, tp, entry); | |||
free(tp); | |||
btpd.ntorrents--; | |||
} | |||
@@ -223,13 +223,13 @@ int | |||
torrent_has_peer(struct torrent *tp, const uint8_t *id) | |||
{ | |||
int has = 0; | |||
struct peer *p = TAILQ_FIRST(&tp->peers); | |||
struct peer *p = BTPDQ_FIRST(&tp->peers); | |||
while (p != NULL) { | |||
if (bcmp(p->id, id, 20) == 0) { | |||
has = 1; | |||
break; | |||
} | |||
p = TAILQ_NEXT(p, cm_entry); | |||
p = BTPDQ_NEXT(p, cm_entry); | |||
} | |||
return has; | |||
} | |||
@@ -237,8 +237,8 @@ torrent_has_peer(struct torrent *tp, const uint8_t *id) | |||
struct torrent * | |||
torrent_get_by_hash(const uint8_t *hash) | |||
{ | |||
struct torrent *tp = TAILQ_FIRST(&btpd.cm_list); | |||
struct torrent *tp = BTPDQ_FIRST(&btpd.cm_list); | |||
while (tp != NULL && bcmp(hash, tp->meta.info_hash, 20) != 0) | |||
tp = TAILQ_NEXT(tp, entry); | |||
tp = BTPDQ_NEXT(tp, entry); | |||
return tp; | |||
} |
@@ -11,16 +11,16 @@ struct piece { | |||
uint8_t *have_field; | |||
uint8_t *down_field; | |||
TAILQ_ENTRY(piece) entry; | |||
BTPDQ_ENTRY(piece) entry; | |||
}; | |||
TAILQ_HEAD(piece_tq, piece); | |||
BTPDQ_HEAD(piece_tq, piece); | |||
struct torrent { | |||
const char *relpath; | |||
struct metainfo meta; | |||
TAILQ_ENTRY(torrent) entry; | |||
BTPDQ_ENTRY(torrent) entry; | |||
void *imem; | |||
size_t isiz; | |||
@@ -48,7 +48,7 @@ struct torrent { | |||
struct piece_tq getlst; | |||
}; | |||
TAILQ_HEAD(torrent_tq, torrent); | |||
BTPDQ_HEAD(torrent_tq, torrent); | |||
off_t torrent_bytes_left(struct torrent *tp); | |||
@@ -285,7 +285,7 @@ tracker_req(struct torrent *tp, enum tr_event tr_event) | |||
child = (struct child *)(req + 1); | |||
child->data = req; | |||
child->child_done = tracker_done; | |||
TAILQ_INSERT_TAIL(&btpd.kids, child, entry); | |||
BTPDQ_INSERT_TAIL(&btpd.kids, child, entry); | |||
child->pid = fork(); | |||
if (child->pid < 0) { | |||