Log which peer contributed what to each piece. Never request the same piece from the same peer twice, and stop downloading altogether from peers implicated in three bad pieces. When a previously bad piece is finally downloaded successfully, the peer(s) that sent the bad data can be identified and banned.
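In outline: every piece being downloaded keeps a log of which peer delivered which blocks. If a piece fails its hash test and a single peer supplied every block, that peer is banned on the spot. Otherwise all contributors become suspects and the per-block hashes of the bad copy are saved; once a good copy of the piece verifies, each suspect's blocks are compared against the good hashes and the peer(s) whose blocks differ are banned. The stand-alone sketch below models these two verdict rules. All names in it (attempt, record, NBLOCKS, ...) are illustrative only, not btpd identifiers; the real implementation follows in the diff.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define NBLOCKS 4
    #define MAXREC  4

    struct record {                   /* one contributor to one download attempt */
        const char *peer;
        unsigned down_mask;           /* bit i set: this peer sent block i */
    };

    struct attempt {                  /* one assembled copy of the piece */
        int nrecs;
        struct record recs[MAXREC];
        uint8_t hashes[NBLOCKS][20];  /* per-block digests of that copy */
    };

    /* Rule 1: a failed attempt whose only contributor supplied every
     * block is conclusive evidence against that contributor. */
    static const char *
    sole_culprit(const struct attempt *bad)
    {
        if (bad->nrecs == 1 && bad->recs[0].down_mask == (1u << NBLOCKS) - 1)
            return bad->recs[0].peer;
        return NULL;
    }

    /* Rule 2: with a verified good copy at hand, any block of a failed
     * attempt that hashes differently incriminates whoever sent it. */
    static void
    ban_by_hashes(const struct attempt *good, const struct attempt *bad)
    {
        for (int r = 0; r < bad->nrecs; r++)
            for (unsigned i = 0; i < NBLOCKS; i++)
                if ((bad->recs[r].down_mask & (1u << i)) != 0
                    && memcmp(good->hashes[i], bad->hashes[i], 20) != 0) {
                    printf("ban %s (block %u differs)\n", bad->recs[r].peer, i);
                    break;
                }
    }

    int
    main(void)
    {
        struct attempt good = { .nrecs = 1, .recs = {{ "peer A", 0xf }} };
        struct attempt bad  = { .nrecs = 2,
            .recs = {{ "peer B", 0x3 }, { "peer C", 0xc }} };
        memset(good.hashes, 0xaa, sizeof(good.hashes));
        memcpy(bad.hashes, good.hashes, sizeof(bad.hashes));
        bad.hashes[2][0] ^= 1;           /* peer C's block 2 was corrupt */
        if (sole_culprit(&bad) == NULL)  /* two contributors: inconclusive */
            ban_by_hashes(&good, &bad);  /* prints: ban peer C (block 2 differs) */
        return 0;
    }

Here the inconclusive first attempt (two contributors) is resolved only after the good copy arrives, which is the flow piece_log_bad() and piece_log_good() implement below.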
@@ -58,6 +58,7 @@
 #define BTPD_L_MSG 0x00000008
 #define BTPD_L_BTPD 0x00000010
 #define BTPD_L_POL 0x00000020
+#define BTPD_L_BAD 0x00000040

 extern long btpd_seconds;
@@ -90,6 +91,9 @@ uint32_t btpd_id_hash(const void *k);
 const uint8_t *btpd_get_peer_id(void);

+int btpd_id_eq(const void *id1, const void *id2);
+uint32_t btpd_id_hash(const void *id);
+
 void td_acquire_lock(void);
 void td_release_lock(void);
@@ -17,18 +17,17 @@ dl_on_piece_ann(struct peer *p, uint32_t index)
     if (n->endgame) {
         assert(pc != NULL);
         peer_want(p, index);
-        if (!peer_chokes(p) && !peer_laden(p))
+        if (peer_leech_ok(p))
             dl_assign_requests_eg(p);
     } else if (pc == NULL) {
         peer_want(p, index);
-        if (!peer_chokes(p) && !peer_laden(p)) {
+        if (peer_leech_ok(p)) {
             pc = dl_new_piece(n, index);
-            if (pc != NULL)
-                dl_piece_assign_requests(pc, p);
+            dl_piece_assign_requests(pc, p);
         }
     } else if (!piece_full(pc)) {
         peer_want(p, index);
-        if (!peer_chokes(p) && !peer_laden(p))
+        if (peer_leech_ok(p))
             dl_piece_assign_requests(pc, p);
     }
 }
@@ -36,21 +35,17 @@ dl_on_piece_ann(struct peer *p, uint32_t index)
 void
 dl_on_download(struct peer *p)
 {
-    assert(peer_wanted(p));
     struct net *n = p->n;
-    if (n->endgame) {
+    if (n->endgame)
         dl_assign_requests_eg(p);
-    } else {
-        unsigned count = dl_assign_requests(p);
-        if (count == 0 && !p->n->endgame) // We may have entered end game.
-            assert(!peer_wanted(p) || peer_laden(p));
-    }
+    else
+        dl_assign_requests(p);
 }

 void
 dl_on_unchoke(struct peer *p)
 {
-    if (peer_wanted(p))
+    if (peer_leech_ok(p))
         dl_on_download(p);
 }
@@ -90,8 +85,9 @@ dl_on_ok_piece(struct net *n, uint32_t piece)
     if (n->endgame)
         BTPDQ_FOREACH(p, &n->peers, p_entry)
-            if (peer_has(p, pc->index))
-                peer_unwant(p, pc->index);
+            peer_unwant(p, pc->index);
+    piece_log_good(pc);
     assert(pc->nreqs == 0);
     piece_free(pc);
@@ -114,14 +110,16 @@ dl_on_bad_piece(struct net *n, uint32_t piece)
     pc->ngot = 0;
     pc->nbusy = 0;
+    piece_log_bad(pc);
     if (n->endgame) {
         struct peer *p;
         BTPDQ_FOREACH(p, &n->peers, p_entry) {
-            if (peer_has(p, pc->index) && peer_leech_ok(p) && !peer_laden(p))
+            if (peer_leech_ok(p) && peer_requestable(p, pc->index))
                 dl_assign_requests_eg(p);
         }
     } else
-        dl_on_piece_unfull(pc); // XXX: May get bad data again.
+        dl_on_piece_unfull(pc);
 }

 void
@@ -149,6 +147,7 @@ dl_on_block(struct peer *p, struct block_request *req,
     struct net *n = p->n;
     struct piece *pc = dl_find_piece(n, index);
+    piece_log_block(pc, p, begin);
     cm_put_bytes(p->n->tp, index, begin, data, length);
     pc->ngot++;
@@ -170,7 +169,7 @@ dl_on_block(struct peer *p, struct block_request *req,
                 continue;
             BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
             nb_drop(req->msg);
-            if (peer_leech_ok(req->p) && !peer_laden(req->p))
+            if (peer_leech_ok(req->p))
                 dl_assign_requests_eg(req->p);
             free(req);
         }
@@ -186,7 +185,7 @@ dl_on_block(struct peer *p, struct block_request *req,
         pc->nbusy--;
         if (pc->ngot == pc->nblocks)
             cm_test_piece(pc->n->tp, pc->index);
-        if (peer_leech_ok(p) && !peer_laden(p))
+        if (peer_leech_ok(p))
             dl_assign_requests(p);
     }
 }
@@ -6,6 +6,10 @@
 int piece_full(struct piece *pc);
 void piece_free(struct piece *pc);
+void piece_log_bad(struct piece *pc);
+void piece_log_good(struct piece *pc);
+void piece_log_block(struct piece *pc, struct peer *p, uint32_t begin);
+
 void dl_on_piece_unfull(struct piece *pc);
 struct piece *dl_new_piece(struct net *n, uint32_t index);
@@ -24,6 +24,135 @@
 #include <openssl/sha.h>
 #include <stream.h>

+static void
+piece_new_log(struct piece *pc)
+{
+    struct blog *log = btpd_calloc(1, sizeof(*log));
+    BTPDQ_INIT(&log->records);
+    BTPDQ_INSERT_HEAD(&pc->logs, log, entry);
+}
+
+static void
+piece_log_hashes(struct piece *pc)
+{
+    uint8_t *buf;
+    struct torrent *tp = pc->n->tp;
+    struct blog *log = BTPDQ_FIRST(&pc->logs);
+    log->hashes = btpd_malloc(20 * pc->nblocks);
+    for (unsigned i = 0; i < pc->nblocks; i++) {
+        uint32_t bsize = torrent_block_size(tp, pc->index, pc->nblocks, i);
+        cm_get_bytes(tp, pc->index, i * PIECE_BLOCKLEN, bsize, &buf);
+        SHA1(buf, bsize, &log->hashes[i * 20]);
+        free(buf);
+    }
+}
+
+static void
+piece_log_free(struct piece *pc, struct blog *log)
+{
+    struct blog_record *r, *rnext;
+    BTPDQ_FOREACH_MUTABLE(r, &log->records, entry, rnext) {
+        mp_drop(r->mp, pc->n);
+        free(r);
+    }
+    if (log->hashes != NULL)
+        free(log->hashes);
+    free(log);
+}
+
+static void
+piece_kill_logs(struct piece *pc)
+{
+    struct blog *log, *lnext;
+    BTPDQ_FOREACH_MUTABLE(log, &pc->logs, entry, lnext)
+        piece_log_free(pc, log);
+    BTPDQ_INIT(&pc->logs);
+}
+
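+/*
+ * A piece failed its hash test. If one peer supplied every block it is
+ * banned outright. Otherwise each contributor is marked suspect and the
+ * per-block hashes are saved so a later good copy can single out the
+ * peer(s) that sent bad data. A fresh log is started for the next try.
+ */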
+void
+piece_log_bad(struct piece *pc)
+{
+    struct blog *log = BTPDQ_FIRST(&pc->logs);
+    struct blog_record *r = BTPDQ_FIRST(&log->records);
+    struct meta_peer *culprit = NULL;
+    if (r == BTPDQ_LAST(&log->records, blog_record_tq)) {
+        unsigned i;
+        for (i = 0; i < pc->nblocks; i++)
+            if (!has_bit(r->down_field, i))
+                break;
+        if (i == pc->nblocks)
+            culprit = r->mp;
+    }
+    if (culprit != NULL) {
+        if (pc->n->endgame && culprit->p != NULL)
+            peer_unwant(culprit->p, pc->index);
+        net_ban_peer(pc->n, culprit);
+        BTPDQ_REMOVE(&pc->logs, log, entry);
+        piece_log_free(pc, log);
+    } else {
+        BTPDQ_FOREACH(r, &log->records, entry) {
+            if (r->mp->p != NULL) {
+                if (pc->n->endgame)
+                    peer_unwant(r->mp->p, pc->index);
+                peer_bad_piece(r->mp->p, pc->index);
+            }
+        }
+        piece_log_hashes(pc);
+    }
+    piece_new_log(pc);
+}
+
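+/*
+ * The piece finally passed its hash test. Contributors to the good
+ * copy are cleared. Every older bad log is then compared block by
+ * block against the good hashes; a peer whose block differs is banned.
+ */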
+void
+piece_log_good(struct piece *pc)
+{
+    struct blog_record *r;
+    struct blog *log = BTPDQ_FIRST(&pc->logs), *bad = BTPDQ_NEXT(log, entry);
+    BTPDQ_FOREACH(r, &log->records, entry)
+        if (r->mp->p != NULL)
+            peer_good_piece(r->mp->p, pc->index);
+    if (bad != NULL)
+        piece_log_hashes(pc);
+    while (bad != NULL) {
+        BTPDQ_FOREACH(r, &bad->records, entry) {
+            int culprit = 0;
+            for (unsigned i = 0; i < pc->nblocks && !culprit; i++)
+                if (has_bit(r->down_field, i) && (
+                        bcmp(&log->hashes[i*20], &bad->hashes[i*20], 20) != 0))
+                    culprit = 1;
+            if (culprit)
+                net_ban_peer(pc->n, r->mp);
+            else if (r->mp->p != NULL)
+                peer_good_piece(r->mp->p, pc->index);
+        }
+        bad = BTPDQ_NEXT(bad, entry);
+    }
+}
+
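+/*
+ * Note that peer p delivered the block at offset begin. The record
+ * pins the meta peer with a reference so blame survives a disconnect;
+ * the most recent contributor is kept at the head of the list.
+ */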
+void
+piece_log_block(struct piece *pc, struct peer *p, uint32_t begin)
+{
+    struct blog_record *r;
+    struct blog *log = BTPDQ_FIRST(&pc->logs);
+    BTPDQ_FOREACH(r, &log->records, entry)
+        if (r->mp == p->mp)
+            break;
+    if (r == NULL) {
+        r = btpd_calloc(1, sizeof(*r) + ceil(pc->nblocks / 8.0));
+        r->mp = p->mp;
+        mp_hold(r->mp);
+        BTPDQ_INSERT_HEAD(&log->records, r, entry);
+    } else {
+        BTPDQ_REMOVE(&log->records, r, entry);
+        BTPDQ_INSERT_HEAD(&log->records, r, entry);
+    }
+    set_bit(r->down_field, begin / PIECE_BLOCKLEN);
+}
+
 static struct piece *
 piece_alloc(struct net *n, uint32_t index)
 {
@@ -54,10 +183,13 @@ piece_alloc(struct net *n, uint32_t index)
     assert(pc->ngot < pc->nblocks);
     BTPDQ_INIT(&pc->reqs);
+    BTPDQ_INIT(&pc->logs);
+    piece_new_log(pc);
     n->npcs_busy++;
     set_bit(n->busy_field, index);
-    BTPDQ_INSERT_HEAD(&n->getlst, pc, entry);
+    BTPDQ_INSERT_TAIL(&n->getlst, pc, entry);
     return pc;
 }
@@ -74,6 +206,7 @@ piece_free(struct piece *pc)
         nb_drop(req->msg);
         free(req);
     }
+    piece_kill_logs(pc);
     if (pc->eg_reqs != NULL) {
         for (uint32_t i = 0; i < pc->nblocks; i++)
             if (pc->eg_reqs[i] != NULL)
@@ -170,11 +303,9 @@ dl_enter_endgame(struct net *n)
     }
     BTPDQ_FOREACH(p, &n->peers, p_entry) {
         assert(p->nwant == 0);
-        BTPDQ_FOREACH(pc, &n->getlst, entry) {
-            if (peer_has(p, pc->index))
-                peer_want(p, pc->index);
-        }
-        if (p->nwant > 0 && peer_leech_ok(p) && !peer_laden(p))
+        BTPDQ_FOREACH(pc, &n->getlst, entry)
+            peer_want(p, pc->index);
+        if (peer_leech_ok(p))
             dl_assign_requests_eg(p);
     }
 }
@@ -192,7 +323,7 @@ dl_find_piece(struct net *n, uint32_t index)
 static int
 dl_piece_startable(struct peer *p, uint32_t index)
 {
-    return peer_has(p, index) && !cm_has_piece(p->n->tp, index)
+    return peer_requestable(p, index) && !cm_has_piece(p->n->tp, index)
         && !has_bit(p->n->busy_field, index);
 }
@@ -252,23 +383,12 @@ static void
 dl_on_piece_full(struct piece *pc)
 {
     struct peer *p;
-    BTPDQ_FOREACH(p, &pc->n->peers, p_entry) {
-        if (peer_has(p, pc->index))
-            peer_unwant(p, pc->index);
-    }
+    BTPDQ_FOREACH(p, &pc->n->peers, p_entry)
+        peer_unwant(p, pc->index);
     if (dl_should_enter_endgame(pc->n))
         dl_enter_endgame(pc->n);
 }

-/*
- * Allocate the piece indicated by the index for download.
- * There's a small possibility that a piece is fully downloaded
- * but haven't been tested. If such is the case the piece will
- * be tested and NULL will be returned. Also, we might then enter
- * end game.
- *
- * Return the piece or NULL.
- */
 struct piece *
 dl_new_piece(struct net *n, uint32_t index)
 {
@@ -291,11 +411,10 @@ dl_on_piece_unfull(struct piece *pc)
     struct peer *p;
     assert(!piece_full(pc) && n->endgame == 0);
     BTPDQ_FOREACH(p, &n->peers, p_entry)
-        if (peer_has(p, pc->index))
-            peer_want(p, pc->index);
+        peer_want(p, pc->index);
     p = BTPDQ_FIRST(&n->peers);
     while (p != NULL && !piece_full(pc)) {
-        if (peer_leech_ok(p) && !peer_laden(p))
+        if (peer_leech_ok(p) && peer_requestable(p, pc->index))
             dl_piece_assign_requests(pc, p); // Cannot provoke end game here.
         p = BTPDQ_NEXT(p, p_entry);
     }
@@ -368,12 +487,12 @@ dl_piece_assign_requests(struct piece *pc, struct peer *p)
 unsigned
 dl_assign_requests(struct peer *p)
 {
-    assert(!p->n->endgame && !peer_laden(p));
+    assert(!p->n->endgame && peer_leech_ok(p));
     struct piece *pc;
     struct net *n = p->n;
     unsigned count = 0;
     BTPDQ_FOREACH(pc, &n->getlst, entry) {
-        if (piece_full(pc) || !peer_has(p, pc->index))
+        if (piece_full(pc) || !peer_requestable(p, pc->index))
             continue;
         count += dl_piece_assign_requests(pc, p);
         if (n->endgame)
@@ -455,7 +574,7 @@ dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
 void
 dl_assign_requests_eg(struct peer *p)
 {
-    assert(!peer_laden(p));
+    assert(peer_leech_ok(p));
     struct net *n = p->n;
     struct piece_tq tmp;
     BTPDQ_INIT(&tmp);
@@ -463,7 +582,7 @@ dl_assign_requests_eg(struct peer *p)
     struct piece *pc = BTPDQ_FIRST(&n->getlst);
     while (!peer_laden(p) && pc != NULL) {
         struct piece *next = BTPDQ_NEXT(pc, entry);
-        if (peer_has(p, pc->index) && pc->nblocks != pc->ngot) {
+        if (peer_requestable(p, pc->index) && pc->nblocks != pc->ngot) {
             dl_piece_assign_requests_eg(pc, p);
             BTPDQ_REMOVE(&n->getlst, pc, entry);
             BTPDQ_INSERT_HEAD(&tmp, pc, entry);
@@ -23,6 +23,16 @@ struct peer_tq net_bw_readq = BTPDQ_HEAD_INITIALIZER(net_bw_readq);
 struct peer_tq net_bw_writeq = BTPDQ_HEAD_INITIALIZER(net_bw_writeq);
 struct peer_tq net_unattached = BTPDQ_HEAD_INITIALIZER(net_unattached);

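+/*
+ * Flag a meta peer as banned. The extra reference keeps the record
+ * alive after disconnect; peer_leech_ok() fails for banned peers and
+ * peer_on_tick() drops any live connection.
+ */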
+void
+net_ban_peer(struct net *n, struct meta_peer *mp)
+{
+    if (mp->flags & PF_BANNED)
+        return;
+    mp_hold(mp); // Keep the meta peer alive
+    mp->flags |= PF_BANNED;
+    btpd_log(BTPD_L_BAD, "banned peer %p.\n", mp);
+}
+
 int
 net_torrent_has_peer(struct net *n, const uint8_t *id)
 {
@@ -29,6 +29,7 @@ void net_start(struct torrent *tp);
 void net_stop(struct torrent *tp);
 int net_active(struct torrent *tp);
+void net_ban_peer(struct net *n, struct meta_peer *mp);
 int net_torrent_has_peer(struct net *n, const uint8_t *id);
 void net_io_cb(int sd, short type, void *arg);
@@ -4,6 +4,8 @@
 BTPDQ_HEAD(peer_tq, peer);
 BTPDQ_HEAD(piece_tq, piece);
 BTPDQ_HEAD(block_request_tq, block_request);
+BTPDQ_HEAD(blog_tq, blog);
+BTPDQ_HEAD(blog_record_tq, blog_record);

 struct net {
     struct torrent *tp;
struct peer { | struct peer { | ||||
int sd; | int sd; | ||||
uint8_t *piece_field; | uint8_t *piece_field; | ||||
uint8_t *bad_field; | |||||
uint32_t npieces; | uint32_t npieces; | ||||
uint32_t nwant; | uint32_t nwant; | ||||
uint32_t npcs_bad; | |||||
int suspicion; | |||||
struct net *n; | struct net *n; | ||||
struct meta_peer *mp; | struct meta_peer *mp; | ||||
@@ -102,6 +107,7 @@ struct piece {
     struct net_buf **eg_reqs;
     struct block_request_tq reqs;
+    struct blog_tq logs;

     const uint8_t *have_field;
     uint8_t *down_field;
@@ -109,6 +115,18 @@ struct piece {
     BTPDQ_ENTRY(piece) entry;
 };

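+/* A "blog" (block log) records one download attempt of a piece: which
+ * meta peer delivered which blocks, plus per-block SHA1s once they
+ * are needed to compare a bad attempt against a good one. */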
+struct blog {
+    BTPDQ_ENTRY(blog) entry;
+    struct blog_record_tq records;
+    uint8_t *hashes;
+};
+
+struct blog_record {
+    BTPDQ_ENTRY(blog_record) entry;
+    struct meta_peer *mp;
+    uint8_t down_field[];
+};
+
 struct block_request {
     struct peer *p;
     struct net_buf *msg;
@@ -26,6 +26,7 @@ mp_drop(struct meta_peer *mp, struct net *n)
     assert(mp->refs > 0);
     mp->refs--;
     if (mp->refs == 0) {
+        assert(mp->p == NULL);
         if (mp->flags & PF_ATTACHED)
             assert(mptbl_remove(n->mptbl, mp->id) == mp);
         mp_kill(mp);
@@ -70,6 +71,8 @@ peer_kill(struct peer *p)
     free(p->in.buf);
     if (p->piece_field != NULL)
         free(p->piece_field);
+    if (p->bad_field != NULL)
+        free(p->bad_field);
     free(p);
     net_npeers--;
 }
@@ -257,9 +260,14 @@ peer_choke(struct peer *p)
 void
 peer_want(struct peer *p, uint32_t index)
 {
+    if (!has_bit(p->piece_field, index) || peer_has_bad(p, index))
+        return;
     assert(p->nwant < p->npieces);
     p->nwant++;
     if (p->nwant == 1) {
+        p->mp->flags |= PF_I_WANT;
+        if (p->mp->flags & PF_SUSPECT)
+            return;
         if (p->nreqs_out == 0) {
             assert((p->mp->flags & PF_DO_UNWANT) == 0);
             int unsent = 0;
@@ -272,17 +280,20 @@ peer_want(struct peer *p, uint32_t index)
             assert((p->mp->flags & PF_DO_UNWANT) != 0);
             p->mp->flags &= ~PF_DO_UNWANT;
         }
-        p->mp->flags |= PF_I_WANT;
     }
 }

 void
 peer_unwant(struct peer *p, uint32_t index)
 {
+    if (!has_bit(p->piece_field, index) || peer_has_bad(p, index))
+        return;
     assert(p->nwant > 0);
     p->nwant--;
     if (p->nwant == 0) {
         p->mp->flags &= ~PF_I_WANT;
+        if (p->mp->flags & PF_SUSPECT)
+            return;
         p->t_nointerest = btpd_seconds;
         if (p->nreqs_out == 0)
             peer_send(p, nb_create_uninterest());
@@ -568,6 +579,8 @@ peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
 void
 peer_on_tick(struct peer *p)
 {
+    if (p->mp->flags & PF_BANNED)
+        goto kill;
     if (p->mp->flags & PF_ATTACHED) {
         if (BTPDQ_EMPTY(&p->outq)) {
             if (btpd_seconds - p->t_lastwrite >= 120)
@@ -590,6 +603,52 @@ kill:
     peer_kill(p);
 }

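+/*
+ * Charge the peer with one bad piece. The bad_field bit stops us from
+ * requesting that piece from this peer again. Three strikes with no
+ * good piece in between make the peer PF_SUSPECT: we declare
+ * uninterest and stop leeching from it until a verdict comes in.
+ */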
+void
+peer_bad_piece(struct peer *p, uint32_t index)
+{
+    if (p->npcs_bad == 0) {
+        assert(p->bad_field == NULL);
+        p->bad_field = btpd_calloc(ceil(p->n->tp->npieces / 8.0), 1);
+    }
+    assert(!has_bit(p->bad_field, index));
+    set_bit(p->bad_field, index);
+    p->npcs_bad++;
+    p->suspicion++;
+    if (p->suspicion == 3) {
+        btpd_log(BTPD_L_BAD, "suspect peer %p.\n", p);
+        p->mp->flags |= PF_SUSPECT;
+        if (p->nwant > 0) {
+            p->mp->flags &= ~PF_DO_UNWANT;
+            peer_send(p, nb_create_uninterest());
+        }
+    }
+}
+
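+/*
+ * The peer contributed to a piece that verified. Clear any bad mark
+ * it holds for this piece and reset its suspicion; a peer leaving
+ * PF_SUSPECT becomes interesting again and may get new requests.
+ */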
+void
+peer_good_piece(struct peer *p, uint32_t index)
+{
+    if (peer_has_bad(p, index)) {
+        assert(p->npcs_bad > 0);
+        p->npcs_bad--;
+        if (p->npcs_bad == 0) {
+            free(p->bad_field);
+            p->bad_field = NULL;
+        } else
+            clear_bit(p->bad_field, index);
+    }
+    p->suspicion = 0;
+    if (p->mp->flags & PF_SUSPECT) {
+        btpd_log(BTPD_L_BAD, "unsuspect peer %p.\n", p);
+        p->mp->flags &= ~PF_SUSPECT;
+        if (p->nwant > 0) {
+            assert(p->mp->flags & PF_I_WANT);
+            peer_send(p, nb_create_interest());
+        }
+        if (peer_leech_ok(p))
+            dl_on_download(p);
+    }
+}
+
 int
 peer_chokes(struct peer *p)
 {
@@ -602,6 +661,12 @@ peer_has(struct peer *p, uint32_t index)
     return has_bit(p->piece_field, index);
 }

+int
+peer_has_bad(struct peer *p, uint32_t index)
+{
+    return p->bad_field != NULL && has_bit(p->bad_field, index);
+}
+
 int
 peer_laden(struct peer *p)
 {
@@ -617,7 +682,8 @@ peer_wanted(struct peer *p)
 int
 peer_leech_ok(struct peer *p)
 {
-    return (p->mp->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT;
+    return (p->mp->flags & (PF_BANNED|PF_SUSPECT|PF_I_WANT|PF_P_CHOKE))
+        == PF_I_WANT && !peer_laden(p);
 }

 int
@@ -638,3 +704,9 @@ peer_full(struct peer *p)
 {
     return p->npieces == p->n->tp->npieces;
 }
+
+int
+peer_requestable(struct peer *p, uint32_t index)
+{
+    return peer_has(p, index) && !peer_has_bad(p, index);
+}
@@ -11,6 +11,8 @@
 #define PF_NO_REQUESTS 0x80
 #define PF_INCOMING 0x100
 #define PF_DO_UNWANT 0x200
+#define PF_SUSPECT 0x400
+#define PF_BANNED 0x800

 #define MAXPIECEMSGS 128
 #define MAXPIPEDREQUESTS 10
@@ -61,9 +63,15 @@ int peer_chokes(struct peer *p);
 int peer_wanted(struct peer *p);
 int peer_laden(struct peer *p);
 int peer_has(struct peer *p, uint32_t index);
+int peer_has_bad(struct peer *p, uint32_t index);
 int peer_leech_ok(struct peer *p);
 int peer_full(struct peer *p);
+void peer_bad_piece(struct peer *p, uint32_t index);
+void peer_good_piece(struct peer *p, uint32_t index);
+int peer_requestable(struct peer *p, uint32_t index);

+void mp_hold(struct meta_peer *mp);
+void mp_drop(struct meta_peer *mp, struct net *n);
 void mp_kill(struct meta_peer *mp);

 #endif
@@ -83,6 +83,8 @@ logtype_str(uint32_t type)
     case BTPD_L_CONN: return "conn";
     case BTPD_L_TR: return "tracker";
     case BTPD_L_MSG: return "msg";
+    case BTPD_L_POL: return "policy";
+    case BTPD_L_BAD: return "bad";
     }
     return "";
 }