Procházet zdrojové kódy

There's now one list with requests per piece instead of one per block. The network buffers for requests are now allocated as they're needed; before, they were allocated at the same time as their corresponding piece. This lowers btpd's memory consumption.
master
Richard Nyberg před 19 roky
rodič
revize
9f4070eeb8
5 změnil soubory, kde provedl 90 přidání a 79 odebrání
  1. +15
    -13
      btpd/download.c
  2. +63
    -50
      btpd/download_subr.c
  3. +3
    -8
      btpd/net_types.h
  4. +8
    -7
      btpd/peer.c
  5. +1
    -1
      btpd/peer.h

+ 15
- 13
btpd/download.c Zobrazit soubor

@@ -155,36 +155,38 @@ dl_on_block(struct peer *p, struct block_request *req,
uint32_t index, uint32_t begin, uint32_t length, const uint8_t *data)
{
struct net *n = p->n;
struct block *blk = req->blk;
struct piece *pc = blk->pc;
struct piece *pc = dl_find_piece(n, index);

cm_put_bytes(p->n->tp, index, begin, data, length);
pc->ngot++;

if (n->endgame) {
struct block_request *req;
struct block_request *req, *next;
struct net_buf *cancel = nb_create_cancel(index, begin, length);
nb_hold(cancel);
BTPDQ_FOREACH(req, &blk->reqs, blk_entry) {
if (req->p != p)
peer_cancel(req->p, req, cancel);
pc->nreqs--;
BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
if (nb_get_begin(req->msg) == begin) {
if (req->p != p)
peer_cancel(req->p, req, cancel);
pc->nreqs--;
}
}
nb_drop(cancel);
dl_piece_reorder_eg(pc);
req = BTPDQ_FIRST(&blk->reqs);
while (req != NULL) {
struct block_request *next = BTPDQ_NEXT(req, blk_entry);
BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
if (nb_get_begin(req->msg) != begin)
continue;
BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
nb_drop(req->msg);
if (peer_leech_ok(req->p) && !peer_laden(req->p))
dl_assign_requests_eg(req->p);
free(req);
req = next;
}
BTPDQ_INIT(&blk->reqs);
if (pc->ngot == pc->nblocks)
cm_test_piece(pc->n->tp, pc->index);
} else {
BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
nb_drop(req->msg);
free(req);
pc->nreqs--;
// XXX: Needs to be looked at if we introduce snubbing.


+ 63
- 50
btpd/download_subr.c Zobrazit soubor

@@ -35,13 +35,12 @@ piece_alloc(struct net *n, uint32_t index)
assert(!has_bit(n->busy_field, index)
&& n->npcs_busy < n->tp->meta.npieces);
struct piece *pc;
size_t mem, field, blocks;
size_t mem, field;
unsigned nblocks;

nblocks = torrent_piece_blocks(n->tp, index);
blocks = sizeof(pc->blocks[0]) * nblocks;
field = (size_t)ceil(nblocks / 8.0);
mem = sizeof(*pc) + field + blocks;
mem = sizeof(*pc) + field;

pc = btpd_calloc(1, mem);
pc->n = n;
@@ -59,16 +58,7 @@ piece_alloc(struct net *n, uint32_t index)
pc->ngot++;
assert(pc->ngot < pc->nblocks);

pc->blocks = (struct block *)(pc->down_field + field);
for (unsigned i = 0; i < nblocks; i++) {
uint32_t start = i * PIECE_BLOCKLEN;
uint32_t len = torrent_block_size(n->tp, index, nblocks, i);
struct block *blk = &pc->blocks[i];
blk->pc = pc;
BTPDQ_INIT(&blk->reqs);
blk->msg = nb_create_request(index, start, len);
nb_hold(blk->msg);
}
BTPDQ_INIT(&pc->reqs);

n->npcs_busy++;
set_bit(n->busy_field, index);
@@ -80,18 +70,20 @@ void
piece_free(struct piece *pc)
{
struct net *n = pc->n;
struct block_request *req, *next;
assert(n->npcs_busy > 0);
n->npcs_busy--;
clear_bit(n->busy_field, pc->index);
BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
for (unsigned i = 0; i < pc->nblocks; i++) {
struct block_request *req = BTPDQ_FIRST(&pc->blocks[i].reqs);
while (req != NULL) {
struct block_request *next = BTPDQ_NEXT(req, blk_entry);
free(req);
req = next;
}
nb_drop(pc->blocks[i].msg);
BTPDQ_FOREACH_MUTABLE(req, &pc->reqs, blk_entry, next) {
nb_drop(req->msg);
free(req);
}
if (pc->eg_reqs != NULL) {
for (uint32_t i = 0; i < pc->nblocks; i++)
if (pc->eg_reqs[i] != NULL)
nb_drop(pc->eg_reqs[i]);
free(pc->eg_reqs);
}
free(pc);
}
@@ -161,9 +153,18 @@ dl_enter_endgame(struct net *n)

pi = 0;
BTPDQ_FOREACH(pc, &n->getlst, entry) {
struct block_request *req;
for (unsigned i = 0; i < pc->nblocks; i++)
clear_bit(pc->down_field, i);
pc->nbusy = 0;
pc->eg_reqs = btpd_calloc(pc->nblocks, sizeof(struct net_buf *));
BTPDQ_FOREACH(req, &pc->reqs, blk_entry) {
uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
if (pc->eg_reqs[blki] == NULL) {
pc->eg_reqs[blki] = req->msg;
nb_hold(req->msg);
}
}
pcs[pi] = pc;
pi++;
}
@@ -308,6 +309,30 @@ dl_on_piece_unfull(struct piece *pc)
#define INCNEXTBLOCK(pc) \
(pc)->next_block = ((pc)->next_block + 1) % (pc)->nblocks

/*
 * Create and register a block request for piece pc on behalf of peer p.
 *
 * msg may be an existing request net_buf to reuse (endgame path), or
 * NULL, in which case a fresh request message is built for the piece's
 * current next_block. The new request takes its own reference on the
 * message (nb_hold — presumably reference-counted; paired with the
 * nb_drop calls seen at the free sites elsewhere in this change),
 * is appended to the piece's single per-piece request list, and is
 * handed to peer_request() for transmission. Returns the new request.
 */
static struct block_request *
dl_new_request(struct peer *p, struct piece *pc, struct net_buf *msg)
{
if (msg == NULL) {
/* Build a request message for the block at next_block. */
uint32_t block = pc->next_block;
uint32_t start = block * PIECE_BLOCKLEN;
uint32_t length =
torrent_block_size(pc->n->tp, pc->index, pc->nblocks, block);
msg = nb_create_request(pc->index, start, length);
}
struct block_request *req = btpd_malloc(sizeof(*req));
req->p = p;
req->msg = msg;
/* Take a reference for this request's lifetime. */
nb_hold(req->msg);
BTPDQ_INSERT_TAIL(&pc->reqs, req, blk_entry);
pc->nreqs++;
if (!pc->n->endgame) {
/* Outside endgame each block is requested once: mark it busy. */
set_bit(pc->down_field, pc->next_block);
pc->nbusy++;
}
/* Advance the round-robin block cursor (wraps at nblocks). */
INCNEXTBLOCK(pc);
/* Queue the request message on the peer's outgoing buffer. */
peer_request(p, req);
return req;
}

/*
* Request as many blocks as possible on this piece from
@@ -324,20 +349,8 @@ dl_piece_assign_requests(struct piece *pc, struct peer *p)
while ((has_bit(pc->have_field, pc->next_block)
|| has_bit(pc->down_field, pc->next_block)))
INCNEXTBLOCK(pc);

struct block *blk = &pc->blocks[pc->next_block];
struct block_request *req = btpd_malloc(sizeof(*req));
req->p = p;
req->blk = blk;
BTPDQ_INSERT_TAIL(&blk->reqs, req, blk_entry);

peer_request(p, req);

set_bit(pc->down_field, pc->next_block);
pc->nbusy++;
pc->nreqs++;
dl_new_request(p, pc, NULL);
count++;
INCNEXTBLOCK(pc);
} while (!piece_full(pc) && !peer_laden(p));

if (piece_full(pc))
@@ -392,25 +405,25 @@ dl_unassign_requests(struct peer *p)
{
while (p->nreqs_out > 0) {
struct block_request *req = BTPDQ_FIRST(&p->my_reqs);
struct piece *pc = req->blk->pc;
struct piece *pc = dl_find_piece(p->n, nb_get_index(req->msg));
int was_full = piece_full(pc);

while (req != NULL) {
struct block_request *next = BTPDQ_NEXT(req, p_entry);

uint32_t blki = nb_get_begin(req->blk->msg) / PIECE_BLOCKLEN;
struct block *blk = req->blk;
uint32_t blki = nb_get_begin(req->msg) / PIECE_BLOCKLEN;
// XXX: Needs to be looked at if we introduce snubbing.
assert(has_bit(pc->down_field, blki));
clear_bit(pc->down_field, blki);
pc->nbusy--;
BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
p->nreqs_out--;
BTPDQ_REMOVE(&blk->reqs, req, blk_entry);
BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
nb_drop(req->msg);
free(req);
pc->nreqs--;

while (next != NULL && next->blk->pc != pc)
while (next != NULL && nb_get_index(next->msg) != pc->index)
next = BTPDQ_NEXT(next, p_entry);
req = next;
}
@@ -430,17 +443,16 @@ dl_piece_assign_requests_eg(struct piece *pc, struct peer *p)
unsigned first_block = pc->next_block;
do {
if ((has_bit(pc->have_field, pc->next_block)
|| peer_requested(p, &pc->blocks[pc->next_block]))) {
|| peer_requested(p, pc->index, pc->next_block))) {
INCNEXTBLOCK(pc);
continue;
}
struct block_request *req = btpd_calloc(1, sizeof(*req));
req->blk = &pc->blocks[pc->next_block];
req->p = p;
BTPDQ_INSERT_TAIL(&pc->blocks[pc->next_block].reqs, req, blk_entry);
pc->nreqs++;
INCNEXTBLOCK(pc);
peer_request(p, req);
struct block_request *req =
dl_new_request(p, pc, pc->eg_reqs[pc->next_block]);
if (pc->eg_reqs[pc->next_block] == NULL) {
pc->eg_reqs[pc->next_block] = req->msg;
nb_hold(req->msg);
}
} while (!peer_laden(p) && pc->next_block != first_block);
}

@@ -482,7 +494,7 @@ dl_unassign_requests_eg(struct peer *p)
while (p->nreqs_out > 0) {
req = BTPDQ_FIRST(&p->my_reqs);

pc = req->blk->pc;
pc = dl_find_piece(p->n, nb_get_index(req->msg));
BTPDQ_REMOVE(&pc->n->getlst, pc, entry);
BTPDQ_INSERT_HEAD(&tmp, pc, entry);

@@ -490,11 +502,12 @@ dl_unassign_requests_eg(struct peer *p)
struct block_request *next = BTPDQ_NEXT(req, p_entry);
BTPDQ_REMOVE(&p->my_reqs, req, p_entry);
p->nreqs_out--;
BTPDQ_REMOVE(&req->blk->reqs, req, blk_entry);
BTPDQ_REMOVE(&pc->reqs, req, blk_entry);
nb_drop(req->msg);
free(req);
pc->nreqs--;

while (next != NULL && next->blk->pc != pc)
while (next != NULL && nb_get_index(next->msg) != pc->index)
next = BTPDQ_NEXT(next, p_entry);
req = next;
}


+ 3
- 8
btpd/net_types.h Zobrazit soubor

@@ -90,7 +90,8 @@ struct piece {
unsigned nbusy;
unsigned next_block;

struct block *blocks;
struct net_buf **eg_reqs;
struct block_request_tq reqs;

const uint8_t *have_field;
uint8_t *down_field;
@@ -98,15 +99,9 @@ struct piece {
BTPDQ_ENTRY(piece) entry;
};

struct block {
struct piece *pc;
struct net_buf *msg;
struct block_request_tq reqs;
};

struct block_request {
struct peer *p;
struct block *blk;
struct net_buf *msg;
BTPDQ_ENTRY(block_request) p_entry;
BTPDQ_ENTRY(block_request) blk_entry;
};


+ 8
- 7
btpd/peer.c Zobrazit soubor

@@ -160,15 +160,16 @@ peer_request(struct peer *p, struct block_request *req)
assert(p->nreqs_out < MAXPIPEDREQUESTS);
p->nreqs_out++;
BTPDQ_INSERT_TAIL(&p->my_reqs, req, p_entry);
peer_send(p, req->blk->msg);
peer_send(p, req->msg);
}

int
peer_requested(struct peer *p, struct block *blk)
peer_requested(struct peer *p, uint32_t piece, uint32_t block)
{
uint32_t begin = block * PIECE_BLOCKLEN;
struct block_request *req;
BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
if (req->blk == blk)
if (nb_get_index(req->msg) == piece && nb_get_begin(req->msg) == begin)
return 1;
return 0;
}
@@ -182,7 +183,7 @@ peer_cancel(struct peer *p, struct block_request *req, struct net_buf *nb)
int removed = 0;
struct nb_link *nl;
BTPDQ_FOREACH(nl, &p->outq, entry) {
if (nl->nb == req->blk->msg) {
if (nl->nb == req->msg) {
removed = peer_unsend(p, nl);
break;
}
@@ -448,9 +449,9 @@ peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
{
struct block_request *req;
BTPDQ_FOREACH(req, &p->my_reqs, p_entry)
if ((nb_get_begin(req->blk->msg) == begin &&
nb_get_index(req->blk->msg) == index &&
nb_get_length(req->blk->msg) == length))
if ((nb_get_begin(req->msg) == begin &&
nb_get_index(req->msg) == index &&
nb_get_length(req->msg) == length))
break;
if (req != NULL) {
btpd_log(BTPD_L_MSG, "received piece(%u,%u,%u) from %p\n",


+ 1
- 1
btpd/peer.h Zobrazit soubor

@@ -29,7 +29,7 @@ void peer_request(struct peer *p, struct block_request *req);
void peer_cancel(struct peer *p, struct block_request *req,
struct net_buf *nb);

int peer_requested(struct peer *p, struct block *blk);
int peer_requested(struct peer *p, uint32_t piece, uint32_t block);

void peer_create_in(int sd);
void peer_create_out(struct net *n, const uint8_t *id,


Načítá se…
Zrušit
Uložit