diff --git a/btpd/policy_subr.c b/btpd/policy_subr.c
index 6d17522..8f8e88e 100644
--- a/btpd/policy_subr.c
+++ b/btpd/policy_subr.c
@@ -48,7 +48,7 @@ piece_alloc(struct torrent *tp, uint32_t index)
     pc->down_field = (uint8_t *)(pc + 1);
     pc->have_field =
 	tp->block_field +
-	(size_t)ceil(index * tp->meta.piece_length / (double)(1 << 17));
+	index * (size_t)ceil(tp->meta.piece_length / (double)(1 << 17));
     pc->nblocks = nblocks;
     pc->index = index;
 
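Not part of the patch, for context: piece_alloc reserves ceil(piece_length / 2^17) bytes of block bit field per piece (assuming btpd's 16 KiB blocks, eight blocks per bit-field byte, so one byte covers 1 << 17 bytes of piece data), which means the start offsets have to round up per piece as well. The old cumulative rounding could place neighbouring fields closer together than the space a piece actually uses, so they overlapped. A minimal standalone sketch of the difference:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Bytes of piece data covered by one byte of block bit field:
     * 8 blocks per byte * 16 KiB per block = 1 << 17 (assumed block size). */
    #define FIELD_BYTE_SPAN (1 << 17)

    static size_t
    off_old(uint32_t index, uint32_t piece_length)
    {
        return (size_t)ceil(index * piece_length / (double)FIELD_BYTE_SPAN);
    }

    static size_t
    off_new(uint32_t index, uint32_t piece_length)
    {
        return index * (size_t)ceil(piece_length / (double)FIELD_BYTE_SPAN);
    }

    int
    main(void)
    {
        uint32_t plen = 3 * (1 << 16);  /* 1.5 field bytes per piece, rounds up to 2 */
        for (uint32_t i = 0; i < 4; i++)
            printf("piece %u: old %zu, new %zu (needs 2 bytes)\n",
                i, off_old(i, plen), off_new(i, plen));
        /* old offsets: 0, 2, 3, 5 -> pieces 1 and 2 both touch byte 3
         * new offsets: 0, 2, 4, 6 -> each piece gets its own 2 bytes */
        return 0;
    }
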
@@ -254,6 +254,24 @@ cm_choose_rarest(struct peer *p, uint32_t *res)
     return 0;
 }
 
+/*
+ * Called from either cm_piece_assign_requests or cm_new_piece,
+ * when a piece becomes full. The wanted level of the peers
+ * that have this piece will be decreased. This function is
+ * the only one that may trigger end game.
+ */
+static void
+cm_on_piece_full(struct piece *pc)
+{
+    struct peer *p;
+    BTPDQ_FOREACH(p, &pc->tp->peers, cm_entry) {
+	if (peer_has(p, pc->index))
+	    peer_unwant(p, pc->index);
+    }
+    if (cm_should_enter_endgame(pc->tp))
+	cm_enter_endgame(pc->tp);
+}
+
 /*
  * Allocate the piece indicated by the index for download.
  * There's a small possibility that a piece is fully downloaded
@@ -269,6 +287,7 @@ cm_new_piece(struct torrent *tp, uint32_t index)
     btpd_log(BTPD_L_POL, "Started on piece %u.\n", index);
     struct piece *pc = piece_alloc(tp, index);
     if (pc->ngot == pc->nblocks) {
+	cm_on_piece_full(pc);
 	cm_on_piece(pc);
 	if (cm_should_enter_endgame(tp))
 	    cm_enter_endgame(tp);
@@ -277,25 +296,6 @@ cm_new_piece(struct torrent *tp, uint32_t index)
 	return pc;
 }
 
-/*
- * Called from either cm_piece_assign_requests or cm_new_piece, 
- * when a pice becomes full. The wanted level of the peers
- * that has this piece will be decreased. This function is
- * the only one that may trigger end game.
- */
-static void
-cm_on_piece_full(struct piece *pc)
-{
-    struct peer *p;
-    BTPDQ_FOREACH(p, &pc->tp->peers, cm_entry) {
-	if (peer_has(p, pc->index))
-	    peer_unwant(p, pc->index);
-    }
-    if (cm_should_enter_endgame(pc->tp))
-	cm_enter_endgame(pc->tp);
-}
-
-
 /*
  * Called when a previously full piece loses a peer.
  * This is needed because we have decreased the wanted
@@ -360,7 +360,7 @@ cm_piece_assign_requests(struct piece *pc, struct peer *p)
 unsigned
 cm_assign_requests(struct peer *p)
 {
-    assert(!p->tp->endgame);
+    assert(!p->tp->endgame && !peer_laden(p));
     struct piece *pc;
     struct torrent *tp = p->tp;
     unsigned count = 0;
diff --git a/btpd/torrent.c b/btpd/torrent.c
index 88553f8..d715346 100644
--- a/btpd/torrent.c
+++ b/btpd/torrent.c
@@ -94,7 +94,7 @@ torrent_load2(const char *file, struct metainfo *mi)
 
     memsiz =
 	ceil(mi->npieces / 8.0) +
-	ceil(mi->npieces * mi->piece_length / (double)(1 << 17));
+	mi->npieces * ceil(mi->piece_length / (double)(1 << 17));
 
     if (sb.st_size != memsiz) {
 	btpd_log(BTPD_L_ERROR, "File has wrong size: %s.i.\n", file);
diff --git a/cli/btcli.c b/cli/btcli.c
index 874ef6a..6b5d8b1 100644
--- a/cli/btcli.c
+++ b/cli/btcli.c
@@ -138,9 +138,8 @@ gen_ifile(char *path)
     if ((errno = vopen(&fd, O_WRONLY|O_CREAT, "%s.i", path)) != 0)
 	err(1, "opening %s.i", path);
 
-    if (ftruncate(fd,
-            field_len +
-	    (off_t)ceil(mi->npieces * mi->piece_length / (double)(1<<17))) < 0)
+    if (ftruncate(fd, field_len + mi->npieces *
+	    (off_t)ceil(mi->piece_length / (double)(1 << 17))) < 0)
 	err(1, "ftruncate: %s", path);
 
     if (write(fd, cb.piece_field, field_len) != field_len)
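
The same per-piece rounding now sizes the .i resume file in both gen_ifile (btcli) and torrent_load2 (btpd), so the two stay in agreement. Since a file written with the old layout has a different total size whenever piece_length is not a multiple of 1 << 17, an existing .i file would presumably trip torrent_load2's "File has wrong size" check and need to be regenerated. A quick standalone comparison of the two totals, not taken from the patch:

    #include <math.h>
    #include <stdio.h>

    int
    main(void)
    {
        unsigned npieces = 1000;
        unsigned piece_length = 3 * (1 << 16);  /* not a multiple of 1 << 17 */

        /* piece bit field followed by one fixed-size block bit field per piece */
        double new_size = ceil(npieces / 8.0)
            + npieces * ceil(piece_length / (double)(1 << 17));
        double old_size = ceil(npieces / 8.0)
            + ceil(npieces * piece_length / (double)(1 << 17));

        printf("new: %.0f bytes, old: %.0f bytes\n", new_size, old_size);
        /* new: 2125 bytes, old: 1625 bytes */
        return 0;
    }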