A clone of btpd with my configuration changes.

#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>

#include <assert.h>
#include <math.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>

#include "btpd.h"

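/* Sum the per-slot samples of a RATEHISTORY-sized rate window into one total. */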
unsigned long
peer_get_rate(unsigned long *rates)
{
    unsigned long ret = 0;
    for (int i = 0; i < RATEHISTORY; i++)
        ret += rates[i];
    return ret;
}

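/*
 * Tear down a peer: unlink it from whatever queues it is on, drop its
 * queued network buffers and outstanding requests, kill its reader and
 * close its socket.
 */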
void
peer_kill(struct peer *p)
{
    struct nb_link *nl;
    struct piece_req *req;

    btpd_log(BTPD_L_CONN, "killed peer.\n");

    if (p->flags & PF_ATTACHED)
        cm_on_lost_peer(p);
    else
        BTPDQ_REMOVE(&btpd.unattached, p, cm_entry);
    if (p->flags & PF_ON_READQ)
        BTPDQ_REMOVE(&btpd.readq, p, rq_entry);
    if (p->flags & PF_ON_WRITEQ)
        BTPDQ_REMOVE(&btpd.writeq, p, wq_entry);

    /* Unregister the events before closing their descriptor. */
    event_del(&p->in_ev);
    event_del(&p->out_ev);
    close(p->sd);

    nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        nb_drop(nl->nb);
        free(nl);
        nl = next;
    }
    req = BTPDQ_FIRST(&p->my_reqs);
    while (req != NULL) {
        struct piece_req *next = BTPDQ_NEXT(req, entry);
        free(req);
        req = next;
    }

    p->reader->kill(p->reader);
    if (p->piece_field != NULL)
        free(p->piece_field);
    free(p);
    btpd.npeers--;
}

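/* Record an outgoing block request and send it to the peer. */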
void
peer_request(struct peer *p, uint32_t index, uint32_t begin, uint32_t len)
{
    p->nreqs_out++;
    struct piece_req *req = btpd_calloc(1, sizeof(*req));
    req->index = index;
    req->begin = begin;
    req->length = len;
    BTPDQ_INSERT_TAIL(&p->my_reqs, req, entry);
    net_send_request(p, req);
}

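/*
 * Cancel every outstanding request matching index/begin/length. The scan
 * restarts after each hit so that duplicate requests are cancelled too.
 */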
void
peer_cancel(struct peer *p, uint32_t index, uint32_t begin, uint32_t len)
{
    struct piece_req *req;
again:
    req = BTPDQ_FIRST(&p->my_reqs);
    while (req != NULL &&
           !(index == req->index &&
             begin == req->begin &&
             len == req->length))
        req = BTPDQ_NEXT(req, entry);

    if (req != NULL) {
        net_send_cancel(p, req);
        BTPDQ_REMOVE(&p->my_reqs, req, entry);
        free(req);
        p->nreqs_out--;
        goto again;
    }
}

void
peer_have(struct peer *p, uint32_t index)
{
    net_send_have(p, index);
}

void
peer_unchoke(struct peer *p)
{
    p->flags &= ~PF_I_CHOKE;
    net_send_unchoke(p);
}

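/*
 * Choke the peer. Piece messages still waiting in the output queue are
 * withdrawn; each NB_PIECE header is followed by its data buffer, so the
 * two are unsent as a pair.
 */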
void
peer_choke(struct peer *p)
{
    struct nb_link *nl = BTPDQ_FIRST(&p->outq);
    while (nl != NULL) {
        struct nb_link *next = BTPDQ_NEXT(nl, entry);
        if (nl->nb->info.type == NB_PIECE) {
            struct nb_link *data = next;
            next = BTPDQ_NEXT(next, entry);
            if (net_unsend(p, nl))
                net_unsend(p, data);
        }
        nl = next;
    }
    p->flags |= PF_I_CHOKE;
    net_send_choke(p);
}

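/*
 * peer_want/peer_unwant reference-count the pieces we want from this
 * peer; INTEREST/UNINTEREST is only sent when the count leaves or
 * returns to zero.
 */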
void
peer_want(struct peer *p, uint32_t index)
{
    assert(p->nwant < p->npieces);
    p->nwant++;
    if (p->nwant == 1) {
        p->flags |= PF_I_WANT;
        net_send_interest(p);
    }
}

void
peer_unwant(struct peer *p, uint32_t index)
{
    assert(p->nwant > 0);
    p->nwant--;
    if (p->nwant == 0) {
        p->flags &= ~PF_I_WANT;
        net_send_uninterest(p);
    }
}

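/*
 * Allocate a peer in the initial "both sides choked" state, hook its
 * socket up to the event loop and put it on the unattached queue.
 */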
static struct peer *
peer_create_common(int sd)
{
    struct peer *p = btpd_calloc(1, sizeof(*p));

    p->sd = sd;
    p->flags = PF_I_CHOKE | PF_P_CHOKE;
    BTPDQ_INIT(&p->my_reqs);
    BTPDQ_INIT(&p->outq);

    event_set(&p->out_ev, p->sd, EV_WRITE, net_write_cb, p);
    event_set(&p->in_ev, p->sd, EV_READ, net_read_cb, p);
    event_add(&p->in_ev, NULL);

    BTPDQ_INSERT_TAIL(&btpd.unattached, p, cm_entry);
    btpd.npeers++;
    return p;
}

void
peer_create_in(int sd)
{
    struct peer *p = peer_create_common(sd);
    net_handshake(p, 1);
}

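/* Actively connect to a peer given as an ip string and a port. */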
void
peer_create_out(struct torrent *tp, const uint8_t *id,
    const char *ip, int port)
{
    int sd;
    struct peer *p;

    if (net_connect(ip, port, &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->tp = tp;
    net_handshake(p, 0);
}

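/*
 * Connect to a peer given in the tracker's compact format: 4 bytes of
 * IPv4 address followed by 2 bytes of port, both in network byte order.
 */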
void
peer_create_out_compact(struct torrent *tp, const char *compact)
{
    int sd;
    struct peer *p;
    struct sockaddr_in addr;

    addr.sin_family = AF_INET;
    /* Copy the 4-byte address and 2-byte port instead of type-punning;
       the values are already in network byte order. */
    memcpy(&addr.sin_addr.s_addr, compact, 4);
    memcpy(&addr.sin_port, compact + 4, 2);
    if (net_connect2((struct sockaddr *)&addr, sizeof(addr), &sd) != 0)
        return;

    p = peer_create_common(sd);
    p->tp = tp;
    net_handshake(p, 0);
}

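/*
 * Handlers for the remote side's choke and interest messages. Each one
 * ignores duplicates, updates the flag and forwards the event to the
 * cm_* layer.
 */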
void
peer_on_choke(struct peer *p)
{
    if ((p->flags & PF_P_CHOKE) != 0)
        return;
    p->flags |= PF_P_CHOKE;
    cm_on_choke(p);
}

void
peer_on_unchoke(struct peer *p)
{
    if ((p->flags & PF_P_CHOKE) == 0)
        return;
    p->flags &= ~PF_P_CHOKE;
    cm_on_unchoke(p);
}

void
peer_on_interest(struct peer *p)
{
    if ((p->flags & PF_P_WANT) != 0)
        return;
    p->flags |= PF_P_WANT;
    cm_on_interest(p);
}

void
peer_on_uninterest(struct peer *p)
{
    if ((p->flags & PF_P_WANT) == 0)
        return;
    p->flags &= ~PF_P_WANT;
    cm_on_uninterest(p);
}

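/* A HAVE message: mark the piece and announce it if it is new to us. */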
void
peer_on_have(struct peer *p, uint32_t index)
{
    if (!has_bit(p->piece_field, index)) {
        set_bit(p->piece_field, index);
        p->npieces++;
        cm_on_piece_ann(p, index);
    }
}

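/*
 * A BITFIELD message: copy the peer's piece field and announce every
 * piece it claims to have.
 */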
void
peer_on_bitfield(struct peer *p, uint8_t *field)
{
    assert(p->npieces == 0);
    bcopy(field, p->piece_field, (size_t)ceil(p->tp->meta.npieces / 8.0));
    for (uint32_t i = 0; i < p->tp->meta.npieces; i++) {
        if (has_bit(p->piece_field, i)) {
            p->npieces++;
            cm_on_piece_ann(p, i);
        }
    }
}

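/*
 * A PIECE (block) message. Only a block matching our oldest outstanding
 * request is accepted and passed on; anything else is ignored.
 */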
void
peer_on_piece(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length, const char *data)
{
    struct piece_req *req = BTPDQ_FIRST(&p->my_reqs);
    if (req != NULL &&
        req->index == index &&
        req->begin == begin &&
        req->length == length) {
        assert(p->nreqs_out > 0);
        p->nreqs_out--;
        BTPDQ_REMOVE(&p->my_reqs, req, entry);
        free(req);
        cm_on_block(p, index, begin, length, data);
    }
}

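/* A REQUEST message: fetch the block from the torrent's content and
   queue it for sending. */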
void
peer_on_request(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    /* Widen before multiplying so the offset cannot overflow 32 bits. */
    off_t cbegin = (off_t)index * p->tp->meta.piece_length + begin;
    char *content = torrent_get_bytes(p->tp, cbegin, length);
    net_send_piece(p, index, begin, content, length);
}

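/*
 * A CANCEL message: withdraw the matching piece message, header and data
 * buffer both, if it is still waiting in the output queue.
 */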
void
peer_on_cancel(struct peer *p, uint32_t index, uint32_t begin,
    uint32_t length)
{
    struct nb_link *nl;

    BTPDQ_FOREACH(nl, &p->outq, entry)
        if (nl->nb->info.type == NB_PIECE
            && nl->nb->info.index == index
            && nl->nb->info.begin == begin
            && nl->nb->info.length == length) {
            struct nb_link *data = BTPDQ_NEXT(nl, entry);
            if (net_unsend(p, nl))
                net_unsend(p, data);
            break;
        }
}

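/* Predicates over a peer's current state. */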
int
peer_chokes(struct peer *p)
{
    return p->flags & PF_P_CHOKE;
}

int
peer_has(struct peer *p, uint32_t index)
{
    return has_bit(p->piece_field, index);
}

int
peer_laden(struct peer *p)
{
    return p->nreqs_out >= MAXPIPEDREQUESTS;
}

int
peer_wanted(struct peer *p)
{
    return (p->flags & PF_I_WANT) == PF_I_WANT;
}

int
peer_leech_ok(struct peer *p)
{
    return (p->flags & (PF_I_WANT|PF_P_CHOKE)) == PF_I_WANT;
}