A clone of btpd with my configuration changes.


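/*
 * Peer networking for btpd: listening sockets, the per-peer read and
 * write paths, the BitTorrent wire-protocol state machine, and the
 * per-second rate accounting and bandwidth limiting. (Descriptive
 * comments below summarize what the code itself does.)
 */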
#include "btpd.h"

#include <sys/uio.h>
#include <netdb.h>

static unsigned long m_bw_bytes_in;
static unsigned long m_bw_bytes_out;

static unsigned long m_rate_up;
static unsigned long m_rate_dwn;

struct net_listener {
    int sd;
    struct fdev ev;
};

static int m_nlisteners;
static struct net_listener *m_net_listeners;

unsigned net_npeers;

struct peer_tq net_bw_readq = BTPDQ_HEAD_INITIALIZER(net_bw_readq);
struct peer_tq net_bw_writeq = BTPDQ_HEAD_INITIALIZER(net_bw_writeq);
struct peer_tq net_unattached = BTPDQ_HEAD_INITIALIZER(net_unattached);

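/* Return nonzero if a peer with the given 20-byte id is already
 * attached to this torrent's network state. */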
int
net_torrent_has_peer(struct net *n, const uint8_t *id)
{
    int has = 0;
    struct peer *p = BTPDQ_FIRST(&n->peers);
    while (p != NULL) {
        if (bcmp(p->id, id, 20) == 0) {
            has = 1;
            break;
        }
        p = BTPDQ_NEXT(p, p_entry);
    }
    return has;
}

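/*
 * Allocate the per-torrent network state in one block: the struct net
 * itself, followed by the busy_field bitmap (one bit per piece) and the
 * piece_count array (one counter per piece). The pointer arithmetic
 * below just carves up that single allocation.
 */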
void
net_create(struct torrent *tp)
{
    size_t field_size = ceil(tp->npieces / 8.0);
    size_t mem = sizeof(*(tp->net)) + field_size +
        tp->npieces * sizeof(*(tp->net->piece_count));

    struct net *n = btpd_calloc(1, mem);
    n->tp = tp;
    tp->net = n;

    BTPDQ_INIT(&n->getlst);

    n->busy_field = (uint8_t *)(n + 1);
    n->piece_count = (unsigned *)(n->busy_field + field_size);
}

void
net_kill(struct torrent *tp)
{
    free(tp->net);
    tp->net = NULL;
}

void
net_start(struct torrent *tp)
{
    struct net *n = tp->net;
    n->active = 1;
}

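/*
 * Deactivate a torrent's networking: zero its rates, notify the upload
 * scheduler, free every piece on the download list, and kill all peers
 * that belong to this torrent, whether attached or still unattached.
 */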
void
net_stop(struct torrent *tp)
{
    struct net *n = tp->net;

    n->active = 0;
    n->rate_up = 0;
    n->rate_dwn = 0;

    ul_on_lost_torrent(n);

    struct piece *pc;
    while ((pc = BTPDQ_FIRST(&n->getlst)) != NULL)
        piece_free(pc);
    BTPDQ_INIT(&n->getlst);

    struct peer *p = BTPDQ_FIRST(&net_unattached);
    while (p != NULL) {
        struct peer *next = BTPDQ_NEXT(p, p_entry);
        if (p->n == n)
            peer_kill(p);
        p = next;
    }

    p = BTPDQ_FIRST(&n->peers);
    while (p != NULL) {
        struct peer *next = BTPDQ_NEXT(p, p_entry);
        peer_kill(p);
        p = next;
    }
}

int
net_active(struct torrent *tp)
{
    return tp->net->active;
}

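/*
 * Flush as much of the peer's output queue as one writev() allows,
 * honoring the bandwidth budget wmax (0 means unlimited). Piece payload
 * buffers are filled from disk lazily just before being sent, and
 * BLOCK_MEM_COUNT bounds how many payload blocks are gathered per call.
 * Returns the number of bytes written, or 0 on EAGAIN or when the peer
 * has been killed.
 */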
#define BLOCK_MEM_COUNT 1

static unsigned long
net_write(struct peer *p, unsigned long wmax)
{
    struct nb_link *nl;
    struct iovec iov[IOV_MAX];
    int niov;
    int limited;
    ssize_t nwritten;
    unsigned long bcount;
    int block_count = 0;

    limited = wmax > 0;

    niov = 0;
    assert((nl = BTPDQ_FIRST(&p->outq)) != NULL);
    if (nl->nb->type == NB_TORRENTDATA)
        block_count = 1;
    while (niov < IOV_MAX && nl != NULL && (!limited || wmax > 0)) {
        if (nl->nb->type == NB_PIECE) {
            if (block_count >= BLOCK_MEM_COUNT)
                break;
            struct net_buf *tdata = BTPDQ_NEXT(nl, entry)->nb;
            if (tdata->buf == NULL) {
                if (nb_torrentdata_fill(tdata, p->n->tp, nb_get_index(nl->nb),
                        nb_get_begin(nl->nb), nb_get_length(nl->nb)) != 0) {
                    peer_kill(p);
                    return 0;
                }
            }
            block_count++;
        }
        if (niov > 0) {
            iov[niov].iov_base = nl->nb->buf;
            iov[niov].iov_len = nl->nb->len;
        } else {
            iov[niov].iov_base = nl->nb->buf + p->outq_off;
            iov[niov].iov_len = nl->nb->len - p->outq_off;
        }
        if (limited) {
            if (iov[niov].iov_len > wmax)
                iov[niov].iov_len = wmax;
            wmax -= iov[niov].iov_len;
        }
        niov++;
        nl = BTPDQ_NEXT(nl, entry);
    }

    nwritten = writev(p->sd, iov, niov);
    if (nwritten < 0) {
        if (errno == EAGAIN) {
            p->t_wantwrite = btpd_seconds;
            return 0;
        } else {
            btpd_log(BTPD_L_CONN, "write error: %s\n", strerror(errno));
            peer_kill(p);
            return 0;
        }
    } else if (nwritten == 0) {
        btpd_log(BTPD_L_CONN, "connection closed by peer.\n");
        peer_kill(p);
        return 0;
    }

    bcount = nwritten;
    nl = BTPDQ_FIRST(&p->outq);
    while (bcount > 0) {
        unsigned long bufdelta = nl->nb->len - p->outq_off;
        if (bcount >= bufdelta) {
            peer_sent(p, nl->nb);
            if (nl->nb->type == NB_TORRENTDATA) {
                p->n->uploaded += bufdelta;
                p->count_up += bufdelta;
            }
            bcount -= bufdelta;
            BTPDQ_REMOVE(&p->outq, nl, entry);
            nb_drop(nl->nb);
            free(nl);
            p->outq_off = 0;
            nl = BTPDQ_FIRST(&p->outq);
        } else {
            if (nl->nb->type == NB_TORRENTDATA) {
                p->n->uploaded += bcount;
                p->count_up += bcount;
            }
            p->outq_off += bcount;
            bcount = 0;
        }
    }
    if (!BTPDQ_EMPTY(&p->outq))
        p->t_wantwrite = btpd_seconds;
    else
        btpd_ev_disable(&p->ioev, EV_WRITE);
    p->t_lastwrite = btpd_seconds;

    return nwritten;
}

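/*
 * Act on a completely received message; buf points at the message body.
 * Returns nonzero if the message was invalid, in which case the caller
 * drops the peer.
 */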
static int
net_dispatch_msg(struct peer *p, const char *buf)
{
    uint32_t index, begin, length;
    int res = 0;

    switch (p->in.msg_num) {
    case MSG_CHOKE:
        peer_on_choke(p);
        break;
    case MSG_UNCHOKE:
        peer_on_unchoke(p);
        break;
    case MSG_INTEREST:
        peer_on_interest(p);
        break;
    case MSG_UNINTEREST:
        peer_on_uninterest(p);
        break;
    case MSG_HAVE:
        peer_on_have(p, dec_be32(buf));
        break;
    case MSG_BITFIELD:
        if (p->npieces == 0)
            peer_on_bitfield(p, buf);
        else
            res = 1;
        break;
    case MSG_REQUEST:
        if ((p->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
            index = dec_be32(buf);
            begin = dec_be32(buf + 4);
            length = dec_be32(buf + 8);
            if (length > PIECE_BLOCKLEN
                || index >= p->n->tp->npieces
                || !cm_has_piece(p->n->tp, index)
                || begin + length > torrent_piece_size(p->n->tp, index)) {
                btpd_log(BTPD_L_MSG, "bad request: (%u, %u, %u) from %p\n",
                    index, begin, length, p);
                res = 1;
                break;
            }
            peer_on_request(p, index, begin, length);
        }
        break;
    case MSG_CANCEL:
        index = dec_be32(buf);
        begin = dec_be32(buf + 4);
        length = dec_be32(buf + 8);
        peer_on_cancel(p, index, begin, length);
        break;
    case MSG_PIECE:
        length = p->in.msg_len - 9;
        peer_on_piece(p, p->in.pc_index, p->in.pc_begin, length, buf);
        break;
    default:
        abort();
    }
    return res;
}

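/* Sanity-check the declared message length against the message type
 * before the body is read. */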
static int
net_mh_ok(struct peer *p)
{
    uint32_t mlen = p->in.msg_len;
    switch (p->in.msg_num) {
    case MSG_CHOKE:
    case MSG_UNCHOKE:
    case MSG_INTEREST:
    case MSG_UNINTEREST:
        return mlen == 1;
    case MSG_HAVE:
        return mlen == 5;
    case MSG_BITFIELD:
        return mlen == (uint32_t)ceil(p->n->tp->npieces / 8.0) + 1;
    case MSG_REQUEST:
    case MSG_CANCEL:
        return mlen == 13;
    case MSG_PIECE:
        return mlen <= PIECE_BLOCKLEN + 9;
    default:
        return 0;
    }
}

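/* Count received bytes as download progress, but only for piece
 * payload, not protocol chatter. */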
static void
net_progress(struct peer *p, size_t length)
{
    if (p->in.state == BTP_MSGBODY && p->in.msg_num == MSG_PIECE) {
        p->n->downloaded += length;
        p->count_dwn += length;
    }
}

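/*
 * The incoming-data state machine. Each state declares, via
 * peer_set_in_state(), how many bytes it needs next; net_read() calls
 * back here once that many bytes have accumulated. The states walk the
 * handshake (protocol string, info hash, peer id) and then loop over
 * length-prefixed messages. Returns -1 after killing the peer on bad
 * input, 0 otherwise.
 */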
static int
net_state(struct peer *p, const char *buf)
{
    switch (p->in.state) {
    case SHAKE_PSTR:
        if (bcmp(buf, "\x13""BitTorrent protocol", 20) != 0)
            goto bad;
        peer_set_in_state(p, SHAKE_INFO, 20);
        break;
    case SHAKE_INFO:
        if (p->flags & PF_INCOMING) {
            struct torrent *tp = torrent_by_hash(buf);
            if (tp == NULL || !net_active(tp))
                goto bad;
            p->n = tp->net;
            peer_send(p, nb_create_shake(tp));
        } else if (bcmp(buf, p->n->tp->tl->hash, 20) != 0)
            goto bad;
        peer_set_in_state(p, SHAKE_ID, 20);
        break;
    case SHAKE_ID:
        if (net_torrent_has_peer(p->n, buf)
            || bcmp(buf, btpd_get_peer_id(), 20) == 0)
            goto bad;
        bcopy(buf, p->id, 20);
        peer_on_shake(p);
        peer_set_in_state(p, BTP_MSGSIZE, 4);
        break;
    case BTP_MSGSIZE:
        p->in.msg_len = dec_be32(buf);
        if (p->in.msg_len == 0)
            peer_on_keepalive(p);
        else
            peer_set_in_state(p, BTP_MSGHEAD, 1);
        break;
    case BTP_MSGHEAD:
        p->in.msg_num = buf[0];
        if (!net_mh_ok(p))
            goto bad;
        else if (p->in.msg_len == 1) {
            if (net_dispatch_msg(p, buf) != 0)
                goto bad;
            peer_set_in_state(p, BTP_MSGSIZE, 4);
        } else if (p->in.msg_num == MSG_PIECE)
            peer_set_in_state(p, BTP_PIECEMETA, 8);
        else
            peer_set_in_state(p, BTP_MSGBODY, p->in.msg_len - 1);
        break;
    case BTP_PIECEMETA:
        p->in.pc_index = dec_be32(buf);
        p->in.pc_begin = dec_be32(buf + 4);
        peer_set_in_state(p, BTP_MSGBODY, p->in.msg_len - 9);
        break;
    case BTP_MSGBODY:
        if (net_dispatch_msg(p, buf) != 0)
            goto bad;
        peer_set_in_state(p, BTP_MSGSIZE, 4);
        break;
    default:
        abort();
    }

    return 0;

bad:
    btpd_log(BTPD_L_CONN, "bad data from %p (%u, %u, %u).\n",
        p, p->in.state, p->in.msg_len, p->in.msg_num);
    peer_kill(p);
    return -1;
}

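/*
 * Read up to rmax bytes (0 means unlimited) into a two-part iovec: the
 * first part completes any partially received state buffer, the second
 * is a stack scratch buffer. Complete states are fed to net_state();
 * any tail that does not complete a state is saved in p->in.buf for the
 * next call. Returns the number of bytes read.
 */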
#define GRBUFLEN (1 << 15)

static unsigned long
net_read(struct peer *p, unsigned long rmax)
{
    size_t rest = p->in.buf != NULL ? p->in.st_bytes - p->in.off : 0;
    char buf[GRBUFLEN];
    struct iovec iov[2] = {
        {
            p->in.buf + p->in.off,
            rest
        }, {
            buf,
            sizeof(buf)
        }
    };

    if (rmax > 0) {
        if (iov[0].iov_len > rmax)
            iov[0].iov_len = rmax;
        iov[1].iov_len = min(rmax - iov[0].iov_len, iov[1].iov_len);
    }

    ssize_t nread = readv(p->sd, iov, 2);
    if (nread < 0 && errno == EAGAIN)
        goto out;
    else if (nread < 0) {
        btpd_log(BTPD_L_CONN, "Read error (%s) on %p.\n", strerror(errno), p);
        peer_kill(p);
        return 0;
    } else if (nread == 0) {
        btpd_log(BTPD_L_CONN, "Connection closed by %p.\n", p);
        peer_kill(p);
        return 0;
    }

    if (rest > 0) {
        if (nread < rest) {
            p->in.off += nread;
            net_progress(p, nread);
            goto out;
        }
        net_progress(p, rest);
        if (net_state(p, p->in.buf) != 0)
            return nread;
        free(p->in.buf);
        p->in.buf = NULL;
        p->in.off = 0;
    }

    iov[1].iov_len = nread - rest;
    while (p->in.st_bytes <= iov[1].iov_len) {
        size_t consumed = p->in.st_bytes;
        net_progress(p, consumed);
        if (net_state(p, iov[1].iov_base) != 0)
            return nread;
        iov[1].iov_base += consumed;
        iov[1].iov_len -= consumed;
    }

    if (iov[1].iov_len > 0) {
        net_progress(p, iov[1].iov_len);
        p->in.off = iov[1].iov_len;
        p->in.buf = btpd_malloc(p->in.st_bytes);
        bcopy(iov[1].iov_base, p->in.buf, iov[1].iov_len);
    }

out:
    return nread > 0 ? nread : 0;
}

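/*
 * Start a non-blocking connect to the given address; EINPROGRESS is the
 * expected result rather than an error (the usual non-blocking connect
 * pattern, with completion observed later via socket writability).
 */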
int
net_connect_addr(int family, struct sockaddr *sa, socklen_t salen, int *sd)
{
    if ((*sd = socket(family, SOCK_STREAM, 0)) == -1)
        return errno;

    set_nonblocking(*sd);

    if (connect(*sd, sa, salen) == -1 && errno != EINPROGRESS) {
        int err = errno;
        btpd_log(BTPD_L_CONN, "Botched connection %s.\n", strerror(errno));
        close(*sd);
        return err;
    }

    return 0;
}

int
net_connect_name(const char *ip, int port, int *sd)
{
    struct addrinfo hints, *res;
    char portstr[6];

    assert(net_npeers < net_max_peers);

    if (snprintf(portstr, sizeof(portstr), "%d", port) >= sizeof(portstr))
        return EINVAL;

    bzero(&hints, sizeof(hints));
    hints.ai_family = net_af_spec();
    hints.ai_flags = AI_NUMERICHOST;
    hints.ai_socktype = SOCK_STREAM;
    if (getaddrinfo(ip, portstr, &hints, &res) != 0)
        return errno;

    int error =
        net_connect_addr(res->ai_family, res->ai_addr, res->ai_addrlen, sd);
    freeaddrinfo(res);
    return error;
}

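/*
 * Accept callback for the listening sockets. Transient accept failures
 * are ignored; the new socket is made non-blocking and handed to the
 * peer layer unless the peer limit has been reached.
 */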
void
net_connection_cb(int sd, short type, void *arg)
{
    int nsd;

    nsd = accept(sd, NULL, NULL);
    if (nsd < 0) {
        if (errno == EWOULDBLOCK || errno == ECONNABORTED)
            return;
        else
            btpd_err("accept: %s\n", strerror(errno));
    }

    if (set_nonblocking(nsd) != 0) {
        close(nsd);
        return;
    }

    assert(net_npeers <= net_max_peers);
    if (net_npeers == net_max_peers) {
        close(nsd);
        return;
    }

    peer_create_in(nsd);

    btpd_log(BTPD_L_CONN, "got connection.\n");
}

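/*
 * Once per second each rate counter decays by roughly 1/RATEHISTORY and
 * the bytes moved that second are added, yielding a sliding average
 * over about RATEHISTORY seconds. compute_rate_sub() returns the amount
 * to subtract, with a floor so that small rates can drain to zero.
 */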
static unsigned long
compute_rate_sub(unsigned long rate)
{
    if (rate > 256 * RATEHISTORY)
        return rate / RATEHISTORY;
    else
        return min(256, rate);
}

static void
compute_rates(void)
{
    unsigned long tot_up = 0, tot_dwn = 0;
    struct torrent *tp;
    BTPDQ_FOREACH(tp, torrent_get_all(), entry) {
        unsigned long tp_up = 0, tp_dwn = 0;
        struct net *n = tp->net;
        struct peer *p;
        BTPDQ_FOREACH(p, &n->peers, p_entry) {
            if (p->count_up > 0 || peer_active_up(p)) {
                tp_up += p->count_up;
                p->rate_up += p->count_up - compute_rate_sub(p->rate_up);
                p->count_up = 0;
            }
            if (p->count_dwn > 0 || peer_active_down(p)) {
                tp_dwn += p->count_dwn;
                p->rate_dwn += p->count_dwn - compute_rate_sub(p->rate_dwn);
                p->count_dwn = 0;
            }
        }
        n->rate_up += tp_up - compute_rate_sub(n->rate_up);
        n->rate_dwn += tp_dwn - compute_rate_sub(n->rate_dwn);
        tot_up += tp_up;
        tot_dwn += tp_dwn;
    }
    m_rate_up += tot_up - compute_rate_sub(m_rate_up);
    m_rate_dwn += tot_dwn - compute_rate_sub(m_rate_dwn);
}

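/*
 * Once per second: refill the global bandwidth budgets, then drain the
 * peers that were parked on the read/write queues when the previous
 * budget ran out, re-enabling their I/O events as they go.
 */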
static void
net_bw_tick(void)
{
    struct peer *p;

    m_bw_bytes_out = net_bw_limit_out;
    m_bw_bytes_in = net_bw_limit_in;

    if (net_bw_limit_in > 0) {
        while ((p = BTPDQ_FIRST(&net_bw_readq)) != NULL && m_bw_bytes_in > 0) {
            BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
            btpd_ev_enable(&p->ioev, EV_READ);
            p->flags &= ~PF_ON_READQ;
            m_bw_bytes_in -= net_read(p, m_bw_bytes_in);
        }
    } else {
        while ((p = BTPDQ_FIRST(&net_bw_readq)) != NULL) {
            BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
            btpd_ev_enable(&p->ioev, EV_READ);
            p->flags &= ~PF_ON_READQ;
            net_read(p, 0);
        }
    }

    if (net_bw_limit_out > 0) {
        while ((p = BTPDQ_FIRST(&net_bw_writeq)) != NULL
            && m_bw_bytes_out > 0) {
            BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
            btpd_ev_enable(&p->ioev, EV_WRITE);
            p->flags &= ~PF_ON_WRITEQ;
            m_bw_bytes_out -= net_write(p, m_bw_bytes_out);
        }
    } else {
        while ((p = BTPDQ_FIRST(&net_bw_writeq)) != NULL) {
            BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
            btpd_ev_enable(&p->ioev, EV_WRITE);
            p->flags &= ~PF_ON_WRITEQ;
            net_write(p, 0);
        }
    }
}

static void
run_peer_ticks(void)
{
    struct torrent *tp;
    struct peer *p, *next;

    BTPDQ_FOREACH_MUTABLE(p, &net_unattached, p_entry, next)
        peer_on_tick(p);

    BTPDQ_FOREACH(tp, torrent_get_all(), entry)
        BTPDQ_FOREACH_MUTABLE(p, &tp->net->peers, p_entry, next)
            peer_on_tick(p);
}

void
net_on_tick(void)
{
    run_peer_ticks();
    compute_rates();
    net_bw_tick();
}

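/*
 * Event-loop entry points for peer I/O. When a bandwidth limit is set
 * and this second's budget is spent, the peer's event is disabled and
 * the peer is queued until net_bw_tick() refills the budget.
 */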
static void
net_read_cb(struct peer *p)
{
    if (net_bw_limit_in == 0)
        net_read(p, 0);
    else if (m_bw_bytes_in > 0)
        m_bw_bytes_in -= net_read(p, m_bw_bytes_in);
    else {
        btpd_ev_disable(&p->ioev, EV_READ);
        p->flags |= PF_ON_READQ;
        BTPDQ_INSERT_TAIL(&net_bw_readq, p, rq_entry);
    }
}

static void
net_write_cb(struct peer *p)
{
    if (net_bw_limit_out == 0)
        net_write(p, 0);
    else if (m_bw_bytes_out > 0)
        m_bw_bytes_out -= net_write(p, m_bw_bytes_out);
    else {
        btpd_ev_disable(&p->ioev, EV_WRITE);
        p->flags |= PF_ON_WRITEQ;
        BTPDQ_INSERT_TAIL(&net_bw_writeq, p, wq_entry);
    }
}

void
net_io_cb(int sd, short type, void *arg)
{
    switch (type) {
    case EV_READ:
        net_read_cb(arg);
        break;
    case EV_WRITE:
        net_write_cb(arg);
        break;
    default:
        abort();
    }
}

int
net_af_spec(void)
{
    if (net_ipv4 && net_ipv6)
        return AF_UNSPEC;
    else if (net_ipv4)
        return AF_INET;
    else
        return AF_INET6;
}

void
net_shutdown(void)
{
    for (int i = 0; i < m_nlisteners; i++) {
        btpd_ev_del(&m_net_listeners[i].ev);
        close(m_net_listeners[i].sd);
    }
}

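/*
 * Bind and listen on every address getaddrinfo() reports for the
 * configured port and address families, cap net_max_peers to a safe
 * fraction of the file-descriptor table, and register a read event for
 * each listening socket.
 */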
void
net_init(void)
{
    m_bw_bytes_out = net_bw_limit_out;
    m_bw_bytes_in = net_bw_limit_in;

    int safe_fds = getdtablesize() * 4 / 5;
    if (net_max_peers == 0 || net_max_peers > safe_fds)
        net_max_peers = safe_fds;

    int count = 0, flag = 1, found_ipv4 = 0, found_ipv6 = 0, sd;
    char portstr[6];
    struct addrinfo hints, *res, *ai;

    bzero(&hints, sizeof(hints));
    hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE | AI_NUMERICSERV;
    hints.ai_family = net_af_spec();
    hints.ai_socktype = SOCK_STREAM;
    snprintf(portstr, sizeof(portstr), "%hd", net_port);
    if ((errno = getaddrinfo(NULL, portstr, &hints, &res)) != 0)
        btpd_err("getaddrinfo failed (%s).\n", gai_strerror(errno));

    for (ai = res; ai != NULL; ai = ai->ai_next) {
        count++;
        if (ai->ai_family == AF_INET)
            found_ipv4 = 1;
        else
            found_ipv6 = 1;
    }
    net_ipv4 = found_ipv4;
    net_ipv6 = found_ipv6;
    if (!net_ipv4 && !net_ipv6)
        btpd_err("no usable address found. wrong use of -4/-6 perhaps.\n");

    m_nlisteners = count;
    m_net_listeners = btpd_calloc(count, sizeof(*m_net_listeners));
    for (ai = res; ai != NULL; ai = ai->ai_next) {
        count--;
        if ((sd = socket(ai->ai_family, ai->ai_socktype, 0)) == -1)
            btpd_err("failed to create socket (%s).\n", strerror(errno));
        setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag));
#ifdef IPV6_V6ONLY
        if (ai->ai_family == AF_INET6)
            setsockopt(sd, IPPROTO_IPV6, IPV6_V6ONLY, &flag, sizeof(flag));
#endif
        if (bind(sd, ai->ai_addr, ai->ai_addrlen) == -1)
            btpd_err("bind failed (%s).\n", strerror(errno));
        listen(sd, 10);
        set_nonblocking(sd);
        m_net_listeners[count].sd = sd;
        btpd_ev_new(&m_net_listeners[count].ev, sd, EV_READ,
            net_connection_cb, NULL);
    }
    freeaddrinfo(res);
}