A clone of btpd with my configuration changes.

#include "btpd.h"

#include <sys/uio.h>
#include <netdb.h>

static unsigned long m_bw_bytes_in;
static unsigned long m_bw_bytes_out;

static unsigned long m_rate_up;
static unsigned long m_rate_dwn;

struct net_listener {
    int sd;
    struct fdev ev;
};

static int m_nlisteners;
static struct net_listener *m_net_listeners;

unsigned net_npeers;

struct peer_tq net_bw_readq = BTPDQ_HEAD_INITIALIZER(net_bw_readq);
struct peer_tq net_bw_writeq = BTPDQ_HEAD_INITIALIZER(net_bw_writeq);
struct peer_tq net_unattached = BTPDQ_HEAD_INITIALIZER(net_unattached);
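/*
 * Bandwidth accounting: m_bw_bytes_in/out hold the bytes that may still
 * be read/written during the current tick; net_bw_tick() refills them
 * from net_bw_limit_in/out on every tick.  Peers that exhaust the
 * budget are parked on net_bw_readq/net_bw_writeq with their I/O events
 * disabled until the next refill.  net_unattached holds peers that have
 * not yet been tied to a torrent by the handshake.
 */
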
void
net_ban_peer(struct net *n, struct meta_peer *mp)
{
    if (mp->flags & PF_BANNED)
        return;
    mp_hold(mp); // Keep the meta peer alive
    mp->flags |= PF_BANNED;
    btpd_log(BTPD_L_BAD, "banned peer %p.\n", mp);
}

int
net_torrent_has_peer(struct net *n, const uint8_t *id)
{
    return mptbl_find(n->mptbl, id) != NULL;
}

void
net_create(struct torrent *tp)
{
    struct net *n = btpd_calloc(1, sizeof(*n));
    n->tp = tp;
    tp->net = n;

    if ((n->mptbl = mptbl_create(3, btpd_id_eq, btpd_id_hash)) == NULL)
        btpd_err("Out of memory.\n");

    BTPDQ_INIT(&n->getlst);

    n->busy_field = btpd_calloc(ceil(tp->npieces / 8.0), 1);
    n->piece_count = btpd_calloc(tp->npieces, sizeof(*n->piece_count));
}

void
net_kill(struct torrent *tp)
{
    struct htbl_iter it;
    struct meta_peer *mp = mptbl_iter_first(tp->net->mptbl, &it);
    while (mp != NULL) {
        struct meta_peer *mps = mp;
        mp = mptbl_iter_del(&it);
        mp_kill(mps);
    }
    mptbl_free(tp->net->mptbl);
    free(tp->net->piece_count);
    free(tp->net->busy_field);
    free(tp->net);
    tp->net = NULL;
}

void
net_start(struct torrent *tp)
{
    struct net *n = tp->net;
    n->active = 1;
}

void
net_stop(struct torrent *tp)
{
    struct net *n = tp->net;

    n->active = 0;
    n->rate_up = 0;
    n->rate_dwn = 0;

    ul_on_lost_torrent(n);

    struct piece *pc;
    while ((pc = BTPDQ_FIRST(&n->getlst)) != NULL)
        piece_free(pc);
    BTPDQ_INIT(&n->getlst);

    struct peer *p = BTPDQ_FIRST(&net_unattached);
    while (p != NULL) {
        struct peer *next = BTPDQ_NEXT(p, p_entry);
        if (p->n == n)
            peer_kill(p);
        p = next;
    }

    p = BTPDQ_FIRST(&n->peers);
    while (p != NULL) {
        struct peer *next = BTPDQ_NEXT(p, p_entry);
        peer_kill(p);
        p = next;
    }
}

int
net_active(struct torrent *tp)
{
    return tp->net->active;
}

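/*
 * net_write gathers as many queued buffers as possible into a single
 * writev() call.  Piece payloads are loaded from disk lazily: an
 * NB_PIECE header is always followed by its NB_TORRENTDATA buffer on
 * the queue, and that buffer is only filled right before it is sent.
 * BLOCK_MEM_COUNT caps how many torrent-data blocks may sit in memory
 * per peer at once.  A wmax of 0 means "no bandwidth limit".
 */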
#define BLOCK_MEM_COUNT 1

static unsigned long
net_write(struct peer *p, unsigned long wmax)
{
    struct nb_link *nl;
    struct iovec iov[IOV_MAX];
    int niov;
    int limited;
    ssize_t nwritten;
    unsigned long bcount;
    int block_count = 0;

    limited = wmax > 0;
    niov = 0;
    /* Don't hide the assignment inside assert(); it would vanish
     * under NDEBUG and leave nl uninitialized. */
    nl = BTPDQ_FIRST(&p->outq);
    assert(nl != NULL);
    if (nl->nb->type == NB_TORRENTDATA)
        block_count = 1;
    while (niov < IOV_MAX && nl != NULL && (!limited || wmax > 0)) {
        if (nl->nb->type == NB_PIECE) {
            if (block_count >= BLOCK_MEM_COUNT)
                break;
            struct net_buf *tdata = BTPDQ_NEXT(nl, entry)->nb;
            if (tdata->buf == NULL) {
                if (nb_torrentdata_fill(tdata, p->n->tp, nb_get_index(nl->nb),
                        nb_get_begin(nl->nb), nb_get_length(nl->nb)) != 0) {
                    peer_kill(p);
                    return 0;
                }
            }
            block_count++;
        }
        if (niov > 0) {
            iov[niov].iov_base = nl->nb->buf;
            iov[niov].iov_len = nl->nb->len;
        } else {
            /* The first buffer may already be partially sent. */
            iov[niov].iov_base = nl->nb->buf + p->outq_off;
            iov[niov].iov_len = nl->nb->len - p->outq_off;
        }
        if (limited) {
            if (iov[niov].iov_len > wmax)
                iov[niov].iov_len = wmax;
            wmax -= iov[niov].iov_len;
        }
        niov++;
        nl = BTPDQ_NEXT(nl, entry);
    }

    nwritten = writev(p->sd, iov, niov);
    if (nwritten < 0) {
        if (errno == EAGAIN) {
            p->t_wantwrite = btpd_seconds;
            return 0;
        } else {
            btpd_log(BTPD_L_CONN, "write error: %s\n", strerror(errno));
            peer_kill(p);
            return 0;
        }
    } else if (nwritten == 0) {
        btpd_log(BTPD_L_CONN, "connection closed by peer.\n");
        peer_kill(p);
        return 0;
    }

    /* Account the written bytes to the queued buffers, dropping the
     * ones that were sent completely. */
    bcount = nwritten;
    nl = BTPDQ_FIRST(&p->outq);
    while (bcount > 0) {
        unsigned long bufdelta = nl->nb->len - p->outq_off;
        if (bcount >= bufdelta) {
            peer_sent(p, nl->nb);
            if (nl->nb->type == NB_TORRENTDATA) {
                p->n->uploaded += bufdelta;
                p->count_up += bufdelta;
            }
            bcount -= bufdelta;
            BTPDQ_REMOVE(&p->outq, nl, entry);
            nb_drop(nl->nb);
            free(nl);
            p->outq_off = 0;
            nl = BTPDQ_FIRST(&p->outq);
        } else {
            if (nl->nb->type == NB_TORRENTDATA) {
                p->n->uploaded += bcount;
                p->count_up += bcount;
            }
            p->outq_off += bcount;
            bcount = 0;
        }
    }
    if (!BTPDQ_EMPTY(&p->outq))
        p->t_wantwrite = btpd_seconds;
    else
        btpd_ev_disable(&p->ioev, EV_WRITE);
    p->t_lastwrite = btpd_seconds;
    return nwritten;
}

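/*
 * Peer wire messages arrive as <length:4><id:1><payload>, all integers
 * big-endian.  By the time net_dispatch_msg runs, the length prefix and
 * id byte have already been consumed by net_state(), so buf points at
 * the payload: HAVE carries one 32-bit piece index; REQUEST and CANCEL
 * carry <index><begin><length>; PIECE carries <index><begin> (parsed in
 * BTP_PIECEMETA) followed by the block data.
 */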
static int
net_dispatch_msg(struct peer *p, const char *buf)
{
    uint32_t index, begin, length;
    int res = 0;

    switch (p->in.msg_num) {
    case MSG_CHOKE:
        peer_on_choke(p);
        break;
    case MSG_UNCHOKE:
        peer_on_unchoke(p);
        break;
    case MSG_INTEREST:
        peer_on_interest(p);
        break;
    case MSG_UNINTEREST:
        peer_on_uninterest(p);
        break;
    case MSG_HAVE:
        peer_on_have(p, dec_be32(buf));
        break;
    case MSG_BITFIELD:
        if (p->npieces == 0)
            peer_on_bitfield(p, buf);
        else
            res = 1;
        break;
    case MSG_REQUEST:
        if ((p->mp->flags & (PF_P_WANT|PF_I_CHOKE)) == PF_P_WANT) {
            index = dec_be32(buf);
            begin = dec_be32(buf + 4);
            length = dec_be32(buf + 8);
            if (length > PIECE_BLOCKLEN
                || index >= p->n->tp->npieces
                || !cm_has_piece(p->n->tp, index)
                || begin + length > torrent_piece_size(p->n->tp, index)) {
                btpd_log(BTPD_L_MSG, "bad request: (%u, %u, %u) from %p\n",
                    index, begin, length, p);
                res = 1;
                break;
            }
            peer_on_request(p, index, begin, length);
        }
        break;
    case MSG_CANCEL:
        index = dec_be32(buf);
        begin = dec_be32(buf + 4);
        length = dec_be32(buf + 8);
        peer_on_cancel(p, index, begin, length);
        break;
    case MSG_PIECE:
        length = p->in.msg_len - 9;
        peer_on_piece(p, p->in.pc_index, p->in.pc_begin, length, buf);
        break;
    default:
        abort();
    }
    return res;
}

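/*
 * msg_len counts the id byte as well as the payload, so a bare CHOKE is
 * 1, HAVE is 1 + 4 = 5, and REQUEST/CANCEL are 1 + 12 = 13.  A BITFIELD
 * must be exactly one bit per piece, rounded up to whole bytes.
 */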
static int
net_mh_ok(struct peer *p)
{
    uint32_t mlen = p->in.msg_len;
    switch (p->in.msg_num) {
    case MSG_CHOKE:
    case MSG_UNCHOKE:
    case MSG_INTEREST:
    case MSG_UNINTEREST:
        return mlen == 1;
    case MSG_HAVE:
        return mlen == 5;
    case MSG_BITFIELD:
        return mlen == (uint32_t)ceil(p->n->tp->npieces / 8.0) + 1;
    case MSG_REQUEST:
    case MSG_CANCEL:
        return mlen == 13;
    case MSG_PIECE:
        return mlen <= PIECE_BLOCKLEN + 9;
    default:
        return 0;
    }
}

static void
net_progress(struct peer *p, size_t length)
{
    if (p->in.state == BTP_MSGBODY && p->in.msg_num == MSG_PIECE) {
        p->n->downloaded += length;
        p->count_dwn += length;
    }
}

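/*
 * Per-connection input state machine.  A connection first walks the
 * handshake states (protocol string, info hash, peer id), then cycles
 * through BTP_MSGSIZE -> BTP_MSGHEAD -> [BTP_PIECEMETA ->] BTP_MSGBODY
 * for every message; the 8 reserved handshake bytes are never examined.
 * net_state() runs once per completed state, with buf holding exactly
 * the bytes that state asked for via peer_set_in_state().
 */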
static int
net_state(struct peer *p, const char *buf)
{
    switch (p->in.state) {
    case SHAKE_PSTR:
        if (bcmp(buf, "\x13""BitTorrent protocol", 20) != 0)
            goto bad;
        peer_set_in_state(p, SHAKE_INFO, 20);
        break;
    case SHAKE_INFO:
        if (p->mp->flags & PF_INCOMING) {
            struct torrent *tp = torrent_by_hash(buf);
            if (tp == NULL || !net_active(tp))
                goto bad;
            p->n = tp->net;
            peer_send(p, nb_create_shake(tp));
        } else if (bcmp(buf, p->n->tp->tl->hash, 20) != 0)
            goto bad;
        peer_set_in_state(p, SHAKE_ID, 20);
        break;
    case SHAKE_ID:
        if (net_torrent_has_peer(p->n, buf)
            || bcmp(buf, btpd_get_peer_id(), 20) == 0)
            goto bad;
        bcopy(buf, p->mp->id, 20);
        peer_on_shake(p);
        peer_set_in_state(p, BTP_MSGSIZE, 4);
        break;
    case BTP_MSGSIZE:
        p->in.msg_len = dec_be32(buf);
        if (p->in.msg_len == 0)
            peer_on_keepalive(p);
        else
            peer_set_in_state(p, BTP_MSGHEAD, 1);
        break;
    case BTP_MSGHEAD:
        p->in.msg_num = buf[0];
        if (!net_mh_ok(p))
            goto bad;
        else if (p->in.msg_len == 1) {
            if (net_dispatch_msg(p, buf) != 0)
                goto bad;
            peer_set_in_state(p, BTP_MSGSIZE, 4);
        } else if (p->in.msg_num == MSG_PIECE)
            peer_set_in_state(p, BTP_PIECEMETA, 8);
        else
            peer_set_in_state(p, BTP_MSGBODY, p->in.msg_len - 1);
        break;
    case BTP_PIECEMETA:
        p->in.pc_index = dec_be32(buf);
        p->in.pc_begin = dec_be32(buf + 4);
        peer_set_in_state(p, BTP_MSGBODY, p->in.msg_len - 9);
        break;
    case BTP_MSGBODY:
        if (net_dispatch_msg(p, buf) != 0)
            goto bad;
        peer_set_in_state(p, BTP_MSGSIZE, 4);
        break;
    default:
        abort();
    }
    return 0;
bad:
    btpd_log(BTPD_L_CONN, "bad data from %p (%u, %u, %u).\n",
        p, p->in.state, p->in.msg_len, p->in.msg_num);
    peer_kill(p);
    return -1;
}

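/*
 * net_read drains the socket with a two-part readv(): iov[0] finishes
 * any partially received message kept in p->in.buf, iov[1] reads ahead
 * into a 32 KiB stack buffer.  Complete states are parsed in place from
 * the stack buffer; whatever remains of an incomplete message is copied
 * into a freshly allocated p->in.buf for the next call.  An rmax of 0
 * means "no bandwidth limit".
 */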
#define GRBUFLEN (1 << 15)

static unsigned long
net_read(struct peer *p, unsigned long rmax)
{
    size_t rest = p->in.buf != NULL ? p->in.st_bytes - p->in.off : 0;
    char buf[GRBUFLEN];
    struct iovec iov[2] = {
        {
            p->in.buf + p->in.off,
            rest
        }, {
            buf,
            sizeof(buf)
        }
    };

    if (rmax > 0) {
        if (iov[0].iov_len > rmax)
            iov[0].iov_len = rmax;
        iov[1].iov_len = min(rmax - iov[0].iov_len, iov[1].iov_len);
    }

    ssize_t nread = readv(p->sd, iov, 2);
    if (nread < 0 && errno == EAGAIN)
        goto out;
    else if (nread < 0) {
        btpd_log(BTPD_L_CONN, "Read error (%s) on %p.\n", strerror(errno), p);
        peer_kill(p);
        return 0;
    } else if (nread == 0) {
        btpd_log(BTPD_L_CONN, "Connection closed by %p.\n", p);
        peer_kill(p);
        return 0;
    }

    if (rest > 0) {
        if (nread < rest) {
            p->in.off += nread;
            net_progress(p, nread);
            goto out;
        }
        net_progress(p, rest);
        if (net_state(p, p->in.buf) != 0)
            return nread;
        free(p->in.buf);
        p->in.buf = NULL;
        p->in.off = 0;
    }

    iov[1].iov_len = nread - rest;
    while (p->in.st_bytes <= iov[1].iov_len) {
        size_t consumed = p->in.st_bytes;
        net_progress(p, consumed);
        if (net_state(p, iov[1].iov_base) != 0)
            return nread;
        iov[1].iov_base += consumed;
        iov[1].iov_len -= consumed;
    }

    if (iov[1].iov_len > 0) {
        net_progress(p, iov[1].iov_len);
        p->in.off = iov[1].iov_len;
        p->in.buf = btpd_malloc(p->in.st_bytes);
        bcopy(iov[1].iov_base, p->in.buf, iov[1].iov_len);
    }

out:
    return nread > 0 ? nread : 0;
}

int
net_connect_addr(int family, struct sockaddr *sa, socklen_t salen, int *sd)
{
    if ((*sd = socket(family, SOCK_STREAM, 0)) == -1)
        return errno;
    set_nonblocking(*sd);
    if (connect(*sd, sa, salen) == -1 && errno != EINPROGRESS) {
        int err = errno;
        btpd_log(BTPD_L_CONN, "Botched connection %s.\n", strerror(errno));
        close(*sd);
        return err;
    }
    return 0;
}

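/*
 * AI_NUMERICHOST restricts getaddrinfo() to literal addresses, so "ip"
 * must already be a dotted-quad or IPv6 string; no DNS lookup happens
 * here.  Note that getaddrinfo() reports failures through its return
 * value (an EAI_* code), not through errno.
 */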
int
net_connect_name(const char *ip, int port, int *sd)
{
    struct addrinfo hints, *res;
    char portstr[6];

    assert(net_npeers < net_max_peers);

    if (snprintf(portstr, sizeof(portstr), "%d", port) >= sizeof(portstr))
        return EINVAL;
    bzero(&hints, sizeof(hints));
    hints.ai_family = net_af_spec();
    hints.ai_flags = AI_NUMERICHOST;
    hints.ai_socktype = SOCK_STREAM;
    /* getaddrinfo() does not set errno; stash its error code there
     * instead of returning whatever errno happened to hold, matching
     * the idiom used in net_init(). */
    if ((errno = getaddrinfo(ip, portstr, &hints, &res)) != 0)
        return errno;

    int error =
        net_connect_addr(res->ai_family, res->ai_addr, res->ai_addrlen, sd);
    freeaddrinfo(res);
    return error;
}

void
net_connection_cb(int sd, short type, void *arg)
{
    int nsd;

    nsd = accept(sd, NULL, NULL);
    if (nsd < 0) {
        if (errno == EWOULDBLOCK || errno == ECONNABORTED)
            return;
        else
            btpd_err("accept: %s\n", strerror(errno));
    }

    if (set_nonblocking(nsd) != 0) {
        close(nsd);
        return;
    }

    assert(net_npeers <= net_max_peers);
    if (net_npeers == net_max_peers) {
        close(nsd);
        return;
    }

    peer_create_in(nsd);

    btpd_log(BTPD_L_CONN, "got connection.\n");
}

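/*
 * Rate bookkeeping.  Every tick, each rate is updated as
 *     rate += count - sub(rate)
 * where sub(rate) is rate/RATEHISTORY for large rates and min(256, rate)
 * otherwise.  In steady state rate/RATEHISTORY == count, so "rate" is
 * roughly the number of bytes moved over the last RATEHISTORY seconds;
 * e.g. a constant 1000 bytes/s settles at rate == 1000 * RATEHISTORY.
 * The min(256, rate) branch makes near-idle rates decay to zero fast.
 */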
static unsigned long
compute_rate_sub(unsigned long rate)
{
    if (rate > 256 * RATEHISTORY)
        return rate / RATEHISTORY;
    else
        return min(256, rate);
}

static void
compute_rates(void)
{
    unsigned long tot_up = 0, tot_dwn = 0;
    struct torrent *tp;

    BTPDQ_FOREACH(tp, torrent_get_all(), entry) {
        unsigned long tp_up = 0, tp_dwn = 0;
        struct net *n = tp->net;
        struct peer *p;

        BTPDQ_FOREACH(p, &n->peers, p_entry) {
            if (p->count_up > 0 || peer_active_up(p)) {
                tp_up += p->count_up;
                p->rate_up += p->count_up - compute_rate_sub(p->rate_up);
                p->count_up = 0;
            }
            if (p->count_dwn > 0 || peer_active_down(p)) {
                tp_dwn += p->count_dwn;
                p->rate_dwn += p->count_dwn - compute_rate_sub(p->rate_dwn);
                p->count_dwn = 0;
            }
        }

        n->rate_up += tp_up - compute_rate_sub(n->rate_up);
        n->rate_dwn += tp_dwn - compute_rate_sub(n->rate_dwn);
        tot_up += tp_up;
        tot_dwn += tp_dwn;
    }

    m_rate_up += tot_up - compute_rate_sub(m_rate_up);
    m_rate_dwn += tot_dwn - compute_rate_sub(m_rate_dwn);
}

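/*
 * Once per tick the global bandwidth budgets are reset to the
 * configured limits and the queued peers are released in FIFO order,
 * each consuming part of the budget until it runs out.  With no limit
 * configured (limit == 0) every queued peer is simply drained.
 */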
static void
net_bw_tick(void)
{
    struct peer *p;

    m_bw_bytes_out = net_bw_limit_out;
    m_bw_bytes_in = net_bw_limit_in;

    if (net_bw_limit_in > 0) {
        while ((p = BTPDQ_FIRST(&net_bw_readq)) != NULL && m_bw_bytes_in > 0) {
            BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
            btpd_ev_enable(&p->ioev, EV_READ);
            p->mp->flags &= ~PF_ON_READQ;
            m_bw_bytes_in -= net_read(p, m_bw_bytes_in);
        }
    } else {
        while ((p = BTPDQ_FIRST(&net_bw_readq)) != NULL) {
            BTPDQ_REMOVE(&net_bw_readq, p, rq_entry);
            btpd_ev_enable(&p->ioev, EV_READ);
            p->mp->flags &= ~PF_ON_READQ;
            net_read(p, 0);
        }
    }

    if (net_bw_limit_out) {
        while ((p = BTPDQ_FIRST(&net_bw_writeq)) != NULL
                   && m_bw_bytes_out > 0) {
            BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
            btpd_ev_enable(&p->ioev, EV_WRITE);
            p->mp->flags &= ~PF_ON_WRITEQ;
            m_bw_bytes_out -= net_write(p, m_bw_bytes_out);
        }
    } else {
        while ((p = BTPDQ_FIRST(&net_bw_writeq)) != NULL) {
            BTPDQ_REMOVE(&net_bw_writeq, p, wq_entry);
            btpd_ev_enable(&p->ioev, EV_WRITE);
            p->mp->flags &= ~PF_ON_WRITEQ;
            net_write(p, 0);
        }
    }
}

static void
run_peer_ticks(void)
{
    struct torrent *tp;
    struct peer *p, *next;

    BTPDQ_FOREACH_MUTABLE(p, &net_unattached, p_entry, next)
        peer_on_tick(p);

    BTPDQ_FOREACH(tp, torrent_get_all(), entry)
        BTPDQ_FOREACH_MUTABLE(p, &tp->net->peers, p_entry, next)
            peer_on_tick(p);
}

void
net_on_tick(void)
{
    run_peer_ticks();
    compute_rates();
    net_bw_tick();
}

static void
net_read_cb(struct peer *p)
{
    if (net_bw_limit_in == 0)
        net_read(p, 0);
    else if (m_bw_bytes_in > 0)
        m_bw_bytes_in -= net_read(p, m_bw_bytes_in);
    else {
        btpd_ev_disable(&p->ioev, EV_READ);
        p->mp->flags |= PF_ON_READQ;
        BTPDQ_INSERT_TAIL(&net_bw_readq, p, rq_entry);
    }
}

static void
net_write_cb(struct peer *p)
{
    if (net_bw_limit_out == 0)
        net_write(p, 0);
    else if (m_bw_bytes_out > 0)
        m_bw_bytes_out -= net_write(p, m_bw_bytes_out);
    else {
        btpd_ev_disable(&p->ioev, EV_WRITE);
        p->mp->flags |= PF_ON_WRITEQ;
        BTPDQ_INSERT_TAIL(&net_bw_writeq, p, wq_entry);
    }
}

void
net_io_cb(int sd, short type, void *arg)
{
    switch (type) {
    case EV_READ:
        net_read_cb(arg);
        break;
    case EV_WRITE:
        net_write_cb(arg);
        break;
    default:
        abort();
    }
}

int
net_af_spec(void)
{
    if (net_ipv4 && net_ipv6)
        return AF_UNSPEC;
    else if (net_ipv4)
        return AF_INET;
    else
        return AF_INET6;
}

void
net_shutdown(void)
{
    for (int i = 0; i < m_nlisteners; i++) {
        btpd_ev_del(&m_net_listeners[i].ev);
        close(m_net_listeners[i].sd);
    }
}

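/*
 * Listener setup: getaddrinfo() with AI_PASSIVE enumerates one wildcard
 * address per usable family, and a separate listening socket is bound
 * for each.  IPV6_V6ONLY keeps the v6 socket from shadowing the v4 one,
 * so an AF_UNSPEC configuration can bind both on the same port.  The
 * peer limit defaults to 4/5 of the file-descriptor table.
 */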
void
net_init(void)
{
    m_bw_bytes_out = net_bw_limit_out;
    m_bw_bytes_in = net_bw_limit_in;

    int safe_fds = getdtablesize() * 4 / 5;
    if (net_max_peers == 0 || net_max_peers > safe_fds)
        net_max_peers = safe_fds;

    int count = 0, flag = 1, found_ipv4 = 0, found_ipv6 = 0, sd;
    char portstr[6];
    struct addrinfo hints, *res, *ai;

    bzero(&hints, sizeof(hints));
    hints.ai_flags = AI_ADDRCONFIG | AI_PASSIVE;
    hints.ai_family = net_af_spec();
    hints.ai_socktype = SOCK_STREAM;
    snprintf(portstr, sizeof(portstr), "%hu", net_port);
    if ((errno = getaddrinfo(NULL, portstr, &hints, &res)) != 0)
        btpd_err("getaddrinfo failed (%s).\n", gai_strerror(errno));

    for (ai = res; ai != NULL; ai = ai->ai_next) {
        count++;
        if (ai->ai_family == AF_INET)
            found_ipv4 = 1;
        else
            found_ipv6 = 1;
    }
    net_ipv4 = found_ipv4;
    net_ipv6 = found_ipv6;
    if (!net_ipv4 && !net_ipv6)
        btpd_err("no usable address found. wrong use of -4/-6 perhaps.\n");

    m_nlisteners = count;
    m_net_listeners = btpd_calloc(count, sizeof(*m_net_listeners));
    for (ai = res; ai != NULL; ai = ai->ai_next) {
        count--;
        if ((sd = socket(ai->ai_family, ai->ai_socktype, 0)) == -1)
            btpd_err("failed to create socket (%s).\n", strerror(errno));
        setsockopt(sd, SOL_SOCKET, SO_REUSEADDR, &flag, sizeof(flag));
#ifdef IPV6_V6ONLY
        if (ai->ai_family == AF_INET6)
            setsockopt(sd, IPPROTO_IPV6, IPV6_V6ONLY, &flag, sizeof(flag));
#endif
        if (bind(sd, ai->ai_addr, ai->ai_addrlen) == -1)
            btpd_err("bind failed (%s).\n", strerror(errno));
        listen(sd, 10);
        set_nonblocking(sd);
        m_net_listeners[count].sd = sd;
        btpd_ev_new(&m_net_listeners[count].ev, sd, EV_READ,
            net_connection_cb, NULL);
    }
    freeaddrinfo(res);
}