A clone of btpd with my configuration changes.

/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
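
/*
 * Overview (a minimal usage sketch, not part of the original source):
 * this file implements the core libevent dispatch loop used by btpd.
 * A typical caller drives it roughly as below; `on_read' and `fd' are
 * hypothetical names used purely for illustration:
 *
 *     void on_read(int fd, short events, void *arg);  // user callback
 *
 *     struct event ev;
 *     event_init();                           // sets up current_base
 *     event_set(&ev, fd, EV_READ, on_read, NULL);
 *     event_add(&ev, NULL);                   // no timeout
 *     event_dispatch();                       // loop until no events remain
 */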
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif

#include <sys/types.h>
#include <sys/tree.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_RTSIG
extern const struct eventop rtsigops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
    &evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
    &kqops,
#endif
#ifdef HAVE_EPOLL
    &epollops,
#endif
#ifdef HAVE_DEVPOLL
    &devpollops,
#endif
#ifdef HAVE_RTSIG
    &rtsigops,
#endif
#ifdef HAVE_POLL
    &pollops,
#endif
#ifdef HAVE_SELECT
    &selectops,
#endif
#ifdef WIN32
    &win32ops,
#endif
    NULL
};
/* Global state */
struct event_list signalqueue;

struct event_base *current_base = NULL;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);           /* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig; /* Set in signal handler */

/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int  event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int  timeout_next(struct event_base *, struct timeval *);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);
static int
compare(struct event *a, struct event *b)
{
    if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
        return (-1);
    else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
        return (1);
    if (a < b)
        return (-1);
    else if (a > b)
        return (1);
    return (0);
}

static int
gettime(struct timeval *tp)
{
#ifdef HAVE_CLOCK_GETTIME
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
        return (-1);
    tp->tv_sec = ts.tv_sec;
    tp->tv_usec = ts.tv_nsec / 1000;
#else
    gettimeofday(tp, NULL);
#endif
    return (0);
}
RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);
RB_GENERATE(event_tree, event, ev_timeout_node, compare);

void *
event_init(void)
{
    int i;

    if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
        event_err(1, "%s: calloc", __func__);

    event_sigcb = NULL;
    event_gotsig = 0;
    gettime(&current_base->event_tv);

    RB_INIT(&current_base->timetree);
    TAILQ_INIT(&current_base->eventqueue);
    TAILQ_INIT(&signalqueue);

    current_base->evbase = NULL;
    for (i = 0; eventops[i] && !current_base->evbase; i++) {
        current_base->evsel = eventops[i];
        current_base->evbase = current_base->evsel->init();
    }

    if (current_base->evbase == NULL)
        event_errx(1, "%s: no event mechanism available", __func__);

    if (getenv("EVENT_SHOW_METHOD"))
        event_msgx("libevent using: %s\n",
            current_base->evsel->name);

    /* allocate a single active event queue */
    event_base_priority_init(current_base, 1);

    return (current_base);
}
void
event_base_free(struct event_base *base)
{
    int i;

    if (base == NULL && current_base)
        base = current_base;
    if (base == current_base)
        current_base = NULL;

    assert(base);
    assert(TAILQ_EMPTY(&base->eventqueue));
    for (i = 0; i < base->nactivequeues; ++i)
        assert(TAILQ_EMPTY(base->activequeues[i]));
    assert(RB_EMPTY(&base->timetree));

    for (i = 0; i < base->nactivequeues; ++i)
        free(base->activequeues[i]);
    free(base->activequeues);

    if (base->evsel->dealloc != NULL)
        base->evsel->dealloc(base->evbase);

    free(base);
}
int
event_priority_init(int npriorities)
{
    return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
    int i;

    if (base->event_count_active)
        return (-1);

    if (base->nactivequeues && npriorities != base->nactivequeues) {
        for (i = 0; i < base->nactivequeues; ++i)
            free(base->activequeues[i]);
        free(base->activequeues);
    }

    /* Allocate our priority queues */
    base->nactivequeues = npriorities;
    base->activequeues = (struct event_list **)calloc(base->nactivequeues,
        sizeof(struct event_list *));
    if (base->activequeues == NULL)
        event_err(1, "%s: calloc", __func__);

    for (i = 0; i < base->nactivequeues; ++i) {
        base->activequeues[i] = malloc(sizeof(struct event_list));
        if (base->activequeues[i] == NULL)
            event_err(1, "%s: malloc", __func__);
        TAILQ_INIT(base->activequeues[i]);
    }

    return (0);
}
int
event_haveevents(struct event_base *base)
{
    return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues.  Queues with lower
 * priority numbers are always processed before those with higher
 * numbers, so low-numbered (more urgent) events can starve
 * high-numbered ones.
 */
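
/*
 * A hypothetical usage sketch (not in the original source): with three
 * queues, priority 0 is serviced first, so an urgent event could be
 * configured as follows; `fd' and `cb' are illustrative names:
 *
 *     event_base_priority_init(base, 3);
 *     event_set(&ev, fd, EV_READ, cb, NULL);
 *     event_priority_set(&ev, 0);     // most urgent of the 3 queues
 *     event_add(&ev, NULL);
 */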
static void
event_process_active(struct event_base *base)
{
    struct event *ev;
    struct event_list *activeq = NULL;
    int i;
    short ncalls;

    if (!base->event_count_active)
        return;

    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
            activeq = base->activequeues[i];
            break;
        }
    }
    /* event_count_active is non-zero, so some queue must be non-empty */
    assert(activeq != NULL);

    for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
        event_queue_remove(base, ev, EVLIST_ACTIVE);

        /* Allows deletes to work */
        ncalls = ev->ev_ncalls;
        ev->ev_pncalls = &ncalls;
        while (ncalls) {
            ncalls--;
            ev->ev_ncalls = ncalls;
            (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
        }
    }
}
/*
 * Wait continuously for events.  We exit only if no events are left.
 */

int
event_dispatch(void)
{
    return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
    return (event_base_loop(event_base, 0));
}
static void
event_loopexit_cb(int fd, short what, void *arg)
{
    struct event_base *base = arg;
    base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(struct timeval *tv)
{
    return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
        current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
    return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
        event_base, tv));
}
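
/*
 * A hypothetical usage sketch (not in the original source): ask the
 * dispatch loop to unwind two seconds from now; the loop terminates at
 * the top of the iteration after the timeout callback has run:
 *
 *     struct timeval tv = { 2, 0 };
 *     event_loopexit(&tv);    // event_dispatch() then returns 0
 */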
/* not thread safe */
int
event_loop(int flags)
{
    return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    struct timeval tv;
    int res, done;

    done = 0;
    while (!done) {
        /* Calculate the initial events that we are waiting for */
        if (evsel->recalc(base, evbase, 0) == -1)
            return (-1);

        /* Terminate the loop if we have been asked to */
        if (base->event_gotterm) {
            base->event_gotterm = 0;
            break;
        }

        /* You cannot use this interface for multi-threaded apps */
        while (event_gotsig) {
            event_gotsig = 0;
            if (event_sigcb) {
                res = (*event_sigcb)();
                if (res == -1) {
                    errno = EINTR;
                    return (-1);
                }
            }
        }

        /* Check if time is running backwards */
        gettime(&tv);
        if (timercmp(&tv, &base->event_tv, <)) {
            struct timeval off;
            event_debug(("%s: time is running backwards, corrected",
                __func__));
            timersub(&base->event_tv, &tv, &off);
            timeout_correct(base, &off);
        }
        base->event_tv = tv;

        if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
            timeout_next(base, &tv);
        else
            timerclear(&tv);

        /* If we have no events, we just exit */
        if (!event_haveevents(base)) {
            event_debug(("%s: no events registered.", __func__));
            return (1);
        }

        res = evsel->dispatch(base, evbase, &tv);
        if (res == -1)
            return (-1);

        timeout_process(base);

        if (base->event_count_active) {
            event_process_active(base);
            if (!base->event_count_active && (flags & EVLOOP_ONCE))
                done = 1;
        } else if (flags & EVLOOP_NONBLOCK)
            done = 1;
    }

    event_debug(("%s: asked to terminate loop.", __func__));
    return (0);
}
/* Sets up an event for processing once */
struct event_once {
    struct event ev;
    void (*cb)(int, short, void *);
    void *arg;
};

/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
    struct event_once *eonce = arg;

    (*eonce->cb)(fd, events, eonce->arg);
    free(eonce);
}

/* Schedules an event once */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
    struct event_once *eonce;
    struct timeval etv;
    int res;

    /* We cannot support signals that just fire once */
    if (events & EV_SIGNAL)
        return (-1);

    if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
        return (-1);

    eonce->cb = callback;
    eonce->arg = arg;

    if (events == EV_TIMEOUT) {
        if (tv == NULL) {
            timerclear(&etv);
            tv = &etv;
        }
        evtimer_set(&eonce->ev, event_once_cb, eonce);
    } else if (events & (EV_READ|EV_WRITE)) {
        events &= EV_READ|EV_WRITE;
        event_set(&eonce->ev, fd, events, event_once_cb, eonce);
    } else {
        /* Bad event combination */
        free(eonce);
        return (-1);
    }

    res = event_add(&eonce->ev, tv);
    if (res != 0) {
        free(eonce);
        return (res);
    }

    return (0);
}
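
/*
 * A hypothetical usage sketch (not in the original source): run a
 * callback exactly once, half a second from now; `fire_once' is an
 * illustrative name:
 *
 *     void fire_once(int fd, short events, void *arg);
 *
 *     struct timeval tv = { 0, 500000 };
 *     event_once(-1, EV_TIMEOUT, fire_once, NULL, &tv);
 */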
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
    /* Take the current base - caller needs to set the real base later */
    ev->ev_base = current_base;

    ev->ev_callback = callback;
    ev->ev_arg = arg;
    ev->ev_fd = fd;
    ev->ev_events = events;
    ev->ev_flags = EVLIST_INIT;
    ev->ev_ncalls = 0;
    ev->ev_pncalls = NULL;

    /* by default, we put new events into the middle priority */
    ev->ev_pri = current_base->nactivequeues/2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
    /* Only innocent events may be assigned to a different base */
    if (ev->ev_flags != EVLIST_INIT)
        return (-1);

    ev->ev_base = base;
    ev->ev_pri = base->nactivequeues/2;

    return (0);
}
/*
 * Sets the priority of an event - if the event is already active,
 * changing its priority will fail.
 */

int
event_priority_set(struct event *ev, int pri)
{
    if (ev->ev_flags & EVLIST_ACTIVE)
        return (-1);
    if (pri < 0 || pri >= ev->ev_base->nactivequeues)
        return (-1);

    ev->ev_pri = pri;

    return (0);
}
/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
    struct timeval now, res;
    int flags = 0;

    if (ev->ev_flags & EVLIST_INSERTED)
        flags |= (ev->ev_events & (EV_READ|EV_WRITE));
    if (ev->ev_flags & EVLIST_ACTIVE)
        flags |= ev->ev_res;
    if (ev->ev_flags & EVLIST_TIMEOUT)
        flags |= EV_TIMEOUT;
    if (ev->ev_flags & EVLIST_SIGNAL)
        flags |= EV_SIGNAL;

    event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

    /* See if there is a timeout that we should report */
    if (tv != NULL && (flags & event & EV_TIMEOUT)) {
        gettime(&now);
        timersub(&ev->ev_timeout, &now, &res);
        /* correctly remap to real time */
        gettimeofday(&now, NULL);
        timeradd(&now, &res, tv);
    }

    return (flags & event);
}
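
/*
 * A hypothetical usage sketch (not in the original source): test whether
 * a timer is still pending and, if so, report the wall-clock time at
 * which it will fire:
 *
 *     struct timeval when;
 *     if (event_pending(&ev, EV_TIMEOUT, &when))
 *         printf("fires at %ld.%06ld\n",
 *             (long)when.tv_sec, (long)when.tv_usec);
 */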
int
event_add(struct event *ev, struct timeval *tv)
{
    struct event_base *base = ev->ev_base;
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;

    event_debug((
        "event_add: event: %p, %s%s%scall %p",
        ev,
        ev->ev_events & EV_READ ? "EV_READ " : " ",
        ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
        tv ? "EV_TIMEOUT " : " ",
        ev->ev_callback));

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    if (tv != NULL) {
        struct timeval now;

        if (ev->ev_flags & EVLIST_TIMEOUT)
            event_queue_remove(base, ev, EVLIST_TIMEOUT);

        /* Check if it is active due to a timeout.  Rescheduling
         * this timeout before the callback can be executed
         * removes it from the active list. */
        if ((ev->ev_flags & EVLIST_ACTIVE) &&
            (ev->ev_res & EV_TIMEOUT)) {
            /* See if we are just active executing this
             * event in a loop
             */
            if (ev->ev_ncalls && ev->ev_pncalls) {
                /* Abort loop */
                *ev->ev_pncalls = 0;
            }
            event_queue_remove(base, ev, EVLIST_ACTIVE);
        }

        gettime(&now);
        timeradd(&now, tv, &ev->ev_timeout);

        event_debug((
            "event_add: timeout in %ld seconds, call %p",
            (long)tv->tv_sec, ev->ev_callback));

        event_queue_insert(base, ev, EVLIST_TIMEOUT);
    }

    if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
        !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
        event_queue_insert(base, ev, EVLIST_INSERTED);
        return (evsel->add(evbase, ev));
    } else if ((ev->ev_events & EV_SIGNAL) &&
        !(ev->ev_flags & EVLIST_SIGNAL)) {
        event_queue_insert(base, ev, EVLIST_SIGNAL);
        return (evsel->add(evbase, ev));
    }

    return (0);
}
int
event_del(struct event *ev)
{
    struct event_base *base;
    const struct eventop *evsel;
    void *evbase;

    event_debug(("event_del: %p, callback %p",
        ev, ev->ev_callback));

    /* An event without a base has not been added */
    if (ev->ev_base == NULL)
        return (-1);

    base = ev->ev_base;
    evsel = base->evsel;
    evbase = base->evbase;

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    /* See if we are just active executing this event in a loop */
    if (ev->ev_ncalls && ev->ev_pncalls) {
        /* Abort loop */
        *ev->ev_pncalls = 0;
    }

    if (ev->ev_flags & EVLIST_TIMEOUT)
        event_queue_remove(base, ev, EVLIST_TIMEOUT);

    if (ev->ev_flags & EVLIST_ACTIVE)
        event_queue_remove(base, ev, EVLIST_ACTIVE);

    if (ev->ev_flags & EVLIST_INSERTED) {
        event_queue_remove(base, ev, EVLIST_INSERTED);
        return (evsel->del(evbase, ev));
    } else if (ev->ev_flags & EVLIST_SIGNAL) {
        event_queue_remove(base, ev, EVLIST_SIGNAL);
        return (evsel->del(evbase, ev));
    }

    return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
    /* We get different kinds of events, add them together */
    if (ev->ev_flags & EVLIST_ACTIVE) {
        ev->ev_res |= res;
        return;
    }

    ev->ev_res = res;
    ev->ev_ncalls = ncalls;
    ev->ev_pncalls = NULL;
    event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
int
timeout_next(struct event_base *base, struct timeval *tv)
{
    struct timeval dflt = TIMEOUT_DEFAULT;
    struct timeval now;
    struct event *ev;

    if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
        *tv = dflt;
        return (0);
    }

    if (gettime(&now) == -1)
        return (-1);

    if (timercmp(&ev->ev_timeout, &now, <=)) {
        timerclear(tv);
        return (0);
    }

    timersub(&ev->ev_timeout, &now, tv);

    assert(tv->tv_sec >= 0);
    assert(tv->tv_usec >= 0);

    event_debug(("timeout_next: in %ld seconds", (long)tv->tv_sec));
    return (0);
}
static void
timeout_correct(struct event_base *base, struct timeval *off)
{
    struct event *ev;

    /*
     * We can modify the key element of the node without destroying
     * the key, because we apply it to all in the right order.
     */
    RB_FOREACH(ev, event_tree, &base->timetree)
        timersub(&ev->ev_timeout, off, &ev->ev_timeout);
}
void
timeout_process(struct event_base *base)
{
    struct timeval now;
    struct event *ev, *next;

    gettime(&now);

    for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
        if (timercmp(&ev->ev_timeout, &now, >))
            break;
        next = RB_NEXT(event_tree, &base->timetree, ev);

        event_queue_remove(base, ev, EVLIST_TIMEOUT);

        /* delete this event from the I/O queues */
        event_del(ev);

        event_debug(("timeout_process: call %p",
            ev->ev_callback));
        event_active(ev, EV_TIMEOUT, 1);
    }
}
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
    int docount = 1;

    if (!(ev->ev_flags & queue))
        event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
            ev, ev->ev_fd, queue);

    if (ev->ev_flags & EVLIST_INTERNAL)
        docount = 0;

    if (docount)
        base->event_count--;

    ev->ev_flags &= ~queue;
    switch (queue) {
    case EVLIST_ACTIVE:
        if (docount)
            base->event_count_active--;
        TAILQ_REMOVE(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_SIGNAL:
        TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
        break;
    case EVLIST_TIMEOUT:
        RB_REMOVE(event_tree, &base->timetree, ev);
        break;
    case EVLIST_INSERTED:
        TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    int docount = 1;

    if (ev->ev_flags & queue) {
        /* Double insertion is possible for active events */
        if (queue & EVLIST_ACTIVE)
            return;

        event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
            ev, ev->ev_fd, queue);
    }

    if (ev->ev_flags & EVLIST_INTERNAL)
        docount = 0;

    if (docount)
        base->event_count++;

    ev->ev_flags |= queue;
    switch (queue) {
    case EVLIST_ACTIVE:
        if (docount)
            base->event_count_active++;
        TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_SIGNAL:
        TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
        break;
    case EVLIST_TIMEOUT: {
        struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
        assert(tmp == NULL);
        break;
    }
    case EVLIST_INSERTED:
        TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}
/* Functions for debugging */

const char *
event_get_version(void)
{
    return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
    return (current_base->evsel->name);
}