A clone of btpd with my configuration changes.
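The listing below appears to be the bundled libevent event-loop core (event.c, libevent 1.x, by Niels Provos), which btpd vendors in its source tree.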
/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#include "misc.h"
#endif

#include <sys/types.h>
#include <sys/tree.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>

#include "event.h"
#include "event-internal.h"
#include "log.h"

#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_RTSIG
extern const struct eventop rtsigops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
	&evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
	&kqops,
#endif
#ifdef HAVE_EPOLL
	&epollops,
#endif
#ifdef HAVE_DEVPOLL
	&devpollops,
#endif
#ifdef HAVE_RTSIG
	&rtsigops,
#endif
#ifdef HAVE_POLL
	&pollops,
#endif
#ifdef HAVE_SELECT
	&selectops,
#endif
#ifdef WIN32
	&win32ops,
#endif
	NULL
};
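/*
 * event_init() walks this table in order and keeps the first backend
 * whose init() succeeds, so the ordering above is the effective
 * selection priority. With the EVENT_SHOW_METHOD environment variable
 * set, event_init() reports the chosen backend at startup.
 */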
/* Global state */
struct event_list signalqueue;

struct event_base *current_base = NULL;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);		/* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig;	/* Set in signal handler */

/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval *);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);

static int
compare(struct event *a, struct event *b)
{
	if (timercmp(&a->ev_timeout, &b->ev_timeout, <))
		return (-1);
	else if (timercmp(&a->ev_timeout, &b->ev_timeout, >))
		return (1);
	if (a < b)
		return (-1);
	else if (a > b)
		return (1);
	return (0);
}
static int
gettime(struct timeval *tp)
{
#ifdef HAVE_CLOCK_GETTIME
	struct timespec ts;

#ifdef HAVE_CLOCK_MONOTONIC
	if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
#else
	if (clock_gettime(CLOCK_REALTIME, &ts) == -1)
#endif
		return (-1);
	tp->tv_sec = ts.tv_sec;
	tp->tv_usec = ts.tv_nsec / 1000;
#else
	gettimeofday(tp, NULL);
#endif

	return (0);
}

RB_PROTOTYPE(event_tree, event, ev_timeout_node, compare);

RB_GENERATE(event_tree, event, ev_timeout_node, compare);
void *
event_init(void)
{
	int i;

	if ((current_base = calloc(1, sizeof(struct event_base))) == NULL)
		event_err(1, "%s: calloc", __func__);

	event_sigcb = NULL;
	event_gotsig = 0;
	gettime(&current_base->event_tv);

	RB_INIT(&current_base->timetree);
	TAILQ_INIT(&current_base->eventqueue);
	TAILQ_INIT(&signalqueue);

	current_base->evbase = NULL;
	for (i = 0; eventops[i] && !current_base->evbase; i++) {
		current_base->evsel = eventops[i];
		current_base->evbase = current_base->evsel->init();
	}

	if (current_base->evbase == NULL)
		event_errx(1, "%s: no event mechanism available", __func__);

	if (getenv("EVENT_SHOW_METHOD"))
		event_msgx("libevent using: %s\n",
		    current_base->evsel->name);

	/* allocate a single active event queue */
	event_base_priority_init(current_base, 1);

	return (current_base);
}
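/*
 * Typical single-base usage of the API in this file: initialize the
 * library, register an event, and hand control to the dispatcher.
 * A minimal sketch (the fd, callback, and names are illustrative,
 * not part of this file):
 *
 *	static void
 *	on_read(int fd, short events, void *arg)
 *	{
 *		... handle the readable fd ...
 *	}
 *
 *	struct event ev;
 *	event_init();
 *	event_set(&ev, fd, EV_READ | EV_PERSIST, on_read, NULL);
 *	event_add(&ev, NULL);		// no timeout
 *	event_dispatch();		// runs until no events remain
 */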
void
event_base_free(struct event_base *base)
{
	int i;

	if (base == NULL && current_base)
		base = current_base;
	if (base == current_base)
		current_base = NULL;

	assert(base);
	assert(TAILQ_EMPTY(&base->eventqueue));
	for (i = 0; i < base->nactivequeues; ++i)
		assert(TAILQ_EMPTY(base->activequeues[i]));

	assert(RB_EMPTY(&base->timetree));

	for (i = 0; i < base->nactivequeues; ++i)
		free(base->activequeues[i]);
	free(base->activequeues);

	if (base->evsel->dealloc != NULL)
		base->evsel->dealloc(base->evbase);

	free(base);
}

int
event_priority_init(int npriorities)
{
	return event_base_priority_init(current_base, npriorities);
}
int
event_base_priority_init(struct event_base *base, int npriorities)
{
	int i;

	if (base->event_count_active)
		return (-1);

	if (base->nactivequeues && npriorities != base->nactivequeues) {
		for (i = 0; i < base->nactivequeues; ++i) {
			free(base->activequeues[i]);
		}
		free(base->activequeues);
	}

	/* Allocate our priority queues: one list pointer per priority */
	base->nactivequeues = npriorities;
	base->activequeues = (struct event_list **)calloc(base->nactivequeues,
	    sizeof(struct event_list *));
	if (base->activequeues == NULL)
		event_err(1, "%s: calloc", __func__);

	for (i = 0; i < base->nactivequeues; ++i) {
		base->activequeues[i] = malloc(sizeof(struct event_list));
		if (base->activequeues[i] == NULL)
			event_err(1, "%s: malloc", __func__);
		TAILQ_INIT(base->activequeues[i]);
	}

	return (0);
}
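/*
 * Priorities are only meaningful once the queues exist. A plausible
 * sequence (names are illustrative): create several queues, then lower
 * the priority number of latency-sensitive events before adding them.
 *
 *	event_init();
 *	event_priority_init(3);			// queues 0..2
 *	event_set(&ev, fd, EV_READ, cb, NULL);
 *	event_priority_set(&ev, 0);		// 0 runs before 1 and 2
 *	event_add(&ev, NULL);
 */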
int
event_haveevents(struct event_base *base)
{
	return (base->event_count > 0);
}
/*
 * Active events are stored in priority queues. Lower priorities are always
 * processed before higher priorities. Low priority events can starve high
 * priority ones.
 */
static void
event_process_active(struct event_base *base)
{
	struct event *ev;
	struct event_list *activeq = NULL;
	int i;
	short ncalls;

	if (!base->event_count_active)
		return;

	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
			activeq = base->activequeues[i];
			break;
		}
	}

	assert(activeq != NULL);

	for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
		event_queue_remove(base, ev, EVLIST_ACTIVE);

		/* Allows deletes to work */
		ncalls = ev->ev_ncalls;
		ev->ev_pncalls = &ncalls;
		while (ncalls) {
			ncalls--;
			ev->ev_ncalls = ncalls;
			(*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
			if (event_gotsig)
				return;
		}
	}
}
/*
 * Wait continuously for events. We exit only if no events are left.
 */
int
event_dispatch(void)
{
	return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
	return (event_base_loop(event_base, 0));
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
	struct event_base *base = arg;
	base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, struct timeval *tv)
{
	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
	    event_base, tv));
}

/* not thread safe */
int
event_loop(int flags)
{
	return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;
	struct timeval tv;
	int res, done;

	done = 0;
	while (!done) {
		/* Calculate the initial events that we are waiting for */
		if (evsel->recalc(base, evbase, 0) == -1)
			return (-1);

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			base->event_gotterm = 0;
			break;
		}

		/* You cannot use this interface for multi-threaded apps */
		while (event_gotsig) {
			event_gotsig = 0;
			if (event_sigcb) {
				res = (*event_sigcb)();
				if (res == -1) {
					errno = EINTR;
					return (-1);
				}
			}
		}

		/* Check if time is running backwards */
		gettime(&tv);
		if (timercmp(&tv, &base->event_tv, <)) {
			struct timeval off;
			event_debug(("%s: time is running backwards, corrected",
			    __func__));
			timersub(&base->event_tv, &tv, &off);
			timeout_correct(base, &off);
		}
		base->event_tv = tv;

		if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK))
			timeout_next(base, &tv);
		else
			timerclear(&tv);

		/* If we have no events, we just exit */
		if (!event_haveevents(base)) {
			event_debug(("%s: no events registered.", __func__));
			return (1);
		}

		res = evsel->dispatch(base, evbase, &tv);

		if (res == -1)
			return (-1);

		timeout_process(base);

		if (base->event_count_active) {
			event_process_active(base);
			if (!base->event_count_active && (flags & EVLOOP_ONCE))
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}

	event_debug(("%s: asked to terminate loop.", __func__));
	return (0);
}
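/*
 * The flags accepted above change when the loop returns: EVLOOP_ONCE
 * blocks until at least one event fires and its callbacks have run,
 * then returns; EVLOOP_NONBLOCK polls once with a zero timeout and
 * returns without waiting. A hand-rolled loop built on the nonblocking
 * mode might look like this (purely illustrative; do_other_work() is
 * made up):
 *
 *	for (;;) {
 *		do_other_work();
 *		if (event_loop(EVLOOP_NONBLOCK) == 1)
 *			break;		// no events registered anymore
 *	}
 */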
/* Sets up an event for processing once */
struct event_once {
	struct event ev;

	void (*cb)(int, short, void *);
	void *arg;
};

/* One-time callback, it deletes itself */
static void
event_once_cb(int fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	(*eonce->cb)(fd, events, eonce->arg);
	free(eonce);
}

/* Schedules an event once */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, struct timeval *tv)
{
	struct event_once *eonce;
	struct timeval etv;
	int res;

	/* We cannot support signals that just fire once */
	if (events & EV_SIGNAL)
		return (-1);

	if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
		return (-1);

	eonce->cb = callback;
	eonce->arg = arg;

	if (events == EV_TIMEOUT) {
		if (tv == NULL) {
			timerclear(&etv);
			tv = &etv;
		}

		evtimer_set(&eonce->ev, event_once_cb, eonce);
	} else if (events & (EV_READ|EV_WRITE)) {
		events &= EV_READ|EV_WRITE;

		event_set(&eonce->ev, fd, events, event_once_cb, eonce);
	} else {
		/* Bad event combination */
		free(eonce);
		return (-1);
	}

	res = event_add(&eonce->ev, tv);
	if (res != 0) {
		free(eonce);
		return (res);
	}

	return (0);
}
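/*
 * event_once() is the simplest way to get a fire-and-forget timer or a
 * single readiness notification: the heap-allocated wrapper frees itself
 * after the callback runs. Illustrative one-shot timeout (the callback
 * name is made up for the example):
 *
 *	static void
 *	on_timer(int fd, short events, void *arg)
 *	{
 *		... runs exactly once, five seconds from now ...
 *	}
 *
 *	struct timeval tv = { 5, 0 };
 *	event_once(-1, EV_TIMEOUT, on_timer, NULL, &tv);
 */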
void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
	/* Take the current base - caller needs to set the real base later */
	ev->ev_base = current_base;

	ev->ev_callback = callback;
	ev->ev_arg = arg;
	ev->ev_fd = fd;
	ev->ev_events = events;
	ev->ev_flags = EVLIST_INIT;
	ev->ev_ncalls = 0;
	ev->ev_pncalls = NULL;

	/* by default, we put new events into the middle priority */
	ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
	/* Only innocent events may be assigned to a different base */
	if (ev->ev_flags != EVLIST_INIT)
		return (-1);

	ev->ev_base = base;
	ev->ev_pri = base->nactivequeues / 2;

	return (0);
}
/*
 * Sets the priority of an event - if the event is already active,
 * changing the priority will fail.
 */
int
event_priority_set(struct event *ev, int pri)
{
	if (ev->ev_flags & EVLIST_ACTIVE)
		return (-1);
	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
		return (-1);

	ev->ev_pri = pri;

	return (0);
}
/*
 * Checks if a specific event is pending or scheduled.
 */
int
event_pending(struct event *ev, short event, struct timeval *tv)
{
	struct timeval now, res;
	int flags = 0;

	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE));
	if (ev->ev_flags & EVLIST_ACTIVE)
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;
	if (ev->ev_flags & EVLIST_SIGNAL)
		flags |= EV_SIGNAL;

	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		gettime(&now);
		timersub(&ev->ev_timeout, &now, &res);
		/* correctly remap to real time */
		gettimeofday(&now, NULL);
		timeradd(&now, &res, tv);
	}

	return (flags & event);
}
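/*
 * The return value is the subset of the queried flags the event is
 * actually pending on, so callers can treat it as a boolean.
 * Illustrative check for an armed timeout (ev is assumed to have been
 * event_add()ed with a timeout):
 *
 *	struct timeval expires;
 *	if (event_pending(&ev, EV_TIMEOUT, &expires)) {
 *		// expires now holds the wall-clock expiry time
 *	}
 */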
int
event_add(struct event *ev, struct timeval *tv)
{
	struct event_base *base = ev->ev_base;
	const struct eventop *evsel = base->evsel;
	void *evbase = base->evbase;

	event_debug((
	    "event_add: event: %p, %s%s%scall %p",
	    ev,
	    ev->ev_events & EV_READ ? "EV_READ " : " ",
	    ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
	    tv ? "EV_TIMEOUT " : " ",
	    ev->ev_callback));

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	if (tv != NULL) {
		struct timeval now;

		if (ev->ev_flags & EVLIST_TIMEOUT)
			event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			/* See if we are just active executing this
			 * event in a loop
			 */
			if (ev->ev_ncalls && ev->ev_pncalls) {
				/* Abort loop */
				*ev->ev_pncalls = 0;
			}

			event_queue_remove(base, ev, EVLIST_ACTIVE);
		}

		gettime(&now);
		timeradd(&now, tv, &ev->ev_timeout);

		event_debug((
		    "event_add: timeout in %ld seconds, call %p",
		    (long)tv->tv_sec, ev->ev_callback));

		event_queue_insert(base, ev, EVLIST_TIMEOUT);
	}

	if ((ev->ev_events & (EV_READ|EV_WRITE)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
		event_queue_insert(base, ev, EVLIST_INSERTED);

		return (evsel->add(evbase, ev));
	} else if ((ev->ev_events & EV_SIGNAL) &&
	    !(ev->ev_flags & EVLIST_SIGNAL)) {
		event_queue_insert(base, ev, EVLIST_SIGNAL);

		return (evsel->add(evbase, ev));
	}

	return (0);
}
int
event_del(struct event *ev)
{
	struct event_base *base;
	const struct eventop *evsel;
	void *evbase;

	event_debug(("event_del: %p, callback %p",
	    ev, ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	base = ev->ev_base;
	evsel = base->evsel;
	evbase = base->evbase;

	assert(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_ncalls && ev->ev_pncalls) {
		/* Abort loop */
		*ev->ev_pncalls = 0;
	}

	if (ev->ev_flags & EVLIST_TIMEOUT)
		event_queue_remove(base, ev, EVLIST_TIMEOUT);

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove(base, ev, EVLIST_ACTIVE);

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove(base, ev, EVLIST_INSERTED);
		return (evsel->del(evbase, ev));
	} else if (ev->ev_flags & EVLIST_SIGNAL) {
		event_queue_remove(base, ev, EVLIST_SIGNAL);
		return (evsel->del(evbase, ev));
	}

	return (0);
}
void
event_active(struct event *ev, int res, short ncalls)
{
	/* We get different kinds of events, add them together */
	if (ev->ev_flags & EVLIST_ACTIVE) {
		ev->ev_res |= res;
		return;
	}

	ev->ev_res = res;
	ev->ev_ncalls = ncalls;
	ev->ev_pncalls = NULL;
	event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
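/*
 * event_active() can also be called from application code to force a
 * callback onto the active queue for the next pass of the loop, without
 * the underlying condition having occurred. Illustrative use:
 *
 *	event_active(&ev, EV_READ, 1);	// run ev's callback once,
 *					// as if the fd became readable
 */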
int
timeout_next(struct event_base *base, struct timeval *tv)
{
	struct timeval dflt = TIMEOUT_DEFAULT;

	struct timeval now;
	struct event *ev;

	if ((ev = RB_MIN(event_tree, &base->timetree)) == NULL) {
		*tv = dflt;
		return (0);
	}

	if (gettime(&now) == -1)
		return (-1);

	if (timercmp(&ev->ev_timeout, &now, <=)) {
		timerclear(tv);
		return (0);
	}

	timersub(&ev->ev_timeout, &now, tv);

	assert(tv->tv_sec >= 0);
	assert(tv->tv_usec >= 0);

	event_debug(("timeout_next: in %ld seconds", (long)tv->tv_sec));
	return (0);
}
static void
timeout_correct(struct event_base *base, struct timeval *off)
{
	struct event *ev;

	/*
	 * We can modify the key element of each node without destroying
	 * the tree, because we apply the same offset to every node, so
	 * the relative ordering is preserved.
	 */
	RB_FOREACH(ev, event_tree, &base->timetree)
		timersub(&ev->ev_timeout, off, &ev->ev_timeout);
}
void
timeout_process(struct event_base *base)
{
	struct timeval now;
	struct event *ev, *next;

	gettime(&now);

	for (ev = RB_MIN(event_tree, &base->timetree); ev; ev = next) {
		if (timercmp(&ev->ev_timeout, &now, >))
			break;
		next = RB_NEXT(event_tree, &base->timetree, ev);

		event_queue_remove(base, ev, EVLIST_TIMEOUT);

		/* delete this event from the I/O queues */
		event_del(ev);

		event_debug(("timeout_process: call %p",
		    ev->ev_callback));
		event_active(ev, EV_TIMEOUT, 1);
	}
}
void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (!(ev->ev_flags & queue))
		event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
		    ev, ev->ev_fd, queue);

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count--;

	ev->ev_flags &= ~queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active--;
		TAILQ_REMOVE(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_REMOVE(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT:
		RB_REMOVE(event_tree, &base->timetree, ev);
		break;
	case EVLIST_INSERTED:
		TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
	int docount = 1;

	if (ev->ev_flags & queue) {
		/* Double insertion is possible for active events */
		if (queue & EVLIST_ACTIVE)
			return;

		event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
		    ev, ev->ev_fd, queue);
	}

	if (ev->ev_flags & EVLIST_INTERNAL)
		docount = 0;

	if (docount)
		base->event_count++;

	ev->ev_flags |= queue;
	switch (queue) {
	case EVLIST_ACTIVE:
		if (docount)
			base->event_count_active++;
		TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
		    ev, ev_active_next);
		break;
	case EVLIST_SIGNAL:
		TAILQ_INSERT_TAIL(&signalqueue, ev, ev_signal_next);
		break;
	case EVLIST_TIMEOUT: {
		struct event *tmp = RB_INSERT(event_tree, &base->timetree, ev);
		assert(tmp == NULL);
		break;
	}
	case EVLIST_INSERTED:
		TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
		break;
	default:
		event_errx(1, "%s: unknown queue %x", __func__, queue);
	}
}
/* Functions for debugging */

const char *
event_get_version(void)
{
	return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
	return (current_base->evsel->name);
}