MySQL 5.6.14 Source Code Document
event.c
/*
 * Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#undef WIN32_LEAN_AND_MEAN
#endif
#include <sys/types.h>
#ifdef HAVE_SYS_TIME_H
#include <sys/time.h>
#else
#include <sys/_time.h>
#endif
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <assert.h>
#include <time.h>

#include "event.h"
#include "event-internal.h"
#include "evutil.h"
#include "log.h"
#ifdef HAVE_EVENT_PORTS
extern const struct eventop evportops;
#endif
#ifdef HAVE_SELECT
extern const struct eventop selectops;
#endif
#ifdef HAVE_POLL
extern const struct eventop pollops;
#endif
#ifdef HAVE_EPOLL
extern const struct eventop epollops;
#endif
#ifdef HAVE_WORKING_KQUEUE
extern const struct eventop kqops;
#endif
#ifdef HAVE_DEVPOLL
extern const struct eventop devpollops;
#endif
#ifdef WIN32
extern const struct eventop win32ops;
#endif

/* In order of preference */
static const struct eventop *eventops[] = {
#ifdef HAVE_EVENT_PORTS
    &evportops,
#endif
#ifdef HAVE_WORKING_KQUEUE
    &kqops,
#endif
#ifdef HAVE_EPOLL
    &epollops,
#endif
#ifdef HAVE_DEVPOLL
    &devpollops,
#endif
#ifdef HAVE_POLL
    &pollops,
#endif
#ifdef HAVE_SELECT
    &selectops,
#endif
#ifdef WIN32
    &win32ops,
#endif
    NULL
};
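
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_base_new() below walks this array and keeps the first backend
 * whose init() succeeds, so the order above is the preference order.
 * An application can observe the chosen backend through
 * event_base_get_method(), defined later in this file; setting the
 * EVENT_SHOW_METHOD environment variable makes event_base_new() log the
 * same name.
 */
#if 0
struct event_base *base = event_base_new();
/* Prints e.g. "epoll", "kqueue", or "select", depending on the build. */
printf("libevent backend: %s\n", event_base_get_method(base));
event_base_free(base);
#endif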

/* Global state */
struct event_base *current_base = NULL;
extern struct event_base *evsignal_base;
static int use_monotonic;

/* Handle signals - This is a deprecated interface */
int (*event_sigcb)(void);           /* Signal callback when gotsig is set */
volatile sig_atomic_t event_gotsig; /* Set in signal handler */

/* Prototypes */
static void event_queue_insert(struct event_base *, struct event *, int);
static void event_queue_remove(struct event_base *, struct event *, int);
static int event_haveevents(struct event_base *);

static void event_process_active(struct event_base *);

static int timeout_next(struct event_base *, struct timeval **);
static void timeout_process(struct event_base *);
static void timeout_correct(struct event_base *, struct timeval *);

static void
detect_monotonic(void)
{
#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    struct timespec ts;

    if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
        use_monotonic = 1;
#endif
}

static int
gettime(struct event_base *base, struct timeval *tp)
{
    if (base->tv_cache.tv_sec) {
        *tp = base->tv_cache;
        return (0);
    }

#if defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
    if (use_monotonic) {
        struct timespec ts;

        if (clock_gettime(CLOCK_MONOTONIC, &ts) == -1)
            return (-1);

        tp->tv_sec = ts.tv_sec;
        tp->tv_usec = ts.tv_nsec / 1000;
        return (0);
    }
#endif

    return (evutil_gettimeofday(tp, NULL));
}

struct event_base *
event_init(void)
{
    struct event_base *base = event_base_new();

    if (base != NULL)
        current_base = base;

    return (base);
}

struct event_base *
event_base_new(void)
{
    int i;
    struct event_base *base;

    if ((base = calloc(1, sizeof(struct event_base))) == NULL)
        event_err(1, "%s: calloc", __func__);

    event_sigcb = NULL;
    event_gotsig = 0;

    detect_monotonic();
    gettime(base, &base->event_tv);

    min_heap_ctor(&base->timeheap);
    TAILQ_INIT(&base->eventqueue);
    base->sig.ev_signal_pair[0] = -1;
    base->sig.ev_signal_pair[1] = -1;

    base->evbase = NULL;
    for (i = 0; eventops[i] && !base->evbase; i++) {
        base->evsel = eventops[i];

        base->evbase = base->evsel->init(base);
    }

    if (base->evbase == NULL)
        event_errx(1, "%s: no event mechanism available", __func__);

    if (getenv("EVENT_SHOW_METHOD"))
        event_msgx("libevent using: %s\n",
            base->evsel->name);

    /* allocate a single active event queue */
    event_base_priority_init(base, 1);

    return (base);
}
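
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_init() stores its result in the global current_base, which the
 * base-less wrappers in this file (event_dispatch(), event_loopexit(),
 * event_once(), ...) use implicitly; event_base_new() leaves the choice
 * of base to the caller.
 */
#if 0
/* Legacy style: one implicit, global base. */
event_init();
/* ... event_set()/event_add() calls ... */
event_dispatch();

/* Explicit style: the caller owns the base and passes it around. */
struct event_base *base = event_base_new();
/* ... event_base_set()/event_add() calls ... */
event_base_dispatch(base);
event_base_free(base);
#endif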

void
event_base_free(struct event_base *base)
{
    int i, n_deleted = 0;
    struct event *ev;

    if (base == NULL && current_base)
        base = current_base;
    if (base == current_base)
        current_base = NULL;

    /* XXX(niels) - check for internal events first */
    assert(base);
    /* Delete all non-internal events. */
    for (ev = TAILQ_FIRST(&base->eventqueue); ev; ) {
        struct event *next = TAILQ_NEXT(ev, ev_next);
        if (!(ev->ev_flags & EVLIST_INTERNAL)) {
            event_del(ev);
            ++n_deleted;
        }
        ev = next;
    }
    while ((ev = min_heap_top(&base->timeheap)) != NULL) {
        event_del(ev);
        ++n_deleted;
    }

    for (i = 0; i < base->nactivequeues; ++i) {
        for (ev = TAILQ_FIRST(base->activequeues[i]); ev; ) {
            struct event *next = TAILQ_NEXT(ev, ev_active_next);
            if (!(ev->ev_flags & EVLIST_INTERNAL)) {
                event_del(ev);
                ++n_deleted;
            }
            ev = next;
        }
    }

    if (n_deleted)
        event_debug(("%s: %d events were still set in base",
            __func__, n_deleted));

    if (base->evsel->dealloc != NULL)
        base->evsel->dealloc(base, base->evbase);

    for (i = 0; i < base->nactivequeues; ++i)
        assert(TAILQ_EMPTY(base->activequeues[i]));

    assert(min_heap_empty(&base->timeheap));
    min_heap_dtor(&base->timeheap);

    for (i = 0; i < base->nactivequeues; ++i)
        free(base->activequeues[i]);
    free(base->activequeues);

    assert(TAILQ_EMPTY(&base->eventqueue));

    free(base);
}

/* Reinitialize the event base after a fork. */
int
event_reinit(struct event_base *base)
{
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    int res = 0;
    struct event *ev;

    /* check if this event mechanism requires reinit */
    if (!evsel->need_reinit)
        return (0);

    /* prevent internal delete */
    if (base->sig.ev_signal_added) {
        /* we cannot call event_del here because the base has
         * not been reinitialized yet. */
        event_queue_remove(base, &base->sig.ev_signal,
            EVLIST_INSERTED);
        if (base->sig.ev_signal.ev_flags & EVLIST_ACTIVE)
            event_queue_remove(base, &base->sig.ev_signal,
                EVLIST_ACTIVE);
        base->sig.ev_signal_added = 0;
    }

    if (base->evsel->dealloc != NULL)
        base->evsel->dealloc(base, base->evbase);
    evbase = base->evbase = evsel->init(base);
    if (base->evbase == NULL)
        event_errx(1, "%s: could not reinitialize event mechanism",
            __func__);

    TAILQ_FOREACH(ev, &base->eventqueue, ev_next) {
        if (evsel->add(evbase, ev) == -1)
            res = -1;
    }

    return (res);
}
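
/*
 * Usage sketch (illustrative, not part of the original source): kernel
 * backend state such as an epoll or kqueue descriptor does not carry
 * over cleanly to a forked child, so a child that keeps using an
 * inherited base should reinitialize it before dispatching again.
 */
#if 0
pid_t pid = fork();
if (pid == 0) {
    /* Child: rebuild the backend and re-add all inserted events. */
    if (event_reinit(base) == -1)
        /* handle the error */;
    event_base_dispatch(base);
}
#endif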

int
event_priority_init(int npriorities)
{
    return event_base_priority_init(current_base, npriorities);
}

int
event_base_priority_init(struct event_base *base, int npriorities)
{
    int i;

    if (base->event_count_active)
        return (-1);

    if (base->nactivequeues && npriorities != base->nactivequeues) {
        for (i = 0; i < base->nactivequeues; ++i) {
            free(base->activequeues[i]);
        }
        free(base->activequeues);
    }

    /* Allocate our priority queues; calloc() multiplies by the element
     * count, so each element is a single list pointer. */
    base->nactivequeues = npriorities;
    base->activequeues = (struct event_list **)calloc(base->nactivequeues,
        sizeof(struct event_list *));
    if (base->activequeues == NULL)
        event_err(1, "%s: calloc", __func__);

    for (i = 0; i < base->nactivequeues; ++i) {
        base->activequeues[i] = malloc(sizeof(struct event_list));
        if (base->activequeues[i] == NULL)
            event_err(1, "%s: malloc", __func__);
        TAILQ_INIT(base->activequeues[i]);
    }

    return (0);
}
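
/*
 * Usage sketch (illustrative, not part of the original source): setting
 * up three priority levels. event_priority_set() is defined later in
 * this file; ctrl_fd, data_fd, on_ctrl, and on_data are placeholder
 * names.
 */
#if 0
struct event_base *base = event_base_new();
struct event urgent, bulk;

event_base_priority_init(base, 3);  /* priorities 0 (first) .. 2 (last) */

event_set(&urgent, ctrl_fd, EV_READ | EV_PERSIST, on_ctrl, NULL);
event_base_set(base, &urgent);
event_priority_set(&urgent, 0);     /* processed before priorities 1 and 2 */

event_set(&bulk, data_fd, EV_READ | EV_PERSIST, on_data, NULL);
event_base_set(base, &bulk);
event_priority_set(&bulk, 2);

event_add(&urgent, NULL);
event_add(&bulk, NULL);
event_base_dispatch(base);
#endif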

int
event_haveevents(struct event_base *base)
{
    return (base->event_count > 0);
}

/*
 * Active events are stored in priority queues. Lower-numbered priorities
 * are always processed before higher-numbered ones, so events at a lower
 * priority number can starve those at a higher one.
 */

static void
event_process_active(struct event_base *base)
{
    struct event *ev;
    struct event_list *activeq = NULL;
    int i;
    short ncalls;

    for (i = 0; i < base->nactivequeues; ++i) {
        if (TAILQ_FIRST(base->activequeues[i]) != NULL) {
            activeq = base->activequeues[i];
            break;
        }
    }

    assert(activeq != NULL);

    for (ev = TAILQ_FIRST(activeq); ev; ev = TAILQ_FIRST(activeq)) {
        if (ev->ev_events & EV_PERSIST)
            event_queue_remove(base, ev, EVLIST_ACTIVE);
        else
            event_del(ev);

        /* Allows deletes to work */
        ncalls = ev->ev_ncalls;
        ev->ev_pncalls = &ncalls;
        while (ncalls) {
            ncalls--;
            ev->ev_ncalls = ncalls;
            (*ev->ev_callback)((int)ev->ev_fd, ev->ev_res, ev->ev_arg);
            if (event_gotsig || base->event_break)
                return;
        }
    }
}

/*
 * Wait continuously for events. We exit only if no events are left.
 */

int
event_dispatch(void)
{
    return (event_loop(0));
}

int
event_base_dispatch(struct event_base *event_base)
{
    return (event_base_loop(event_base, 0));
}
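
/*
 * Usage sketch (illustrative, not part of the original source): the
 * canonical libevent 1.x flow - initialize a base, register at least one
 * event, then dispatch. The on_timer callback name is a placeholder.
 */
#if 0
static void
on_timer(int fd, short events, void *arg)
{
    printf("timer fired\n");
}

int
main(void)
{
    struct event ev;
    struct timeval tv = { 2, 0 };   /* 2 seconds */

    event_init();
    evtimer_set(&ev, on_timer, NULL);
    event_add(&ev, &tv);
    return (event_dispatch());      /* returns once no events remain */
}
#endif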

const char *
event_base_get_method(struct event_base *base)
{
    assert(base);
    return (base->evsel->name);
}

static void
event_loopexit_cb(int fd, short what, void *arg)
{
    struct event_base *base = arg;
    base->event_gotterm = 1;
}

/* not thread safe */
int
event_loopexit(const struct timeval *tv)
{
    return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
        current_base, tv));
}

int
event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
{
    return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
        event_base, tv));
}

/* not thread safe */
int
event_loopbreak(void)
{
    return (event_base_loopbreak(current_base));
}

int
event_base_loopbreak(struct event_base *event_base)
{
    if (event_base == NULL)
        return (-1);

    event_base->event_break = 1;
    return (0);
}
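
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_base_loopexit() schedules a one-shot timeout and lets the
 * current batch of active callbacks finish, while event_base_loopbreak()
 * makes the loop stop as soon as the running callback returns. Assumes a
 * base set up elsewhere; on_shutdown is a placeholder.
 */
#if 0
struct timeval tv = { 10, 0 };
event_base_loopexit(base, &tv);     /* stop dispatching after ~10 seconds */

/* ... or, from inside an event callback: */
static void
on_shutdown(int fd, short events, void *arg)
{
    event_base_loopbreak((struct event_base *)arg);     /* stop now */
}
#endif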

/* not thread safe */
int
event_loop(int flags)
{
    return event_base_loop(current_base, flags);
}

int
event_base_loop(struct event_base *base, int flags)
{
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    struct timeval tv;
    struct timeval *tv_p;
    int res, done;

    /* clear time cache */
    base->tv_cache.tv_sec = 0;

    if (base->sig.ev_signal_added)
        evsignal_base = base;
    done = 0;
    while (!done) {
        /* Terminate the loop if we have been asked to */
        if (base->event_gotterm) {
            base->event_gotterm = 0;
            break;
        }

        if (base->event_break) {
            base->event_break = 0;
            break;
        }

        /* You cannot use this interface for multi-threaded apps */
        while (event_gotsig) {
            event_gotsig = 0;
            if (event_sigcb) {
                res = (*event_sigcb)();
                if (res == -1) {
                    errno = EINTR;
                    return (-1);
                }
            }
        }

        timeout_correct(base, &tv);

        tv_p = &tv;
        if (!base->event_count_active && !(flags & EVLOOP_NONBLOCK)) {
            timeout_next(base, &tv_p);
        } else {
            /*
             * if we have active events, we just poll new events
             * without waiting.
             */
            evutil_timerclear(&tv);
        }

        /* If we have no events, we just exit */
        if (!event_haveevents(base)) {
            event_debug(("%s: no events registered.", __func__));
            return (1);
        }

        /* update the last known time */
        gettime(base, &base->event_tv);

        /* clear time cache */
        base->tv_cache.tv_sec = 0;

        res = evsel->dispatch(base, evbase, tv_p);

        if (res == -1)
            return (-1);
        gettime(base, &base->tv_cache);

        timeout_process(base);

        if (base->event_count_active) {
            event_process_active(base);
            if (!base->event_count_active && (flags & EVLOOP_ONCE))
                done = 1;
        } else if (flags & EVLOOP_NONBLOCK)
            done = 1;
    }

    /* clear time cache */
    base->tv_cache.tv_sec = 0;

    event_debug(("%s: asked to terminate loop.", __func__));
    return (0);
}

/* Sets up an event for processing once */

struct event_once {
    struct event ev;

    void (*cb)(int, short, void *);
    void *arg;
};

/* One-time callback; it deletes itself */

static void
event_once_cb(int fd, short events, void *arg)
{
    struct event_once *eonce = arg;

    (*eonce->cb)(fd, events, eonce->arg);
    free(eonce);
}

/* Not thread safe; schedules the event once. */
int
event_once(int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
    return event_base_once(current_base, fd, events, callback, arg, tv);
}

/* Schedules an event once */
int
event_base_once(struct event_base *base, int fd, short events,
    void (*callback)(int, short, void *), void *arg, const struct timeval *tv)
{
    struct event_once *eonce;
    struct timeval etv;
    int res;

    /* We cannot support signals that just fire once */
    if (events & EV_SIGNAL)
        return (-1);

    if ((eonce = calloc(1, sizeof(struct event_once))) == NULL)
        return (-1);

    eonce->cb = callback;
    eonce->arg = arg;

    if (events == EV_TIMEOUT) {
        if (tv == NULL) {
            evutil_timerclear(&etv);
            tv = &etv;
        }

        evtimer_set(&eonce->ev, event_once_cb, eonce);
    } else if (events & (EV_READ|EV_WRITE)) {
        events &= EV_READ|EV_WRITE;

        event_set(&eonce->ev, fd, events, event_once_cb, eonce);
    } else {
        /* Bad event combination */
        free(eonce);
        return (-1);
    }

    res = event_base_set(base, &eonce->ev);
    if (res == 0)
        res = event_add(&eonce->ev, tv);
    if (res != 0) {
        free(eonce);
        return (res);
    }

    return (0);
}
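
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_base_once() allocates the event wrapper itself and frees it
 * after the single callback, so there is no struct event for the caller
 * to manage. on_readable and sock_fd are placeholders.
 */
#if 0
static void
on_readable(int fd, short events, void *arg)
{
    /* fires at most once; the wrapper frees itself afterwards */
}

event_base_once(base, sock_fd, EV_READ, on_readable, NULL, NULL);

/* Or a pure one-shot timer (no fd): */
struct timeval tv = { 1, 500000 };  /* 1.5 seconds */
event_base_once(base, -1, EV_TIMEOUT, on_readable, NULL, &tv);
#endif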

void
event_set(struct event *ev, int fd, short events,
    void (*callback)(int, short, void *), void *arg)
{
    /* Take the current base - caller needs to set the real base later */
    ev->ev_base = current_base;

    ev->ev_callback = callback;
    ev->ev_arg = arg;
    ev->ev_fd = fd;
    ev->ev_events = events;
    ev->ev_res = 0;
    ev->ev_flags = EVLIST_INIT;
    ev->ev_ncalls = 0;
    ev->ev_pncalls = NULL;

    min_heap_elem_init(ev);

    /* by default, we put new events into the middle priority */
    if (current_base)
        ev->ev_pri = current_base->nactivequeues / 2;
}

int
event_base_set(struct event_base *base, struct event *ev)
{
    /* Only innocent events may be assigned to a different base */
    if (ev->ev_flags != EVLIST_INIT)
        return (-1);

    ev->ev_base = base;
    ev->ev_pri = base->nactivequeues / 2;

    return (0);
}
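
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_set() binds the event to the global current_base, so code using
 * an explicit base must rebind with event_base_set() before calling
 * event_add(). Rebinding only succeeds while the event is still in its
 * initialized state. sock_fd and on_readable are placeholders.
 */
#if 0
struct event ev;

event_set(&ev, sock_fd, EV_READ | EV_PERSIST, on_readable, NULL);
if (event_base_set(base, &ev) == -1)
    /* the event was already added or active */;
event_add(&ev, NULL);   /* NULL timeout: wait indefinitely for EV_READ */
#endif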

/*
 * Sets the priority of an event - if the event is already active,
 * changing its priority fails.
 */

int
event_priority_set(struct event *ev, int pri)
{
    if (ev->ev_flags & EVLIST_ACTIVE)
        return (-1);
    if (pri < 0 || pri >= ev->ev_base->nactivequeues)
        return (-1);

    ev->ev_pri = pri;

    return (0);
}

/*
 * Checks if a specific event is pending or scheduled.
 */

int
event_pending(struct event *ev, short event, struct timeval *tv)
{
    struct timeval now, res;
    int flags = 0;

    if (ev->ev_flags & EVLIST_INSERTED)
        flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL));
    if (ev->ev_flags & EVLIST_ACTIVE)
        flags |= ev->ev_res;
    if (ev->ev_flags & EVLIST_TIMEOUT)
        flags |= EV_TIMEOUT;

    event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_SIGNAL);

    /* See if there is a timeout that we should report */
    if (tv != NULL && (flags & event & EV_TIMEOUT)) {
        gettime(ev->ev_base, &now);
        evutil_timersub(&ev->ev_timeout, &now, &res);
        /* correctly remap to real time */
        evutil_gettimeofday(&now, NULL);
        evutil_timeradd(&now, &res, tv);
    }

    return (flags & event);
}
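
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_pending() returns the subset of the queried flags that are set.
 * Internal timeouts may run on the monotonic clock, which is why the
 * expiry above is remapped to wall-clock time before being reported.
 * 'ev' stands for a previously added event.
 */
#if 0
struct timeval expires;

if (event_pending(&ev, EV_TIMEOUT, &expires))
    /* 'expires' now holds the absolute wall-clock expiry time */;

if (event_pending(&ev, EV_READ | EV_WRITE, NULL))
    /* the event is added for I/O, or active with an I/O result */;
#endif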

int
event_add(struct event *ev, const struct timeval *tv)
{
    struct event_base *base = ev->ev_base;
    const struct eventop *evsel = base->evsel;
    void *evbase = base->evbase;
    int res = 0;

    event_debug((
        "event_add: event: %p, %s%s%scall %p",
        ev,
        ev->ev_events & EV_READ ? "EV_READ " : " ",
        ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
        tv ? "EV_TIMEOUT " : " ",
        ev->ev_callback));

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    /*
     * prepare for timeout insertion further below; if we get a
     * failure on any step, we should not change any state.
     */
    if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
        if (min_heap_reserve(&base->timeheap,
            1 + min_heap_size(&base->timeheap)) == -1)
            return (-1);    /* ENOMEM == errno */
    }

    if ((ev->ev_events & (EV_READ|EV_WRITE|EV_SIGNAL)) &&
        !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE))) {
        res = evsel->add(evbase, ev);
        if (res != -1)
            event_queue_insert(base, ev, EVLIST_INSERTED);
    }

    /*
     * we should change the timeout state only if the previous event
     * addition succeeded.
     */
    if (res != -1 && tv != NULL) {
        struct timeval now;

        /*
         * we already reserved memory above for the case where we
         * are not replacing an existing timeout.
         */
        if (ev->ev_flags & EVLIST_TIMEOUT)
            event_queue_remove(base, ev, EVLIST_TIMEOUT);

        /* Check if it is active due to a timeout. Rescheduling
         * this timeout before the callback can be executed
         * removes it from the active list. */
        if ((ev->ev_flags & EVLIST_ACTIVE) &&
            (ev->ev_res & EV_TIMEOUT)) {
            /* See if we are just actively executing this
             * event in a loop
             */
            if (ev->ev_ncalls && ev->ev_pncalls) {
                /* Abort loop */
                *ev->ev_pncalls = 0;
            }

            event_queue_remove(base, ev, EVLIST_ACTIVE);
        }

        gettime(base, &now);
        evutil_timeradd(&now, tv, &ev->ev_timeout);

        event_debug((
            "event_add: timeout in %ld seconds, call %p",
            tv->tv_sec, ev->ev_callback));

        event_queue_insert(base, ev, EVLIST_TIMEOUT);
    }

    return (res);
}
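
/*
 * Usage sketch (illustrative, not part of the original source): as the
 * EVLIST_TIMEOUT branch above shows, calling event_add() on an event
 * whose timeout is already pending replaces the old timeout. That makes
 * re-adding the natural way to implement an idle timer that is pushed
 * back on every activity. on_activity and the idle-timer event passed
 * through arg are placeholders.
 */
#if 0
static void
on_activity(int fd, short events, void *arg)
{
    struct event *idle_timer = arg;
    struct timeval tv = { 30, 0 };

    /* ... handle the I/O ... */
    event_add(idle_timer, &tv);     /* reschedule: idle timeout in 30s again */
}
#endif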

int
event_del(struct event *ev)
{
    struct event_base *base;
    const struct eventop *evsel;
    void *evbase;

    event_debug(("event_del: %p, callback %p",
        ev, ev->ev_callback));

    /* An event without a base has not been added */
    if (ev->ev_base == NULL)
        return (-1);

    base = ev->ev_base;
    evsel = base->evsel;
    evbase = base->evbase;

    assert(!(ev->ev_flags & ~EVLIST_ALL));

    /* See if we are just actively executing this event in a loop */
    if (ev->ev_ncalls && ev->ev_pncalls) {
        /* Abort loop */
        *ev->ev_pncalls = 0;
    }

    if (ev->ev_flags & EVLIST_TIMEOUT)
        event_queue_remove(base, ev, EVLIST_TIMEOUT);

    if (ev->ev_flags & EVLIST_ACTIVE)
        event_queue_remove(base, ev, EVLIST_ACTIVE);

    if (ev->ev_flags & EVLIST_INSERTED) {
        event_queue_remove(base, ev, EVLIST_INSERTED);
        return (evsel->del(evbase, ev));
    }

    return (0);
}

void
event_active(struct event *ev, int res, short ncalls)
{
    /* We get different kinds of events, add them together */
    if (ev->ev_flags & EVLIST_ACTIVE) {
        ev->ev_res |= res;
        return;
    }

    ev->ev_res = res;
    ev->ev_ncalls = ncalls;
    ev->ev_pncalls = NULL;
    event_queue_insert(ev->ev_base, ev, EVLIST_ACTIVE);
}
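
/*
 * Usage sketch (illustrative, not part of the original source):
 * event_active() lets application code mark an event active by hand,
 * without the backend having seen any I/O; the callback then runs on the
 * next pass of event_process_active(). 'ev' stands for an event set up
 * elsewhere.
 */
#if 0
/* Run ev's callback once on the next loop iteration, as if the
 * descriptor had become readable. */
event_active(&ev, EV_READ, 1);
#endif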

static int
timeout_next(struct event_base *base, struct timeval **tv_p)
{
    struct timeval now;
    struct event *ev;
    struct timeval *tv = *tv_p;

    if ((ev = min_heap_top(&base->timeheap)) == NULL) {
        /* if no time-based events are active wait for I/O */
        *tv_p = NULL;
        return (0);
    }

    if (gettime(base, &now) == -1)
        return (-1);

    if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
        evutil_timerclear(tv);
        return (0);
    }

    evutil_timersub(&ev->ev_timeout, &now, tv);

    assert(tv->tv_sec >= 0);
    assert(tv->tv_usec >= 0);

    event_debug(("timeout_next: in %ld seconds", tv->tv_sec));
    return (0);
}

/*
 * Determines if the time is running backwards by comparing the current
 * time against the last time we checked. Not needed when using a
 * monotonic clock.
 */

static void
timeout_correct(struct event_base *base, struct timeval *tv)
{
    struct event **pev;
    unsigned int size;
    struct timeval off;

    if (use_monotonic)
        return;

    /* Check if time is running backwards */
    gettime(base, tv);
    if (evutil_timercmp(tv, &base->event_tv, >=)) {
        base->event_tv = *tv;
        return;
    }

    event_debug(("%s: time is running backwards, corrected",
        __func__));
    evutil_timersub(&base->event_tv, tv, &off);

    /*
     * We can modify the key element of each heap node without destroying
     * the heap order, because we subtract the same offset from all of
     * them.
     */
    pev = base->timeheap.p;
    size = base->timeheap.n;
    for (; size-- > 0; ++pev) {
        struct timeval *ev_tv = &(**pev).ev_timeout;
        evutil_timersub(ev_tv, &off, ev_tv);
    }
    /* Now remember what the new time turned out to be. */
    base->event_tv = *tv;
}
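
/*
 * Worked example (illustrative, not part of the original source):
 * suppose the last observed time was t = 1000.0 s and the clock now
 * reads t = 990.0 s (it jumped back 10 s). Then
 * off = 1000.0 - 990.0 = 10.0 s, and a timeout scheduled for
 * t = 1005.0 s becomes 1005.0 - 10.0 = 995.0 s, i.e. it still fires
 * 5 seconds from "now". The relative order of the timeouts is preserved
 * because every key shrinks by the same offset.
 */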

void
timeout_process(struct event_base *base)
{
    struct timeval now;
    struct event *ev;

    if (min_heap_empty(&base->timeheap))
        return;

    gettime(base, &now);

    while ((ev = min_heap_top(&base->timeheap))) {
        if (evutil_timercmp(&ev->ev_timeout, &now, >))
            break;

        /* delete this event from the I/O queues */
        event_del(ev);

        event_debug(("timeout_process: call %p",
            ev->ev_callback));
        event_active(ev, EV_TIMEOUT, 1);
    }
}

void
event_queue_remove(struct event_base *base, struct event *ev, int queue)
{
    if (!(ev->ev_flags & queue))
        event_errx(1, "%s: %p(fd %d) not on queue %x", __func__,
            ev, ev->ev_fd, queue);

    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count--;

    ev->ev_flags &= ~queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_REMOVE(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active--;
        TAILQ_REMOVE(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT:
        min_heap_erase(&base->timeheap, ev);
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}

void
event_queue_insert(struct event_base *base, struct event *ev, int queue)
{
    if (ev->ev_flags & queue) {
        /* Double insertion is possible for active events */
        if (queue & EVLIST_ACTIVE)
            return;

        event_errx(1, "%s: %p(fd %d) already on queue %x", __func__,
            ev, ev->ev_fd, queue);
    }

    if (~ev->ev_flags & EVLIST_INTERNAL)
        base->event_count++;

    ev->ev_flags |= queue;
    switch (queue) {
    case EVLIST_INSERTED:
        TAILQ_INSERT_TAIL(&base->eventqueue, ev, ev_next);
        break;
    case EVLIST_ACTIVE:
        base->event_count_active++;
        TAILQ_INSERT_TAIL(base->activequeues[ev->ev_pri],
            ev, ev_active_next);
        break;
    case EVLIST_TIMEOUT:
        min_heap_push(&base->timeheap, ev);
        break;
    default:
        event_errx(1, "%s: unknown queue %x", __func__, queue);
    }
}

/* Functions for debugging */

const char *
event_get_version(void)
{
    return (VERSION);
}

/*
 * No thread-safe interface needed - the information should be the same
 * for all threads.
 */

const char *
event_get_method(void)
{
    return (current_base->evsel->name);
}