OpenVPN
event.c
1/*
2 * OpenVPN -- An application to securely tunnel IP networks
3 * over a single TCP/UDP port, with support for SSL/TLS-based
4 * session authentication and key exchange,
5 * packet encryption, packet authentication, and
6 * packet compression.
7 *
8 * Copyright (C) 2002-2025 OpenVPN Inc <sales@openvpn.net>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2
12 * as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License along
20 * with this program; if not, see <https://www.gnu.org/licenses/>.
21 */
22
23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26
27#include "syshead.h"
28
29#include "buffer.h"
30#include "error.h"
31#include "integer.h"
32#include "event.h"
33#include "fdmisc.h"
34
35#if EPOLL
36#include <sys/epoll.h>
37#endif
38
39#include "memdbg.h"
40
41/*
42 * Some OSes will prefer select() over poll()
43 * when both are available.
44 */
45#if defined(TARGET_DARWIN)
46#define SELECT_PREFERRED_OVER_POLL
47#endif
48
49/*
50 * All non-windows OSes are assumed to have select()
51 */
52#ifdef _WIN32
53#define SELECT 0
54#else
55#define SELECT 1
56#endif
57
58/*
59 * This should be set to the highest file descriptor
60 * which can be used in one of the FD_ macros.
61 */
62#ifdef FD_SETSIZE
63#define SELECT_MAX_FDS FD_SETSIZE
64#else
65#define SELECT_MAX_FDS 256
66#endif
67
77static inline int
78tv_to_ms_timeout(const struct timeval *tv)
79{
80 if (tv->tv_sec == 0 && tv->tv_usec == 0)
81 {
82 return 0;
83 }
84 else
85 {
86 /* might overflow but not for practically useful numbers */
87 return max_int((int)(tv->tv_sec * 1000 + (tv->tv_usec + 500) / 1000), 1);
88 }
89}
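/*
 * Illustrative arithmetic (annotation, not from the original source):
 * with the rounding and clamping above, a zero timeval maps to 0
 * (return immediately), while any non-zero timeval yields at least
 * 1 ms so sub-millisecond timeouts cannot degenerate into a busy loop:
 *
 *   { .tv_sec = 0, .tv_usec = 0 }      -> 0
 *   { .tv_sec = 0, .tv_usec = 400 }    -> max_int(0, 1)    == 1
 *   { .tv_sec = 1, .tv_usec = 250400 } -> max_int(1250, 1) == 1250
 */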
90
91#ifdef _WIN32
92
93struct we_set
94{
95 struct event_set_functions func;
96 bool fast;
97 HANDLE *events;
98 struct event_set_return *esr;
99 int n_events;
100 int capacity;
101};
102
103static inline void
104we_set_event(struct we_set *wes, int i, event_t event, unsigned int rwflags, void *arg)
105{
106 ASSERT(i >= 0 && i < wes->capacity);
107
108 if (rwflags == EVENT_READ)
109 {
110 ASSERT(event->read != NULL);
111 wes->events[i] = event->read;
112 }
113 else if (rwflags == EVENT_WRITE)
114 {
115 ASSERT(event->write != NULL);
116 wes->events[i] = event->write;
117 }
118 else
119 {
120 msg(M_FATAL, "fatal error in we_set_events: rwflags=%d", rwflags);
121 }
122
123 wes->esr[i].rwflags = rwflags;
124 wes->esr[i].arg = arg;
125}
126
127static inline bool
128we_append_event(struct we_set *wes, event_t event, unsigned int rwflags, void *arg)
129{
130 if (rwflags & EVENT_WRITE)
131 {
132 if (wes->n_events < wes->capacity)
133 {
134 we_set_event(wes, wes->n_events, event, EVENT_WRITE, arg);
135 ++wes->n_events;
136 }
137 else
138 {
139 return false;
140 }
141 }
142 if (rwflags & EVENT_READ)
143 {
144 if (wes->n_events < wes->capacity)
145 {
146 we_set_event(wes, wes->n_events, event, EVENT_READ, arg);
147 ++wes->n_events;
148 }
149 else
150 {
151 return false;
152 }
153 }
154 return true;
155}
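/*
 * Note (annotation, not original source): when rwflags requests both
 * EVENT_READ and EVENT_WRITE, we_append_event() consumes two slots in
 * wes->events, one HANDLE per direction.  This is why we_init() below
 * sizes the handle array at *maxevents * 2 (itself capped at
 * WSA_MAXIMUM_WAIT_EVENTS) while reporting back to the caller a
 * *maxevents value capped at WSA_MAXIMUM_WAIT_EVENTS.
 */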
156
157static void
158we_del_event(struct we_set *wes, event_t event)
159{
160 int i, j = 0;
161 const int len = wes->n_events;
162
163 for (i = 0; i < len; ++i)
164 {
165 const HANDLE h = wes->events[i];
166 if (h == event->read || h == event->write)
167 {
168 --wes->n_events;
169 }
170 else
171 {
172 if (i != j)
173 {
174 wes->events[j] = wes->events[i];
175 wes->esr[j] = wes->esr[i];
176 }
177 ++j;
178 }
179 }
180}
181
182static void
183we_del_index(struct we_set *wes, int index)
184{
185 int i;
186 ASSERT(index >= 0 && index < wes->n_events);
187 for (i = index; i < wes->n_events - 1; ++i)
188 {
189 wes->events[i] = wes->events[i + 1];
190 wes->esr[i] = wes->esr[i + 1];
191 }
192 --wes->n_events;
193}
194
195static void
196we_get_rw_indices(struct we_set *wes, event_t event, int *ri, int *wi)
197{
198 int i;
199 *ri = *wi = -1;
200 for (i = 0; i < wes->n_events; ++i)
201 {
202 const HANDLE h = wes->events[i];
203 if (h == event->read)
204 {
205 ASSERT(*ri == -1);
206 *ri = i;
207 }
208 else if (h == event->write)
209 {
210 ASSERT(*wi == -1);
211 *wi = i;
212 }
213 }
214}
215
216static void
217we_free(struct event_set *es)
218{
219 struct we_set *wes = (struct we_set *)es;
220 free(wes->events);
221 free(wes->esr);
222 free(wes);
223}
224
225static void
226we_reset(struct event_set *es)
227{
228 struct we_set *wes = (struct we_set *)es;
229 ASSERT(wes->fast);
230 wes->n_events = 0;
231}
232
233static void
234we_del(struct event_set *es, event_t event)
235{
236 struct we_set *wes = (struct we_set *)es;
237 ASSERT(!wes->fast);
238 we_del_event(wes, event);
239}
240
241static void
242we_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
243{
244 struct we_set *wes = (struct we_set *)es;
245
246 dmsg(D_EVENT_WAIT, "WE_CTL n=%d ev=%p rwflags=0x%04x arg=" ptr_format, wes->n_events, event,
247 rwflags, (ptr_type)arg);
248
249 if (wes->fast)
250 {
251 if (!we_append_event(wes, event, rwflags, arg))
252 {
253 goto err;
254 }
255 }
256 else
257 {
258 int ri, wi;
259 int one = -1;
260 int n = 0;
261
262 we_get_rw_indices(wes, event, &ri, &wi);
263 if (wi >= 0)
264 {
265 one = wi;
266 ++n;
267 }
268 if (ri >= 0)
269 {
270 one = ri;
271 ++n;
272 }
273 switch (rwflags)
274 {
275 case 0:
276 switch (n)
277 {
278 case 0:
279 break;
280
281 case 1:
282 we_del_index(wes, one);
283 break;
284
285 case 2:
286 we_del_event(wes, event);
287 break;
288
289 default:
290 ASSERT(0);
291 }
292 break;
293
294 case EVENT_READ:
295 switch (n)
296 {
297 case 0:
298 if (!we_append_event(wes, event, EVENT_READ, arg))
299 {
300 goto err;
301 }
302 break;
303
304 case 1:
305 we_set_event(wes, one, event, EVENT_READ, arg);
306 break;
307
308 case 2:
309 we_del_index(wes, wi);
310 break;
311
312 default:
313 ASSERT(0);
314 }
315 break;
316
317 case EVENT_WRITE:
318 switch (n)
319 {
320 case 0:
321 if (!we_append_event(wes, event, EVENT_WRITE, arg))
322 {
323 goto err;
324 }
325 break;
326
327 case 1:
328 we_set_event(wes, one, event, EVENT_WRITE, arg);
329 break;
330
331 case 2:
332 we_del_index(wes, ri);
333 break;
334
335 default:
336 ASSERT(0);
337 }
338 break;
339
340 case EVENT_READ | EVENT_WRITE:
341 switch (n)
342 {
343 case 0:
344 if (!we_append_event(wes, event, EVENT_READ | EVENT_WRITE, arg))
345 {
346 goto err;
347 }
348 break;
349
350 case 1:
351 if (ri == -1)
352 {
353 ASSERT(wi != -1);
354 if (!we_append_event(wes, event, EVENT_READ, arg))
355 {
356 goto err;
357 }
358 }
359 else if (wi == -1)
360 {
361 if (!we_append_event(wes, event, EVENT_WRITE, arg))
362 {
363 goto err;
364 }
365 }
366 else
367 {
368 ASSERT(0);
369 }
370 break;
371
372 case 2:
373 break;
374
375 default:
376 ASSERT(0);
377 }
378 break;
379
380 default:
381 msg(M_FATAL, "fatal error in we_ctl: rwflags=%d", rwflags);
382 }
383 }
384 return;
385
386err:
387 msg(D_EVENT_ERRORS,
388 "Error: Windows resource limit WSA_MAXIMUM_WAIT_EVENTS (%d) has been exceeded",
389 WSA_MAXIMUM_WAIT_EVENTS);
390}
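/*
 * Annotation (not original source): the non-fast branch of we_ctl()
 * above reconciles the requested rwflags with the number of handles (n)
 * already registered for the event:
 *
 *   rwflags             n=0         n=1                  n=2
 *   0                   no-op       delete that slot     delete both
 *   EVENT_READ          append R    rewrite slot as R    drop the W slot
 *   EVENT_WRITE         append W    rewrite slot as W    drop the R slot
 *   EVENT_READ|WRITE    append R+W  append missing one   no-op
 */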
391
392static int
393we_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
394{
395 struct we_set *wes = (struct we_set *)es;
396 const int timeout = tv_to_ms_timeout(tv);
397 DWORD status;
398
399 dmsg(D_EVENT_WAIT, "WE_WAIT enter n=%d to=%d", wes->n_events, timeout);
400
401#ifdef ENABLE_DEBUG
402 if (check_debug_level(D_EVENT_WAIT))
403 {
404 int i;
405 for (i = 0; i < wes->n_events; ++i)
406 {
407 dmsg(D_EVENT_WAIT, "[%d] ev=%p rwflags=0x%04x arg=" ptr_format, i, wes->events[i],
408 wes->esr[i].rwflags, (ptr_type)wes->esr[i].arg);
409 }
410 }
411#endif
412
413 /* WSA_WAIT_EVENT_0 == 0 but the API documentation is written in a way
414 that doesn't guarantee that. So we make useless checks. */
415#if defined(__GNUC__)
416#pragma GCC diagnostic push
417#pragma GCC diagnostic ignored "-Wtype-limits"
418#endif
419
420 /*
421 * First poll our event list with 0 timeout
422 */
423 status = WSAWaitForMultipleEvents((DWORD)wes->n_events, wes->events, FALSE, (DWORD)0, FALSE);
424
425 /*
426 * If at least one event is already set, we must
427 * individually poll the whole list.
428 */
429 if (status >= WSA_WAIT_EVENT_0 && status < WSA_WAIT_EVENT_0 + (DWORD)wes->n_events)
430 {
431 int i;
432 int j = 0;
433 for (i = 0; i < wes->n_events; ++i)
434 {
435 if (j >= outlen)
436 {
437 break;
438 }
439 if (WaitForSingleObject(wes->events[i], 0) == WAIT_OBJECT_0)
440 {
441 *out = wes->esr[i];
442 dmsg(D_EVENT_WAIT, "WE_WAIT leave [%d,%d] rwflags=0x%04x arg=" ptr_format, i, j,
443 out->rwflags, (ptr_type)out->arg);
444 ++j;
445 ++out;
446 }
447 }
448 return j;
449 }
450 else
451 {
452 /*
453 * If caller specified timeout > 0, we know at this point
454 * that no events are set, so wait only for the first event
455 * (or timeout) and return at most one event_set_return object.
456 *
457 * If caller specified timeout == 0, the second call to
458 * WSAWaitForMultipleEvents would be redundant -- just
459 * return 0 indicating timeout.
460 */
461 if (timeout > 0)
462 {
463 status = WSAWaitForMultipleEvents((DWORD)wes->n_events, wes->events, FALSE,
464 (DWORD)timeout, FALSE);
465 }
466
467 if (outlen >= 1 && status >= WSA_WAIT_EVENT_0
468 && status < WSA_WAIT_EVENT_0 + (DWORD)wes->n_events)
469 {
470 *out = wes->esr[status - WSA_WAIT_EVENT_0];
471 dmsg(D_EVENT_WAIT, "WE_WAIT leave rwflags=0x%04x arg=" ptr_format, out->rwflags,
472 (ptr_type)out->arg);
473 return 1;
474 }
475 else if (status == WSA_WAIT_TIMEOUT)
476 {
477 return 0;
478 }
479 else
480 {
481 return -1;
482 }
483 }
484#if defined(__GNUC__)
485#pragma GCC diagnostic pop
486#endif
487}
488
489static struct event_set *
490we_init(int *maxevents, unsigned int flags)
491{
492 struct we_set *wes;
493
494 dmsg(D_EVENT_WAIT, "WE_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
495
496 ALLOC_OBJ_CLEAR(wes, struct we_set);
497
498 /* set dispatch functions */
499 wes->func.free = we_free;
500 wes->func.reset = we_reset;
501 wes->func.del = we_del;
502 wes->func.ctl = we_ctl;
503 wes->func.wait = we_wait;
504
505 if (flags & EVENT_METHOD_FAST)
506 {
507 wes->fast = true;
508 }
509 wes->n_events = 0;
510
511 /* Figure our event capacity */
512 ASSERT(*maxevents > 0);
513 wes->capacity = min_int(*maxevents * 2, WSA_MAXIMUM_WAIT_EVENTS);
514 *maxevents = min_int(*maxevents, WSA_MAXIMUM_WAIT_EVENTS);
515
516 /* Allocate space for Win32 event handles */
517 ALLOC_ARRAY_CLEAR(wes->events, HANDLE, wes->capacity);
518
519 /* Allocate space for event_set_return objects */
520 ALLOC_ARRAY_CLEAR(wes->esr, struct event_set_return, wes->capacity);
521
522 dmsg(D_EVENT_WAIT, "WE_INIT maxevents=%d capacity=%d", *maxevents, wes->capacity);
523
524 return (struct event_set *)wes;
525}
526
527#endif /* _WIN32 */
528
529#if EPOLL
530
531struct ep_set
532{
533 struct event_set_functions func;
534 bool fast;
535 int epfd;
536 int maxevents;
537 struct epoll_event *events;
538};
539
540static void
541ep_free(struct event_set *es)
542{
543 struct ep_set *eps = (struct ep_set *)es;
544 close(eps->epfd);
545 free(eps->events);
546 free(eps);
547}
548
549static void
550ep_reset(struct event_set *es)
551{
552 const struct ep_set *eps = (struct ep_set *)es;
553 ASSERT(eps->fast);
554}
555
556static void
557ep_del(struct event_set *es, event_t event)
558{
559 struct epoll_event ev;
560 struct ep_set *eps = (struct ep_set *)es;
561
562 dmsg(D_EVENT_WAIT, "EP_DEL ev=%d", (int)event);
563
564 ASSERT(!eps->fast);
565 CLEAR(ev);
566 if (epoll_ctl(eps->epfd, EPOLL_CTL_DEL, event, &ev) < 0)
567 {
568 msg(M_WARN | M_ERRNO, "EVENT: epoll_ctl EPOLL_CTL_DEL failed, sd=%d", (int)event);
569 }
570}
571
572static void
573ep_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
574{
575 struct ep_set *eps = (struct ep_set *)es;
576 struct epoll_event ev;
577
578 CLEAR(ev);
579
580 ev.data.ptr = arg;
581 if (rwflags & EVENT_READ)
582 {
583 ev.events |= EPOLLIN;
584 }
585 if (rwflags & EVENT_WRITE)
586 {
587 ev.events |= EPOLLOUT;
588 }
589
590 dmsg(D_EVENT_WAIT, "EP_CTL fd=%d rwflags=0x%04x ev=0x%08x arg=" ptr_format, (int)event, rwflags,
591 (unsigned int)ev.events, (ptr_type)ev.data.ptr);
592
593 if (epoll_ctl(eps->epfd, EPOLL_CTL_MOD, event, &ev) < 0)
594 {
595 if (errno == ENOENT)
596 {
597 if (epoll_ctl(eps->epfd, EPOLL_CTL_ADD, event, &ev) < 0)
598 {
599 msg(M_ERR, "EVENT: epoll_ctl EPOLL_CTL_ADD failed, sd=%d", (int)event);
600 }
601 }
602 else
603 {
604 msg(M_ERR, "EVENT: epoll_ctl EPOLL_CTL_MOD failed, sd=%d", (int)event);
605 }
606 }
607}
608
609static int
610ep_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
611{
612 struct ep_set *eps = (struct ep_set *)es;
613 int stat;
614
615 if (outlen > eps->maxevents)
616 {
617 outlen = eps->maxevents;
618 }
619
620 stat = epoll_wait(eps->epfd, eps->events, outlen, tv_to_ms_timeout(tv));
621 ASSERT(stat <= outlen);
622
623 if (stat > 0)
624 {
625 int i;
626 const struct epoll_event *ev = eps->events;
627 struct event_set_return *esr = out;
628 for (i = 0; i < stat; ++i)
629 {
630 esr->rwflags = 0;
631 if (ev->events & (EPOLLIN | EPOLLPRI | EPOLLERR | EPOLLHUP))
632 {
633 esr->rwflags |= EVENT_READ;
634 }
635 if (ev->events & EPOLLOUT)
636 {
637 esr->rwflags |= EVENT_WRITE;
638 }
639 esr->arg = ev->data.ptr;
640 dmsg(D_EVENT_WAIT, "EP_WAIT[%d] rwflags=0x%04x ev=0x%08x arg=" ptr_format, i,
641 esr->rwflags, ev->events, (ptr_type)ev->data.ptr);
642 ++ev;
643 ++esr;
644 }
645 }
646 return stat;
647}
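/*
 * Annotation (not original source): EPOLLERR and EPOLLHUP are folded
 * into EVENT_READ above so that error and hang-up conditions wake the
 * caller's read path, where the subsequent read()/recv() reports the
 * actual failure.  epoll delivers these two flags even when they were
 * not requested in ep_ctl().
 */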
648
649static struct event_set *
650ep_init(int *maxevents, unsigned int flags)
651{
652 struct ep_set *eps;
653 int fd;
654
655 dmsg(D_EVENT_WAIT, "EP_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
656
657 /* open epoll file descriptor */
658 fd = epoll_create(*maxevents);
659 if (fd < 0)
660 {
661 return NULL;
662 }
663
664 set_cloexec(fd);
665
666 ALLOC_OBJ_CLEAR(eps, struct ep_set);
667
668 /* set dispatch functions */
669 eps->func.free = ep_free;
670 eps->func.reset = ep_reset;
671 eps->func.del = ep_del;
672 eps->func.ctl = ep_ctl;
673 eps->func.wait = ep_wait;
674
675 /* fast method ("sort of") corresponds to epoll one-shot */
676 if (flags & EVENT_METHOD_FAST)
677 {
678 eps->fast = true;
679 }
680
681 /* allocate space for epoll_wait return */
682 ASSERT(*maxevents > 0);
683 eps->maxevents = *maxevents;
684 ALLOC_ARRAY_CLEAR(eps->events, struct epoll_event, eps->maxevents);
685
686 /* set epoll control fd */
687 eps->epfd = fd;
688
689 return (struct event_set *)eps;
690}
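/*
 * Annotation (not original source): on current Linux kernels the size
 * argument of epoll_create() is only a hint and merely has to be
 * positive; the effective bound here is eps->maxevents, which caps how
 * many ready descriptors a single epoll_wait() call in ep_wait() can
 * report.
 */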
691#endif /* EPOLL */
692
693#if POLL
694
695struct po_set
696{
697 struct event_set_functions func;
698 bool fast;
699 struct pollfd *events;
700 void **args;
701 int n_events;
702 int capacity;
703};
704
705static void
706po_free(struct event_set *es)
707{
708 struct po_set *pos = (struct po_set *)es;
709 free(pos->events);
710 free(pos->args);
711 free(pos);
712}
713
714static void
715po_reset(struct event_set *es)
716{
717 struct po_set *pos = (struct po_set *)es;
718 ASSERT(pos->fast);
719 pos->n_events = 0;
720}
721
722static void
723po_del(struct event_set *es, event_t event)
724{
725 struct po_set *pos = (struct po_set *)es;
726 int i;
727
728 dmsg(D_EVENT_WAIT, "PO_DEL ev=%d", (int)event);
729
730 ASSERT(!pos->fast);
731 for (i = 0; i < pos->n_events; ++i)
732 {
733 if (pos->events[i].fd == event)
734 {
735 int j;
736 for (j = i; j < pos->n_events - 1; ++j)
737 {
738 pos->events[j] = pos->events[j + 1];
739 pos->args[j] = pos->args[j + 1];
740 }
741 --pos->n_events;
742 break;
743 }
744 }
745}
746
747static inline void
748po_set_pollfd_events(struct pollfd *pfdp, unsigned int rwflags)
749{
750 pfdp->events = 0;
751 if (rwflags & EVENT_WRITE)
752 {
753 pfdp->events |= POLLOUT;
754 }
755 if (rwflags & EVENT_READ)
756 {
757 pfdp->events |= (POLLIN | POLLPRI);
758 }
759}
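/*
 * Annotation (not original source): POLLERR and POLLHUP are output-only
 * flags in poll(); they cannot be requested via pfdp->events, which is
 * why they are only examined against revents in po_wait() below, where
 * they are mapped to EVENT_READ.
 */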
760
761static inline bool
762po_append_event(struct po_set *pos, event_t event, unsigned int rwflags, void *arg)
763{
764 if (pos->n_events < pos->capacity)
765 {
766 struct pollfd *pfdp = &pos->events[pos->n_events];
767 pfdp->fd = event;
768 pos->args[pos->n_events] = arg;
769 po_set_pollfd_events(pfdp, rwflags);
770 ++pos->n_events;
771 return true;
772 }
773 else
774 {
775 return false;
776 }
777}
778
779static void
780po_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
781{
782 struct po_set *pos = (struct po_set *)es;
783
784 dmsg(D_EVENT_WAIT, "PO_CTL rwflags=0x%04x ev=%d arg=" ptr_format, rwflags, (int)event,
785 (ptr_type)arg);
786
787 if (pos->fast)
788 {
789 if (!po_append_event(pos, event, rwflags, arg))
790 {
791 goto err;
792 }
793 }
794 else
795 {
796 int i;
797 for (i = 0; i < pos->n_events; ++i)
798 {
799 struct pollfd *pfdp = &pos->events[i];
800 if (pfdp->fd == event)
801 {
802 pos->args[i] = arg;
803 po_set_pollfd_events(pfdp, rwflags);
804 goto done;
805 }
806 }
807 if (!po_append_event(pos, event, rwflags, arg))
808 {
809 goto err;
810 }
811 }
812
813done:
814 return;
815
816err:
817 msg(D_EVENT_ERRORS, "Error: poll: too many I/O wait events");
818}
819
820static int
821po_wait(struct event_set *es, const struct timeval *tv, struct event_set_return *out, int outlen)
822{
823 struct po_set *pos = (struct po_set *)es;
824 int stat;
825
826 stat = poll(pos->events, pos->n_events, tv_to_ms_timeout(tv));
827
828 ASSERT(stat <= pos->n_events);
829
830 if (stat > 0)
831 {
832 int i, j = 0;
833 const struct pollfd *pfdp = pos->events;
834 for (i = 0; i < pos->n_events && j < outlen; ++i)
835 {
836 if (pfdp->revents & (POLLIN | POLLPRI | POLLERR | POLLHUP | POLLOUT))
837 {
838 out->rwflags = 0;
839 if (pfdp->revents & (POLLIN | POLLPRI | POLLERR | POLLHUP))
840 {
841 out->rwflags |= EVENT_READ;
842 }
843 if (pfdp->revents & POLLOUT)
844 {
845 out->rwflags |= EVENT_WRITE;
846 }
847 out->arg = pos->args[i];
848 dmsg(D_EVENT_WAIT,
849 "PO_WAIT[%d,%d] fd=%d rev=0x%08x rwflags=0x%04x arg=" ptr_format " %s", i, j,
850 pfdp->fd, pfdp->revents, out->rwflags, (ptr_type)out->arg,
851 pos->fast ? "" : "[scalable]");
852 ++out;
853 ++j;
854 }
855 else if (pfdp->revents)
856 {
857 msg(D_EVENT_ERRORS, "Error: poll: unknown revents=0x%04x for fd=%d",
858 (unsigned int)pfdp->revents, pfdp->fd);
859 }
860 ++pfdp;
861 }
862 return j;
863 }
864 return stat;
865}
866
867static struct event_set *
868po_init(int *maxevents, unsigned int flags)
869{
870 struct po_set *pos;
871
872 dmsg(D_EVENT_WAIT, "PO_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
873
874 ALLOC_OBJ_CLEAR(pos, struct po_set);
875
876 /* set dispatch functions */
877 pos->func.free = po_free;
878 pos->func.reset = po_reset;
879 pos->func.del = po_del;
880 pos->func.ctl = po_ctl;
881 pos->func.wait = po_wait;
882
883 if (flags & EVENT_METHOD_FAST)
884 {
885 pos->fast = true;
886 }
887
888 pos->n_events = 0;
889
890 /* Figure our event capacity */
891 ASSERT(*maxevents > 0);
892 pos->capacity = *maxevents;
893
894 /* Allocate space for pollfd structures to be passed to poll() */
895 ALLOC_ARRAY_CLEAR(pos->events, struct pollfd, pos->capacity);
896
897 /* Allocate space for event_set_return objects */
898 ALLOC_ARRAY_CLEAR(pos->args, void *, pos->capacity);
899
900 return (struct event_set *)pos;
901}
902#endif /* POLL */
903
904#if SELECT
905
906struct se_set
907{
908 struct event_set_functions func;
909 bool fast;
910 fd_set readfds;
911 fd_set writefds;
912 void **args; /* allocated to capacity size */
913 int maxfd; /* largest fd seen so far, always < capacity */
914 int capacity; /* fixed largest fd + 1 */
915};
916
917static void
918se_free(struct event_set *es)
919{
920 struct se_set *ses = (struct se_set *)es;
921 free(ses->args);
922 free(ses);
923}
924
925static void
926se_reset(struct event_set *es)
927{
928 struct se_set *ses = (struct se_set *)es;
929 int i;
930 ASSERT(ses->fast);
931
932 dmsg(D_EVENT_WAIT, "SE_RESET");
933
934 FD_ZERO(&ses->readfds);
935 FD_ZERO(&ses->writefds);
936 for (i = 0; i <= ses->maxfd; ++i)
937 {
938 ses->args[i] = NULL;
939 }
940 ses->maxfd = -1;
941}
942
943static void
944se_del(struct event_set *es, event_t event)
945{
946 struct se_set *ses = (struct se_set *)es;
947 ASSERT(!ses->fast);
948
949 dmsg(D_EVENT_WAIT, "SE_DEL ev=%d", (int)event);
950
951 if (event >= 0 && event < ses->capacity)
952 {
953 FD_CLR(event, &ses->readfds);
954 FD_CLR(event, &ses->writefds);
955 ses->args[event] = NULL;
956 }
957 else
958 {
959 msg(D_EVENT_ERRORS, "Error: select/se_del: too many I/O wait events");
960 }
961 return;
962}
963
964static void
965se_ctl(struct event_set *es, event_t event, unsigned int rwflags, void *arg)
966{
967 struct se_set *ses = (struct se_set *)es;
968
969 dmsg(D_EVENT_WAIT, "SE_CTL rwflags=0x%04x ev=%d fast=%d cap=%d maxfd=%d arg=" ptr_format,
970 rwflags, (int)event, (int)ses->fast, ses->capacity, ses->maxfd, (ptr_type)arg);
971
972 if (event >= 0 && event < ses->capacity)
973 {
974 ses->maxfd = max_int(event, ses->maxfd);
975 ses->args[event] = arg;
976 if (ses->fast)
977 {
978 if (rwflags & EVENT_READ)
979 {
980 openvpn_fd_set(event, &ses->readfds);
981 }
982 if (rwflags & EVENT_WRITE)
983 {
984 openvpn_fd_set(event, &ses->writefds);
985 }
986 }
987 else
988 {
989 if (rwflags & EVENT_READ)
990 {
991 openvpn_fd_set(event, &ses->readfds);
992 }
993 else
994 {
995 FD_CLR(event, &ses->readfds);
996 }
997 if (rwflags & EVENT_WRITE)
998 {
999 openvpn_fd_set(event, &ses->writefds);
1000 }
1001 else
1002 {
1003 FD_CLR(event, &ses->writefds);
1004 }
1005 }
1006 }
1007 else
1008 {
1009 msg(D_EVENT_ERRORS, "Error: select: too many I/O wait events, fd=%d cap=%d", (int)event,
1010 ses->capacity);
1011 }
1012}
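/*
 * Annotation (not original source): in the fast branch se_ctl() only
 * ever sets bits, because se_reset() clears both fd_sets before each
 * registration pass.  The persistent (non-fast) branch instead treats
 * each call as a full replacement, using FD_CLR to drop whichever
 * direction was not requested.
 */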
1013
1014static int
1015se_wait_return(struct se_set *ses, fd_set *read, fd_set *write, struct event_set_return *out,
1016 int outlen)
1017{
1018 int i, j = 0;
1019 for (i = 0; i <= ses->maxfd && j < outlen; ++i)
1020 {
1021 const bool r = FD_ISSET(i, read);
1022 const bool w = FD_ISSET(i, write);
1023 if (r || w)
1024 {
1025 out->rwflags = 0;
1026 if (r)
1027 {
1028 out->rwflags |= EVENT_READ;
1029 }
1030 if (w)
1031 {
1032 out->rwflags |= EVENT_WRITE;
1033 }
1034 out->arg = ses->args[i];
1035 dmsg(D_EVENT_WAIT, "SE_WAIT[%d,%d] rwflags=0x%04x arg=" ptr_format, i, j, out->rwflags,
1036 (ptr_type)out->arg);
1037 ++out;
1038 ++j;
1039 }
1040 }
1041 return j;
1042}
1043
1044static int
1045se_wait_fast(struct event_set *es, const struct timeval *tv, struct event_set_return *out,
1046 int outlen)
1047{
1048 struct se_set *ses = (struct se_set *)es;
1049 struct timeval tv_tmp = *tv;
1050 int stat;
1051
1052 dmsg(D_EVENT_WAIT, "SE_WAIT_FAST maxfd=%d tv=%" PRIi64 "/%ld", ses->maxfd,
1053 (int64_t)tv_tmp.tv_sec, (long)tv_tmp.tv_usec);
1054
1055 stat = select(ses->maxfd + 1, &ses->readfds, &ses->writefds, NULL, &tv_tmp);
1056
1057 if (stat > 0)
1058 {
1059 stat = se_wait_return(ses, &ses->readfds, &ses->writefds, out, outlen);
1060 }
1061
1062 return stat;
1063}
1064
1065static int
1066se_wait_scalable(struct event_set *es, const struct timeval *tv, struct event_set_return *out,
1067 int outlen)
1068{
1069 struct se_set *ses = (struct se_set *)es;
1070 struct timeval tv_tmp = *tv;
1071 fd_set read = ses->readfds;
1072 fd_set write = ses->writefds;
1073 int stat;
1074
1075 dmsg(D_EVENT_WAIT, "SE_WAIT_SCALEABLE maxfd=%d tv=%" PRIi64 "/%ld", ses->maxfd,
1076 (int64_t)tv_tmp.tv_sec, (long)tv_tmp.tv_usec);
1077
1078 stat = select(ses->maxfd + 1, &read, &write, NULL, &tv_tmp);
1079
1080 if (stat > 0)
1081 {
1082 stat = se_wait_return(ses, &read, &write, out, outlen);
1083 }
1084
1085 return stat;
1086}
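/*
 * Annotation (not original source): select() overwrites the fd_sets it
 * is given.  se_wait_fast() therefore passes the stored sets directly
 * and relies on the caller re-registering via se_reset()/se_ctl() every
 * iteration, while se_wait_scalable() works on stack copies so the
 * registrations made through se_ctl() persist across calls.
 */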
1087
1088static struct event_set *
1089se_init(int *maxevents, unsigned int flags)
1090{
1091 struct se_set *ses;
1092
1093 dmsg(D_EVENT_WAIT, "SE_INIT maxevents=%d flags=0x%08x", *maxevents, flags);
1094
1095 ALLOC_OBJ_CLEAR(ses, struct se_set);
1096
1097 /* set dispatch functions */
1098 ses->func.free = se_free;
1099 ses->func.reset = se_reset;
1100 ses->func.del = se_del;
1101 ses->func.ctl = se_ctl;
1102 ses->func.wait = se_wait_scalable;
1103
1104 if (flags & EVENT_METHOD_FAST)
1105 {
1106 ses->fast = true;
1107 ses->func.wait = se_wait_fast;
1108 }
1109
1110 /* Select needs to be passed this value + 1 */
1111 ses->maxfd = -1;
1112
1113 /* Set our event capacity */
1114 ASSERT(*maxevents > 0);
1115 *maxevents = min_int(*maxevents, SELECT_MAX_FDS);
1116 ses->capacity = SELECT_MAX_FDS;
1117
1118 /* Allocate space for event_set_return void * args */
1119 ALLOC_ARRAY_CLEAR(ses->args, void *, ses->capacity);
1120
1121 return (struct event_set *)ses;
1122}
1123#endif /* SELECT */
1124
1125static struct event_set *
1126event_set_init_simple(int *maxevents, unsigned int flags)
1127{
1128 struct event_set *ret = NULL;
1129#ifdef _WIN32
1130 ret = we_init(maxevents, flags);
1131#elif POLL && SELECT
1132#if 0 /* Define to 1 if EVENT_METHOD_US_TIMEOUT should cause select to be favored over poll */
1133 if (flags & EVENT_METHOD_US_TIMEOUT)
1134 {
1135 ret = se_init(maxevents, flags);
1136 }
1137#endif
1138#ifdef SELECT_PREFERRED_OVER_POLL
1139 if (!ret)
1140 {
1141 ret = se_init(maxevents, flags);
1142 }
1143 if (!ret)
1144 {
1145 ret = po_init(maxevents, flags);
1146 }
1147#else /* ifdef SELECT_PREFERRED_OVER_POLL */
1148 if (!ret)
1149 {
1150 ret = po_init(maxevents, flags);
1151 }
1152 if (!ret)
1153 {
1154 ret = se_init(maxevents, flags);
1155 }
1156#endif
1157#elif POLL
1158 ret = po_init(maxevents, flags);
1159#elif SELECT
1160 ret = se_init(maxevents, flags);
1161#else /* ifdef _WIN32 */
1162#error At least one of poll, select, or WSAWaitForMultipleEvents must be supported by the kernel
1163#endif /* ifdef _WIN32 */
1164 ASSERT(ret);
1165 return ret;
1166}
1167
1168static struct event_set *
1169event_set_init_scalable(int *maxevents, unsigned int flags)
1170{
1171 struct event_set *ret = NULL;
1172#if EPOLL
1173 ret = ep_init(maxevents, flags);
1174 if (!ret)
1175 {
1176 msg(M_WARN, "Note: sys_epoll API is unavailable, falling back to poll/select API");
1177 ret = event_set_init_simple(maxevents, flags);
1178 }
1179#else /* if EPOLL */
1180 ret = event_set_init_simple(maxevents, flags);
1181#endif
1182 ASSERT(ret);
1183 return ret;
1184}
1185
1186struct event_set *
1187event_set_init(int *maxevents, unsigned int flags)
1188{
1189 if (flags & EVENT_METHOD_FAST)
1190 {
1191 return event_set_init_simple(maxevents, flags);
1192 }
1193 else
1194 {
1195 return event_set_init_scalable(maxevents, flags);
1196 }
1197}
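/*
 * Illustrative usage sketch (annotation, not part of event.c): one way a
 * caller could drive an event_set through the dispatch table built above,
 * on a POSIX build where event_t is a plain file descriptor.  The fd
 * parameter, the 8-slot result array and the 1-second timeout are
 * arbitrary example choices; real callers sit in OpenVPN's I/O wait loop
 * and typically go through the thin inline wrappers declared in event.h.
 */
#include "event.h"

static void
example_wait_once(int fd)
{
    int maxevents = 8;
    struct event_set *es = event_set_init(&maxevents, EVENT_METHOD_FAST);
    struct event_set_return esr[8];
    struct timeval tv = { 1, 0 }; /* wait at most one second */

    /* register fd for read readiness; arg is handed back in esr[].arg */
    (*es->func.ctl)(es, fd, EVENT_READ, NULL);

    /* returns the number of ready slots, 0 on timeout, -1 on error */
    int n = (*es->func.wait)(es, &tv, esr, maxevents);
    for (int i = 0; i < n; ++i)
    {
        if (esr[i].rwflags & EVENT_READ)
        {
            /* fd is readable here */
        }
    }

    (*es->func.free)(es);
}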