/*-------------------------------------------------------------------------
 *
 * waiteventset.c
 *	  ppoll()/pselect() like abstraction
 *
 * WaitEvents are an abstraction for waiting for one or more events at a time.
 * The waiting can be done in a race free fashion, similar to ppoll() or
 * pselect() (as opposed to plain poll()/select()).
 *
 * You can wait for:
 * - a latch being set from another process or from signal handler in the same
 *   process (WL_LATCH_SET)
 * - data to become readable or writeable on a socket (WL_SOCKET_*)
 * - postmaster death (WL_POSTMASTER_DEATH or WL_EXIT_ON_PM_DEATH)
 * - timeout (WL_TIMEOUT)
 *
 * Implementation
 * --------------
 *
 * The poll() implementation uses the so-called self-pipe trick to overcome the
 * race condition involved with poll() and setting a global flag in the signal
 * handler. When a latch is set and the current process is waiting for it, the
 * signal handler wakes up the poll() in WaitLatch by writing a byte to a pipe.
 * A signal by itself doesn't interrupt poll() on all platforms, and even on
 * platforms where it does, a signal that arrives just before the poll() call
 * does not prevent poll() from entering sleep. An incoming byte on a pipe
 * however reliably interrupts the sleep, and causes poll() to return
 * immediately even if the signal arrives before poll() begins.
 *
 * The epoll() implementation overcomes the race with a different technique: it
 * keeps SIGURG blocked and consumes from a signalfd() descriptor instead. We
 * don't need to register a signal handler or create our own self-pipe. We
 * assume that any system that has Linux epoll() also has Linux signalfd().
 *
 * The kqueue() implementation waits for SIGURG with EVFILT_SIGNAL.
 *
 * The Windows implementation uses Windows events that are inherited by all
 * postmaster child processes. There's no need for the self-pipe trick there.
 *
 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/storage/ipc/waiteventset.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <fcntl.h>
#include <limits.h>
#include <signal.h>
#include <unistd.h>
#ifdef HAVE_SYS_EPOLL_H
#include <sys/epoll.h>
#endif
#ifdef HAVE_SYS_EVENT_H
#include <sys/event.h>
#endif
#ifdef HAVE_SYS_SIGNALFD_H
#include <sys/signalfd.h>
#endif
#ifdef HAVE_POLL_H
#include <poll.h>
#endif

#include "libpq/pqsignal.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "port/atomics.h"
#include "portability/instr_time.h"
#include "postmaster/postmaster.h"
#include "storage/fd.h"
#include "storage/ipc.h"
#include "storage/pmsignal.h"
#include "storage/latch.h"
#include "storage/waiteventset.h"
#include "utils/memutils.h"
#include "utils/resowner.h"

/*
 * Select the fd readiness primitive to use. Normally the "most modern"
 * primitive supported by the OS will be used, but for testing it can be
 * useful to manually specify the used primitive.  If desired, just add a
 * define somewhere before this block.
 */
#if defined(WAIT_USE_EPOLL) || defined(WAIT_USE_POLL) || \
	defined(WAIT_USE_KQUEUE) || defined(WAIT_USE_WIN32)
/* don't overwrite manual choice */
#elif defined(HAVE_SYS_EPOLL_H)
#define WAIT_USE_EPOLL
#elif defined(HAVE_KQUEUE)
#define WAIT_USE_KQUEUE
#elif defined(HAVE_POLL)
#define WAIT_USE_POLL
#elif WIN32
#define WAIT_USE_WIN32
#else
#error "no wait set implementation available"
#endif
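
/*
 * Illustration (added in editing, not part of the original file): to force a
 * particular primitive for testing, add a define above this block, e.g.
 *
 *	#define WAIT_USE_POLL
 */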

/*
 * By default, we use a self-pipe with poll() and a signalfd with epoll(), if
 * available. For testing the choice can also be manually specified.
 */
#if defined(WAIT_USE_POLL) || defined(WAIT_USE_EPOLL)
#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
/* don't overwrite manual choice */
#elif defined(WAIT_USE_EPOLL) && defined(HAVE_SYS_SIGNALFD_H)
#define WAIT_USE_SIGNALFD
#else
#define WAIT_USE_SELF_PIPE
#endif
#endif

/* typedef in waiteventset.h */
struct WaitEventSet
{
	ResourceOwner owner;

	int			nevents;		/* number of registered events */
	int			nevents_space;	/* maximum number of events in this set */

	/*
	 * Array, of nevents_space length, storing the definition of events this
	 * set is waiting for.
	 */
	WaitEvent  *events;

	/*
	 * If WL_LATCH_SET is specified in any wait event, latch is a pointer to
	 * said latch, and latch_pos the offset in the ->events array. This is
	 * useful because we check the state of the latch before performing
	 * syscalls related to waiting.
	 */
	Latch	   *latch;
	int			latch_pos;

	/*
	 * WL_EXIT_ON_PM_DEATH is converted to WL_POSTMASTER_DEATH, but this flag
	 * is set so that we'll exit immediately if postmaster death is detected,
	 * instead of returning.
	 */
	bool		exit_on_postmaster_death;

#if defined(WAIT_USE_EPOLL)
	int			epoll_fd;
	/* epoll_wait returns events in a user-provided array, allocate once */
	struct epoll_event *epoll_ret_events;
#elif defined(WAIT_USE_KQUEUE)
	int			kqueue_fd;
	/* kevent returns events in a user-provided array, allocate once */
	struct kevent *kqueue_ret_events;
	bool		report_postmaster_not_running;
#elif defined(WAIT_USE_POLL)
	/* poll expects events to be waited on every poll() call, prepare once */
	struct pollfd *pollfds;
#elif defined(WAIT_USE_WIN32)

	/*
	 * Array of windows events. The first element always contains
	 * pgwin32_signal_event, so the remaining elements are offset by one (i.e.
	 * event->pos + 1).
	 */
	HANDLE	   *handles;
#endif
};

#ifndef WIN32
/* Are we currently in WaitLatch? The signal handler would like to know. */
static volatile sig_atomic_t waiting = false;
#endif

#ifdef WAIT_USE_SIGNALFD
/* On Linux, we'll receive SIGURG via a signalfd file descriptor. */
static int	signal_fd = -1;
#endif

#ifdef WAIT_USE_SELF_PIPE
/* Read and write ends of the self-pipe */
static int	selfpipe_readfd = -1;
static int	selfpipe_writefd = -1;

/* Process owning the self-pipe --- needed for checking purposes */
static int	selfpipe_owner_pid = 0;

/* Private function prototypes */
static void latch_sigurg_handler(SIGNAL_ARGS);
static void sendSelfPipeByte(void);
#endif

#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
static void drain(void);
#endif

#if defined(WAIT_USE_EPOLL)
static void WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action);
#elif defined(WAIT_USE_KQUEUE)
static void WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events);
#elif defined(WAIT_USE_POLL)
static void WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event);
#elif defined(WAIT_USE_WIN32)
static void WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event);
#endif

static inline int WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
										WaitEvent *occurred_events, int nevents);

/* ResourceOwner support to hold WaitEventSets */
static void ResOwnerReleaseWaitEventSet(Datum res);

static const ResourceOwnerDesc wait_event_set_resowner_desc =
{
	.name = "WaitEventSet",
	.release_phase = RESOURCE_RELEASE_AFTER_LOCKS,
	.release_priority = RELEASE_PRIO_WAITEVENTSETS,
	.ReleaseResource = ResOwnerReleaseWaitEventSet,
	.DebugPrint = NULL
};

/* Convenience wrappers over ResourceOwnerRemember/Forget */
static inline void
ResourceOwnerRememberWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
	ResourceOwnerRemember(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}
static inline void
ResourceOwnerForgetWaitEventSet(ResourceOwner owner, WaitEventSet *set)
{
	ResourceOwnerForget(owner, PointerGetDatum(set), &wait_event_set_resowner_desc);
}


/*
 * Initialize the process-local wait event infrastructure.
 *
 * This must be called once during startup of any process that can wait on
 * latches, before it issues any InitLatch() or OwnLatch() calls.
 */
void
InitializeWaitEventSupport(void)
{
#if defined(WAIT_USE_SELF_PIPE)
	int			pipefd[2];

	if (IsUnderPostmaster)
	{
		/*
		 * We might have inherited connections to a self-pipe created by the
		 * postmaster.  It's critical that child processes create their own
		 * self-pipes, of course, and we really want them to close the
		 * inherited FDs for safety's sake.
		 */
		if (selfpipe_owner_pid != 0)
		{
			/* Assert we go through here but once in a child process */
			Assert(selfpipe_owner_pid != MyProcPid);
			/* Release postmaster's pipe FDs; ignore any error */
			(void) close(selfpipe_readfd);
			(void) close(selfpipe_writefd);
			/* Clean up, just for safety's sake; we'll set these below */
			selfpipe_readfd = selfpipe_writefd = -1;
			selfpipe_owner_pid = 0;
			/* Keep fd.c's accounting straight */
			ReleaseExternalFD();
			ReleaseExternalFD();
		}
		else
		{
			/*
			 * Postmaster didn't create a self-pipe ... or else we're in an
			 * EXEC_BACKEND build, in which case it doesn't matter since the
			 * postmaster's pipe FDs were closed by the action of FD_CLOEXEC.
			 * fd.c won't have state to clean up, either.
			 */
			Assert(selfpipe_readfd == -1);
		}
	}
	else
	{
		/* In postmaster or standalone backend, assert we do this but once */
		Assert(selfpipe_readfd == -1);
		Assert(selfpipe_owner_pid == 0);
	}

	/*
	 * Set up the self-pipe that allows a signal handler to wake up the
	 * poll()/epoll_wait() in WaitLatch. Make the write-end non-blocking, so
	 * that SetLatch won't block if the event has already been set many times
	 * filling the kernel buffer. Make the read-end non-blocking too, so that
	 * we can easily clear the pipe by reading until EAGAIN or EWOULDBLOCK.
	 * Also, make both FDs close-on-exec, since we surely do not want any
	 * child processes messing with them.
	 */
	if (pipe(pipefd) < 0)
		elog(FATAL, "pipe() failed: %m");
	if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFL, O_NONBLOCK) == -1)
		elog(FATAL, "fcntl(F_SETFL) failed on write-end of self-pipe: %m");
	if (fcntl(pipefd[0], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on read-end of self-pipe: %m");
	if (fcntl(pipefd[1], F_SETFD, FD_CLOEXEC) == -1)
		elog(FATAL, "fcntl(F_SETFD) failed on write-end of self-pipe: %m");

	selfpipe_readfd = pipefd[0];
	selfpipe_writefd = pipefd[1];
	selfpipe_owner_pid = MyProcPid;

	/* Tell fd.c about these two long-lived FDs */
	ReserveExternalFD();
	ReserveExternalFD();

	pqsignal(SIGURG, latch_sigurg_handler);
#endif

#ifdef WAIT_USE_SIGNALFD
	sigset_t	signalfd_mask;

	if (IsUnderPostmaster)
	{
		/*
		 * It would probably be safe to re-use the inherited signalfd since
		 * signalfds only see the current process's pending signals, but it
		 * seems less surprising to close it and create our own.
		 */
		if (signal_fd != -1)
		{
			/* Release postmaster's signal FD; ignore any error */
			(void) close(signal_fd);
			signal_fd = -1;
			ReleaseExternalFD();
		}
	}

	/* Block SIGURG, because we'll receive it through a signalfd. */
	sigaddset(&UnBlockSig, SIGURG);

	/* Set up the signalfd to receive SIGURG notifications. */
	sigemptyset(&signalfd_mask);
	sigaddset(&signalfd_mask, SIGURG);
	signal_fd = signalfd(-1, &signalfd_mask, SFD_NONBLOCK | SFD_CLOEXEC);
	if (signal_fd < 0)
		elog(FATAL, "signalfd() failed");
	ReserveExternalFD();
#endif

#ifdef WAIT_USE_KQUEUE
	/* Ignore SIGURG, because we'll receive it via kqueue. */
	pqsignal(SIGURG, SIG_IGN);
#endif
}
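
/*
 * Illustration (added in editing, not part of the original file): a minimal
 * standalone sketch of the self-pipe trick described in the header comment,
 * using only POSIX calls.  All names are local to the example; assume both
 * pipe ends were made O_NONBLOCK (as done above) and that demo_sigurg_handler
 * was installed with sigaction().
 */
#if 0
#include <poll.h>
#include <unistd.h>

static int	demo_pipe[2];		/* created earlier with pipe() */
static volatile sig_atomic_t demo_flag = 0;

static void
demo_sigurg_handler(int signo)
{
	char		b = 0;

	demo_flag = 1;
	/* A byte in the pipe wakes poll() even if the signal beat us to it. */
	(void) write(demo_pipe[1], &b, 1);
}

static void
demo_wait_for_flag(void)
{
	struct pollfd pfd = {.fd = demo_pipe[0], .events = POLLIN};

	while (!demo_flag)
	{
		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
		{
			char		buf[16];

			/* Drain the pipe, then re-check demo_flag. */
			while (read(demo_pipe[0], buf, sizeof(buf)) > 0)
				;
		}
	}
}
#endif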

/*
 * Create a WaitEventSet with space for nevents different events to wait for.
 *
 * These events can then be efficiently waited upon together, using
 * WaitEventSetWait().
 *
 * The WaitEventSet is tracked by the given 'resowner'.  Use NULL for session
 * lifetime.
 */
WaitEventSet *
CreateWaitEventSet(ResourceOwner resowner, int nevents)
{
	WaitEventSet *set;
	char	   *data;
	Size		sz = 0;

	/*
	 * Use MAXALIGN size/alignment to guarantee that later uses of memory are
	 * aligned correctly. E.g. epoll_event might need 8 byte alignment on some
	 * platforms, but earlier allocations like WaitEventSet and WaitEvent
	 * might not be sized to guarantee that when purely using sizeof().
	 */
	sz += MAXALIGN(sizeof(WaitEventSet));
	sz += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
	sz += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
	sz += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
	sz += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
	/* need space for the pgwin32_signal_event */
	sz += MAXALIGN(sizeof(HANDLE) * (nevents + 1));
#endif

	if (resowner != NULL)
		ResourceOwnerEnlarge(resowner);

	data = (char *) MemoryContextAllocZero(TopMemoryContext, sz);

	set = (WaitEventSet *) data;
	data += MAXALIGN(sizeof(WaitEventSet));

	set->events = (WaitEvent *) data;
	data += MAXALIGN(sizeof(WaitEvent) * nevents);

#if defined(WAIT_USE_EPOLL)
	set->epoll_ret_events = (struct epoll_event *) data;
	data += MAXALIGN(sizeof(struct epoll_event) * nevents);
#elif defined(WAIT_USE_KQUEUE)
	set->kqueue_ret_events = (struct kevent *) data;
	data += MAXALIGN(sizeof(struct kevent) * nevents);
#elif defined(WAIT_USE_POLL)
	set->pollfds = (struct pollfd *) data;
	data += MAXALIGN(sizeof(struct pollfd) * nevents);
#elif defined(WAIT_USE_WIN32)
	set->handles = (HANDLE) data;
	data += MAXALIGN(sizeof(HANDLE) * nevents);
#endif

	set->latch = NULL;
	set->nevents_space = nevents;
	set->exit_on_postmaster_death = false;

	if (resowner != NULL)
	{
		ResourceOwnerRememberWaitEventSet(resowner, set);
		set->owner = resowner;
	}

#if defined(WAIT_USE_EPOLL)
	if (!AcquireExternalFD())
		elog(ERROR, "AcquireExternalFD, for epoll_create1, failed: %m");
	set->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (set->epoll_fd < 0)
	{
		ReleaseExternalFD();
		elog(ERROR, "epoll_create1 failed: %m");
	}
#elif defined(WAIT_USE_KQUEUE)
	if (!AcquireExternalFD())
		elog(ERROR, "AcquireExternalFD, for kqueue, failed: %m");
	set->kqueue_fd = kqueue();
	if (set->kqueue_fd < 0)
	{
		ReleaseExternalFD();
		elog(ERROR, "kqueue failed: %m");
	}
	if (fcntl(set->kqueue_fd, F_SETFD, FD_CLOEXEC) == -1)
	{
		int			save_errno = errno;

		close(set->kqueue_fd);
		ReleaseExternalFD();
		errno = save_errno;
		elog(ERROR, "fcntl(F_SETFD) failed on kqueue descriptor: %m");
	}
	set->report_postmaster_not_running = false;
#elif defined(WAIT_USE_WIN32)

	/*
	 * To handle signals while waiting, we need to add a win32 specific event.
	 * We accounted for the additional event at the top of this routine. See
	 * port/win32/signal.c for more details.
	 *
	 * Note: pgwin32_signal_event should be first to ensure that it will be
	 * reported when multiple events are set.  We want to guarantee that
	 * pending signals are serviced.
	 */
	set->handles[0] = pgwin32_signal_event;
	StaticAssertStmt(WSA_INVALID_EVENT == NULL, "");
#endif

	return set;
}

/*
 * Free a previously created WaitEventSet.
 *
 * Note: preferably, this shouldn't have to free any resources that could be
 * inherited across an exec(). If it did, we'd likely leak those resources in
 * many scenarios. For the epoll case, we ensure that by setting EPOLL_CLOEXEC
 * when the FD is created. For the Windows case, we assume that the handles
 * involved are non-inheritable.
 */
void
FreeWaitEventSet(WaitEventSet *set)
{
	if (set->owner)
	{
		ResourceOwnerForgetWaitEventSet(set->owner, set);
		set->owner = NULL;
	}

#if defined(WAIT_USE_EPOLL)
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	close(set->kqueue_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_WIN32)
	for (WaitEvent *cur_event = set->events;
		 cur_event < (set->events + set->nevents);
		 cur_event++)
	{
		if (cur_event->events & WL_LATCH_SET)
		{
			/* uses the latch's HANDLE */
		}
		else if (cur_event->events & WL_POSTMASTER_DEATH)
		{
			/* uses PostmasterHandle */
		}
		else
		{
			/* Clean up the event object we created for the socket */
			WSAEventSelect(cur_event->fd, NULL, 0);
			WSACloseEvent(set->handles[cur_event->pos + 1]);
		}
	}
#endif

	pfree(set);
}

/*
 * Free a previously created WaitEventSet in a child process after a fork().
 */
void
FreeWaitEventSetAfterFork(WaitEventSet *set)
{
#if defined(WAIT_USE_EPOLL)
	close(set->epoll_fd);
	ReleaseExternalFD();
#elif defined(WAIT_USE_KQUEUE)
	/* kqueues are not normally inherited by child processes */
	ReleaseExternalFD();
#endif

	pfree(set);
}

/* ---
 * Add an event to the set. Possible events are:
 * - WL_LATCH_SET: Wait for the latch to be set
 * - WL_POSTMASTER_DEATH: Wait for postmaster to die
 * - WL_SOCKET_READABLE: Wait for socket to become readable,
 *	 can be combined in one event with other WL_SOCKET_* events
 * - WL_SOCKET_WRITEABLE: Wait for socket to become writeable,
 *	 can be combined with other WL_SOCKET_* events
 * - WL_SOCKET_CONNECTED: Wait for socket connection to be established,
 *	 can be combined with other WL_SOCKET_* events (on non-Windows
 *	 platforms, this is the same as WL_SOCKET_WRITEABLE)
 * - WL_SOCKET_ACCEPT: Wait for new connection to a server socket,
 *	 can be combined with other WL_SOCKET_* events (on non-Windows
 *	 platforms, this is the same as WL_SOCKET_READABLE)
 * - WL_SOCKET_CLOSED: Wait for socket to be closed by remote peer.
 * - WL_EXIT_ON_PM_DEATH: Exit immediately if the postmaster dies
 *
 * Returns the offset in WaitEventSet->events (starting from 0), which can be
 * used to modify previously added wait events using ModifyWaitEvent().
 *
 * In the WL_LATCH_SET case the latch must be owned by the current process,
 * i.e. it must be a process-local latch initialized with InitLatch, or a
 * shared latch associated with the current process by calling OwnLatch.
 *
 * In the WL_SOCKET_READABLE/WRITEABLE/CONNECTED/ACCEPT cases, EOF and error
 * conditions cause the socket to be reported as readable/writable/connected,
 * so that the caller can deal with the condition.
 *
 * The user_data pointer specified here will be set for the events returned
 * by WaitEventSetWait(), allowing the caller to easily associate additional
 * data with events.
 */
int
AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd, Latch *latch,
				  void *user_data)
{
	WaitEvent  *event;

	/* not enough space */
	Assert(set->nevents < set->nevents_space);

	if (events == WL_EXIT_ON_PM_DEATH)
	{
		events = WL_POSTMASTER_DEATH;
		set->exit_on_postmaster_death = true;
	}

	if (latch)
	{
		if (latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		if (set->latch)
			elog(ERROR, "cannot wait on more than one latch");
		if ((events & WL_LATCH_SET) != WL_LATCH_SET)
			elog(ERROR, "latch events only support being set");
	}
	else
	{
		if (events & WL_LATCH_SET)
			elog(ERROR, "cannot wait on latch without a specified latch");
	}

	/* waiting for socket readiness without a socket indicates a bug */
	if (fd == PGINVALID_SOCKET && (events & WL_SOCKET_MASK))
		elog(ERROR, "cannot wait on socket event without a socket");

	event = &set->events[set->nevents];
	event->pos = set->nevents++;
	event->fd = fd;
	event->events = events;
	event->user_data = user_data;
#ifdef WIN32
	event->reset = false;
#endif

	if (events == WL_LATCH_SET)
	{
		set->latch = latch;
		set->latch_pos = event->pos;
#if defined(WAIT_USE_SELF_PIPE)
		event->fd = selfpipe_readfd;
#elif defined(WAIT_USE_SIGNALFD)
		event->fd = signal_fd;
#else
		event->fd = PGINVALID_SOCKET;
#ifdef WAIT_USE_EPOLL
		return event->pos;
#endif
#endif
	}
	else if (events == WL_POSTMASTER_DEATH)
	{
#ifndef WIN32
		event->fd = postmaster_alive_fds[POSTMASTER_FD_WATCH];
#endif
	}

	/* perform wait primitive specific initialization, if needed */
#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_ADD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, 0);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif

	return event->pos;
}
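
/*
 * Illustration (added in editing, not part of the original file): a typical
 * caller builds a set once and reuses it.  MyLatch and WAIT_EVENT_CLIENT_READ
 * stand in for whatever the real caller would use.
 */
#if 0
static void
example_wait_for_socket(pgsocket sock)
{
	WaitEventSet *wes;
	WaitEvent	event;

	wes = CreateWaitEventSet(NULL, 3);	/* NULL resowner: session lifetime */
	AddWaitEventToSet(wes, WL_LATCH_SET, PGINVALID_SOCKET, MyLatch, NULL);
	AddWaitEventToSet(wes, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);
	AddWaitEventToSet(wes, WL_SOCKET_READABLE, sock, NULL, NULL);

	for (;;)
	{
		(void) WaitEventSetWait(wes, -1 /* no timeout */ , &event, 1,
								WAIT_EVENT_CLIENT_READ);
		if (event.events & WL_LATCH_SET)
			ResetLatch(MyLatch);	/* handle interrupts, then loop */
		if (event.events & WL_SOCKET_READABLE)
			break;				/* data or EOF available on sock */
	}

	FreeWaitEventSet(wes);
}
#endif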

/*
 * Change the event mask and, in the WL_LATCH_SET case, the latch associated
 * with the WaitEvent.  The latch may be changed to NULL to disable the latch
 * temporarily, and then set back to a latch later.
 *
 * 'pos' is the id returned by AddWaitEventToSet.
 */
void
ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch)
{
	WaitEvent  *event;
#if defined(WAIT_USE_KQUEUE)
	int			old_events;
#endif

	Assert(pos < set->nevents);

	event = &set->events[pos];
#if defined(WAIT_USE_KQUEUE)
	old_events = event->events;
#endif

	/*
	 * Allow switching between WL_POSTMASTER_DEATH and WL_EXIT_ON_PM_DEATH.
	 *
	 * Note that because WL_EXIT_ON_PM_DEATH is mapped to WL_POSTMASTER_DEATH
	 * in AddWaitEventToSet(), this needs to be checked before the fast-path
	 * below that checks if 'events' has changed.
	 */
	if (event->events == WL_POSTMASTER_DEATH)
	{
		if (events != WL_POSTMASTER_DEATH && events != WL_EXIT_ON_PM_DEATH)
			elog(ERROR, "cannot remove postmaster death event");
		set->exit_on_postmaster_death = ((events & WL_EXIT_ON_PM_DEATH) != 0);
		return;
	}

	/*
	 * If neither the event mask nor the associated latch changes, return
	 * early. That's an important optimization for some sockets, where
	 * ModifyWaitEvent is frequently used to switch from waiting for reads to
	 * waiting on writes.
	 */
	if (events == event->events &&
		(!(event->events & WL_LATCH_SET) || set->latch == latch))
		return;

	if (event->events & WL_LATCH_SET && events != event->events)
		elog(ERROR, "cannot modify latch event");

	/* FIXME: validate event mask */
	event->events = events;

	if (events == WL_LATCH_SET)
	{
		if (latch && latch->owner_pid != MyProcPid)
			elog(ERROR, "cannot wait on a latch owned by another process");
		set->latch = latch;

		/*
		 * On Unix, we don't need to modify the kernel object because the
		 * underlying pipe (if there is one) is the same for all latches so we
		 * can return immediately.  On Windows, we need to update our array of
		 * handles, but we leave the old one in place and tolerate spurious
		 * wakeups if the latch is disabled.
		 */
#if defined(WAIT_USE_WIN32)
		if (!latch)
			return;
#else
		return;
#endif
	}

#if defined(WAIT_USE_EPOLL)
	WaitEventAdjustEpoll(set, event, EPOLL_CTL_MOD);
#elif defined(WAIT_USE_KQUEUE)
	WaitEventAdjustKqueue(set, event, old_events);
#elif defined(WAIT_USE_POLL)
	WaitEventAdjustPoll(set, event);
#elif defined(WAIT_USE_WIN32)
	WaitEventAdjustWin32(set, event);
#endif
}
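
/*
 * Illustration (added in editing, not part of the original file): the
 * fast-path above is what makes flipping a socket between read and write
 * interest cheap, e.g.:
 *
 *	pos = AddWaitEventToSet(wes, WL_SOCKET_READABLE, sock, NULL, NULL);
 *	...
 *	ModifyWaitEvent(wes, pos, WL_SOCKET_WRITEABLE, NULL);	-- switch to writes
 *	ModifyWaitEvent(wes, pos, WL_SOCKET_WRITEABLE, NULL);	-- repeat: returns early
 */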

#if defined(WAIT_USE_EPOLL)
/*
 * action can be one of EPOLL_CTL_ADD | EPOLL_CTL_MOD | EPOLL_CTL_DEL
 */
static void
WaitEventAdjustEpoll(WaitEventSet *set, WaitEvent *event, int action)
{
	struct epoll_event epoll_ev;
	int			rc;

	/* pointer to our event, returned by epoll_wait */
	epoll_ev.data.ptr = event;
	/* always wait for errors */
	epoll_ev.events = EPOLLERR | EPOLLHUP;

	/* prepare pollfd entry once */
	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		epoll_ev.events |= EPOLLIN;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		epoll_ev.events |= EPOLLIN;
	}
	else
	{
		Assert(event->fd != PGINVALID_SOCKET);
		Assert(event->events & (WL_SOCKET_READABLE |
								WL_SOCKET_WRITEABLE |
								WL_SOCKET_CLOSED));

		if (event->events & WL_SOCKET_READABLE)
			epoll_ev.events |= EPOLLIN;
		if (event->events & WL_SOCKET_WRITEABLE)
			epoll_ev.events |= EPOLLOUT;
		if (event->events & WL_SOCKET_CLOSED)
			epoll_ev.events |= EPOLLRDHUP;
	}

	/*
	 * Even though unused, we also pass epoll_ev as the data argument if
	 * EPOLL_CTL_DEL is passed as action.  There used to be an epoll bug
	 * requiring that, and actually it makes the code simpler...
	 */
	rc = epoll_ctl(set->epoll_fd, action, event->fd, &epoll_ev);

	if (rc < 0)
		ereport(ERROR,
				(errcode_for_socket_access(),
				 errmsg("%s() failed: %m",
						"epoll_ctl")));
}
#endif

#if defined(WAIT_USE_POLL)
static void
WaitEventAdjustPoll(WaitEventSet *set, WaitEvent *event)
{
	struct pollfd *pollfd = &set->pollfds[event->pos];

	pollfd->revents = 0;
	pollfd->fd = event->fd;

	/* prepare pollfd entry once */
	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		pollfd->events = POLLIN;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		pollfd->events = POLLIN;
	}
	else
	{
		Assert(event->events & (WL_SOCKET_READABLE |
								WL_SOCKET_WRITEABLE |
								WL_SOCKET_CLOSED));
		pollfd->events = 0;
		if (event->events & WL_SOCKET_READABLE)
			pollfd->events |= POLLIN;
		if (event->events & WL_SOCKET_WRITEABLE)
			pollfd->events |= POLLOUT;
#ifdef POLLRDHUP
		if (event->events & WL_SOCKET_CLOSED)
			pollfd->events |= POLLRDHUP;
#endif
	}

	Assert(event->fd != PGINVALID_SOCKET);
}
#endif

#if defined(WAIT_USE_KQUEUE)

/*
 * On most BSD family systems, the udata member of struct kevent is of type
 * void *, so we could directly convert to/from WaitEvent *.  Unfortunately,
 * NetBSD has it as intptr_t, so here we wallpaper over that difference with
 * an lvalue cast.
 */
#define AccessWaitEvent(k_ev) (*((WaitEvent **)(&(k_ev)->udata)))

static inline void
WaitEventAdjustKqueueAdd(struct kevent *k_ev, int filter, int action,
						 WaitEvent *event)
{
	k_ev->ident = event->fd;
	k_ev->filter = filter;
	k_ev->flags = action;
	k_ev->fflags = 0;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddPostmaster(struct kevent *k_ev, WaitEvent *event)
{
	/* For now postmaster death can only be added, not removed. */
	k_ev->ident = PostmasterPid;
	k_ev->filter = EVFILT_PROC;
	k_ev->flags = EV_ADD;
	k_ev->fflags = NOTE_EXIT;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

static inline void
WaitEventAdjustKqueueAddLatch(struct kevent *k_ev, WaitEvent *event)
{
	/* For now latch can only be added, not removed. */
	k_ev->ident = SIGURG;
	k_ev->filter = EVFILT_SIGNAL;
	k_ev->flags = EV_ADD;
	k_ev->fflags = 0;
	k_ev->data = 0;
	AccessWaitEvent(k_ev) = event;
}

/*
 * old_events is the previous event mask, used to compute what has changed.
 */
static void
WaitEventAdjustKqueue(WaitEventSet *set, WaitEvent *event, int old_events)
{
	int			rc;
	struct kevent k_ev[2];
	int			count = 0;
	bool		new_filt_read = false;
	bool		old_filt_read = false;
	bool		new_filt_write = false;
	bool		old_filt_write = false;

	if (old_events == event->events)
		return;

	Assert(event->events != WL_LATCH_SET || set->latch != NULL);
	Assert(event->events == WL_LATCH_SET ||
		   event->events == WL_POSTMASTER_DEATH ||
		   (event->events & (WL_SOCKET_READABLE |
							 WL_SOCKET_WRITEABLE |
							 WL_SOCKET_CLOSED)));

	if (event->events == WL_POSTMASTER_DEATH)
	{
		/*
		 * Unlike all the other implementations, we detect postmaster death
		 * using process notification instead of waiting on the postmaster
		 * alive pipe.
		 */
		WaitEventAdjustKqueueAddPostmaster(&k_ev[count++], event);
	}
	else if (event->events == WL_LATCH_SET)
	{
		/* We detect latch wakeup using a signal event. */
		WaitEventAdjustKqueueAddLatch(&k_ev[count++], event);
	}
	else
	{
		/*
		 * We need to compute the adds and deletes required to get from the
		 * old event mask to the new event mask, since kevent treats readable
		 * and writable as separate events.
		 */
		if (old_events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
			old_filt_read = true;
		if (event->events & (WL_SOCKET_READABLE | WL_SOCKET_CLOSED))
			new_filt_read = true;
		if (old_events & WL_SOCKET_WRITEABLE)
			old_filt_write = true;
		if (event->events & WL_SOCKET_WRITEABLE)
			new_filt_write = true;
		if (old_filt_read && !new_filt_read)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_DELETE,
									 event);
		else if (!old_filt_read && new_filt_read)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_READ, EV_ADD,
									 event);
		if (old_filt_write && !new_filt_write)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_DELETE,
									 event);
		else if (!old_filt_write && new_filt_write)
			WaitEventAdjustKqueueAdd(&k_ev[count++], EVFILT_WRITE, EV_ADD,
									 event);
	}

	/* For WL_SOCKET_READ -> WL_SOCKET_CLOSED, no change needed. */
	if (count == 0)
		return;

	Assert(count <= 2);

	rc = kevent(set->kqueue_fd, &k_ev[0], count, NULL, 0, NULL);

	/*
	 * When adding the postmaster's pid, we have to consider that it might
	 * already have exited and perhaps even been replaced by another process
	 * with the same pid.  If so, we have to defer reporting this as an event
	 * until the next call to WaitEventSetWaitBlock().
	 */

	if (rc < 0)
	{
		if (event->events == WL_POSTMASTER_DEATH &&
			(errno == ESRCH || errno == EACCES))
			set->report_postmaster_not_running = true;
		else
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"kevent")));
	}
	else if (event->events == WL_POSTMASTER_DEATH &&
			 PostmasterPid != getppid() &&
			 !PostmasterIsAliveInternal())
	{
		/*
		 * The extra PostmasterIsAliveInternal() check prevents false alarms
		 * on systems that give a different value for getppid() while being
		 * traced by a debugger.
		 */
		set->report_postmaster_not_running = true;
	}
}

#endif

#if defined(WAIT_USE_WIN32)
static void
WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
{
	HANDLE	   *handle = &set->handles[event->pos + 1];

	if (event->events == WL_LATCH_SET)
	{
		Assert(set->latch != NULL);
		*handle = set->latch->event;
	}
	else if (event->events == WL_POSTMASTER_DEATH)
	{
		*handle = PostmasterHandle;
	}
	else
	{
		int			flags = FD_CLOSE;	/* always check for errors/EOF */

		if (event->events & WL_SOCKET_READABLE)
			flags |= FD_READ;
		if (event->events & WL_SOCKET_WRITEABLE)
			flags |= FD_WRITE;
		if (event->events & WL_SOCKET_CONNECTED)
			flags |= FD_CONNECT;
		if (event->events & WL_SOCKET_ACCEPT)
			flags |= FD_ACCEPT;

		if (*handle == WSA_INVALID_EVENT)
		{
			*handle = WSACreateEvent();
			if (*handle == WSA_INVALID_EVENT)
				elog(ERROR, "failed to create event for socket: error code %d",
					 WSAGetLastError());
		}
		if (WSAEventSelect(event->fd, *handle, flags) != 0)
			elog(ERROR, "failed to set up event for socket: error code %d",
				 WSAGetLastError());

		Assert(event->fd != PGINVALID_SOCKET);
	}
}
#endif

/*
 * Wait for events added to the set to happen, or until the timeout is
 * reached.  At most nevents events are returned.
 *
 * If timeout = -1, block until an event occurs; if 0, check sockets for
 * readiness, but don't block; if > 0, block for at most timeout milliseconds.
 *
 * Returns the number of events that occurred, or 0 if the timeout was
 * reached.
 *
 * Returned events will have the fd, pos, user_data fields set to the
 * values associated with the registered event.
 */
int
WaitEventSetWait(WaitEventSet *set, long timeout,
				 WaitEvent *occurred_events, int nevents,
				 uint32 wait_event_info)
{
	int			returned_events = 0;
	instr_time	start_time;
	instr_time	cur_time;
	long		cur_timeout = -1;

	Assert(nevents > 0);

	/*
	 * Initialize timeout if requested.  We must record the current time so
	 * that we can determine the remaining timeout if interrupted.
	 */
	if (timeout >= 0)
	{
		INSTR_TIME_SET_CURRENT(start_time);
		Assert(timeout >= 0 && timeout <= INT_MAX);
		cur_timeout = timeout;
	}
	else
		INSTR_TIME_SET_ZERO(start_time);

	pgstat_report_wait_start(wait_event_info);

#ifndef WIN32
	waiting = true;
#else
	/* Ensure that signals are serviced even if latch is already set */
	pgwin32_dispatch_queued_signals();
#endif
	while (returned_events == 0)
	{
		int			rc;

		/*
		 * Check if the latch is set already first.  If so, we either exit
		 * immediately or ask the kernel for further events available right
		 * now without waiting, depending on how many events the caller wants.
		 *
		 * If someone sets the latch between this and the
		 * WaitEventSetWaitBlock() below, the setter will write a byte to the
		 * pipe (or signal us and the signal handler will do that), and the
		 * readiness routine will return immediately.
		 *
		 * On unix, if there's a pending byte in the self pipe, we'll notice
		 * whenever blocking.  Only clearing the pipe in that case avoids
		 * having to drain it every time WaitLatchOrSocket() is used.  Should
		 * the pipe-buffer fill up we're still ok, because the pipe is in
		 * nonblocking mode.  It's unlikely for that to happen, because the
		 * self pipe isn't filled unless we're blocking (waiting = true), or
		 * from inside a signal handler in latch_sigurg_handler().
		 *
		 * On windows, we'll also notice if there's a pending event for the
		 * latch when blocking, but there's no danger of anything filling up,
		 * as "Setting an event that is already set has no effect.".
		 *
		 * Note: we assume that the kernel calls involved in latch management
		 * will provide adequate synchronization on machines with weak memory
		 * ordering, so that we cannot miss seeing is_set if a notification
		 * has already been queued.
		 */
		if (set->latch && !set->latch->is_set)
		{
			/* about to sleep on a latch */
			set->latch->maybe_sleeping = true;
			pg_memory_barrier();
			/* and recheck */
		}

		if (set->latch && set->latch->is_set)
		{
			occurred_events->fd = PGINVALID_SOCKET;
			occurred_events->pos = set->latch_pos;
			occurred_events->user_data =
				set->events[set->latch_pos].user_data;
			occurred_events->events = WL_LATCH_SET;
			occurred_events++;
			returned_events++;

			/* could have been set above */
			set->latch->maybe_sleeping = false;

			if (returned_events == nevents)
				break;			/* output buffer full already */

			/*
			 * Even though we already have an event, we'll poll just once with
			 * zero timeout to see what non-latch events we can fit into the
			 * output buffer at the same time.
			 */
			cur_timeout = 0;
			timeout = 0;
		}

		/*
		 * Wait for events using the readiness primitive chosen at the top of
		 * this file. If -1 is returned, a timeout has occurred, if 0 we have
		 * to retry, everything >= 1 is the number of returned events.
		 */
		rc = WaitEventSetWaitBlock(set, cur_timeout,
								   occurred_events, nevents - returned_events);

		if (set->latch &&
			set->latch->maybe_sleeping)
			set->latch->maybe_sleeping = false;

		if (rc == -1)
			break;				/* timeout occurred */
		else
			returned_events += rc;

		/* If we're not done, update cur_timeout for next iteration */
		if (returned_events == 0 && timeout >= 0)
		{
			INSTR_TIME_SET_CURRENT(cur_time);
			INSTR_TIME_SUBTRACT(cur_time, start_time);
			cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
			if (cur_timeout <= 0)
				break;
		}
	}
#ifndef WIN32
	waiting = false;
#endif

	pgstat_report_wait_end();

	return returned_events;
}
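
/*
 * Illustration (added in editing, not part of the original file): the return
 * convention above makes a timed wait look like this (names are stand-ins):
 */
#if 0
static bool
example_wait_readable(WaitEventSet *wes, long timeout_ms)
{
	WaitEvent	event;
	int			nevents;

	nevents = WaitEventSetWait(wes, timeout_ms, &event, 1,
							   WAIT_EVENT_CLIENT_READ);
	if (nevents == 0)
		return false;			/* timeout reached, no event */
	if (event.events & WL_LATCH_SET)
		ResetLatch(MyLatch);
	return (event.events & WL_SOCKET_READABLE) != 0;
}
#endif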


#if defined(WAIT_USE_EPOLL)

/*
 * Wait using linux's epoll_wait(2).
 *
 * This is the preferable wait method, as several readiness notifications are
 * delivered, without having to iterate through all of set->events. The
 * returned epoll_event structs contain a pointer to our events, making
 * association easy.
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	int			rc;
	WaitEvent  *cur_event;
	struct epoll_event *cur_epoll_event;

	/* Sleep */
	rc = epoll_wait(set->epoll_fd, set->epoll_ret_events,
					Min(nevents, set->nevents_space), cur_timeout);

	/* Check return code */
	if (rc < 0)
	{
		/* EINTR is okay, otherwise complain */
		if (errno != EINTR)
		{
			waiting = false;
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"epoll_wait")));
		}
		return 0;
	}
	else if (rc == 0)
	{
		/* timeout exceeded */
		return -1;
	}

	/*
	 * At least one event occurred, iterate over the returned epoll events
	 * until they're either all processed, or we've returned all the events
	 * the caller desired.
	 */
	for (cur_epoll_event = set->epoll_ret_events;
		 cur_epoll_event < (set->epoll_ret_events + rc) &&
		 returned_events < nevents;
		 cur_epoll_event++)
	{
		/* epoll's data pointer is set to the associated WaitEvent */
		cur_event = (WaitEvent *) cur_epoll_event->data.ptr;

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET &&
			cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
		{
			/* Drain the signalfd. */
			drain();

			if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH &&
				 cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP))
		{
			/*
			 * We expect an EPOLLHUP when the remote end is closed, but
			 * because we don't expect the pipe to become readable or to have
			 * any errors either, treat those cases as postmaster death, too.
			 *
			 * Be paranoid about a spurious event signaling the postmaster as
			 * being dead.  There have been reports about that happening with
			 * older primitives (select(2) to be specific), and a spurious
			 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
			 * cost much.
			 */
			if (!PostmasterIsAliveInternal())
			{
				if (set->exit_on_postmaster_death)
					proc_exit(1);
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_POSTMASTER_DEATH;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events & (WL_SOCKET_READABLE |
									  WL_SOCKET_WRITEABLE |
									  WL_SOCKET_CLOSED))
		{
			Assert(cur_event->fd != PGINVALID_SOCKET);

			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(cur_epoll_event->events & (EPOLLIN | EPOLLERR | EPOLLHUP)))
			{
				/* data available in socket, or EOF */
				occurred_events->events |= WL_SOCKET_READABLE;
			}

			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(cur_epoll_event->events & (EPOLLOUT | EPOLLERR | EPOLLHUP)))
			{
				/* writable, or EOF */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}

			if ((cur_event->events & WL_SOCKET_CLOSED) &&
				(cur_epoll_event->events & (EPOLLRDHUP | EPOLLERR | EPOLLHUP)))
			{
				/* remote peer shut down, or error */
				occurred_events->events |= WL_SOCKET_CLOSED;
			}

			if (occurred_events->events != 0)
			{
				occurred_events->fd = cur_event->fd;
				occurred_events++;
				returned_events++;
			}
		}
	}

	return returned_events;
}

#elif defined(WAIT_USE_KQUEUE)

/*
 * Wait using kevent(2) on BSD-family systems and macOS.
 *
 * For now this mirrors the epoll code, but in future it could modify the fd
 * set in the same call to kevent as it uses for waiting instead of doing that
 * with separate system calls.
 */
static int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	int			rc;
	WaitEvent  *cur_event;
	struct kevent *cur_kqueue_event;
	struct timespec timeout;
	struct timespec *timeout_p;

	if (cur_timeout < 0)
		timeout_p = NULL;
	else
	{
		timeout.tv_sec = cur_timeout / 1000;
		timeout.tv_nsec = (cur_timeout % 1000) * 1000000;
		timeout_p = &timeout;
	}

	/*
	 * Report postmaster events discovered by WaitEventAdjustKqueue() or an
	 * earlier call to WaitEventSetWait().
	 */
	if (unlikely(set->report_postmaster_not_running))
	{
		if (set->exit_on_postmaster_death)
			proc_exit(1);
		occurred_events->fd = PGINVALID_SOCKET;
		occurred_events->events = WL_POSTMASTER_DEATH;
		return 1;
	}

	/* Sleep */
	rc = kevent(set->kqueue_fd, NULL, 0,
				set->kqueue_ret_events,
				Min(nevents, set->nevents_space),
				timeout_p);

	/* Check return code */
	if (rc < 0)
	{
		/* EINTR is okay, otherwise complain */
		if (errno != EINTR)
		{
			waiting = false;
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"kevent")));
		}
		return 0;
	}
	else if (rc == 0)
	{
		/* timeout exceeded */
		return -1;
	}

	/*
	 * At least one event occurred, iterate over the returned kqueue events
	 * until they're either all processed, or we've returned all the events
	 * the caller desired.
	 */
	for (cur_kqueue_event = set->kqueue_ret_events;
		 cur_kqueue_event < (set->kqueue_ret_events + rc) &&
		 returned_events < nevents;
		 cur_kqueue_event++)
	{
		/* kevent's udata points to the associated WaitEvent */
		cur_event = AccessWaitEvent(cur_kqueue_event);

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET &&
			cur_kqueue_event->filter == EVFILT_SIGNAL)
		{
			if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH &&
				 cur_kqueue_event->filter == EVFILT_PROC &&
				 (cur_kqueue_event->fflags & NOTE_EXIT) != 0)
		{
			/*
			 * The kernel will tell this kqueue object only once about the
			 * exit of the postmaster, so let's remember that for next time so
			 * that we provide level-triggered semantics.
			 */
			set->report_postmaster_not_running = true;

			if (set->exit_on_postmaster_death)
				proc_exit(1);
			occurred_events->fd = PGINVALID_SOCKET;
			occurred_events->events = WL_POSTMASTER_DEATH;
			occurred_events++;
			returned_events++;
		}
		else if (cur_event->events & (WL_SOCKET_READABLE |
									  WL_SOCKET_WRITEABLE |
									  WL_SOCKET_CLOSED))
		{
			Assert(cur_event->fd >= 0);

			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(cur_kqueue_event->filter == EVFILT_READ))
			{
				/* readable, or EOF */
				occurred_events->events |= WL_SOCKET_READABLE;
			}

			if ((cur_event->events & WL_SOCKET_CLOSED) &&
				(cur_kqueue_event->filter == EVFILT_READ) &&
				(cur_kqueue_event->flags & EV_EOF))
			{
				/* the remote peer has shut down */
				occurred_events->events |= WL_SOCKET_CLOSED;
			}

			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(cur_kqueue_event->filter == EVFILT_WRITE))
			{
				/* writable, or EOF */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}

			if (occurred_events->events != 0)
			{
				occurred_events->fd = cur_event->fd;
				occurred_events++;
				returned_events++;
			}
		}
	}

	return returned_events;
}

#elif defined(WAIT_USE_POLL)

/*
 * Wait using poll(2).
 *
 * This allows receiving readiness notifications for several events at once,
 * but requires iterating through all of set->pollfds.
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	int			rc;
	WaitEvent  *cur_event;
	struct pollfd *cur_pollfd;

	/* Sleep */
	rc = poll(set->pollfds, set->nevents, (int) cur_timeout);

	/* Check return code */
	if (rc < 0)
	{
		/* EINTR is okay, otherwise complain */
		if (errno != EINTR)
		{
			waiting = false;
			ereport(ERROR,
					(errcode_for_socket_access(),
					 errmsg("%s() failed: %m",
							"poll")));
		}
		return 0;
	}
	else if (rc == 0)
	{
		/* timeout exceeded */
		return -1;
	}

	for (cur_event = set->events, cur_pollfd = set->pollfds;
		 cur_event < (set->events + set->nevents) &&
		 returned_events < nevents;
		 cur_event++, cur_pollfd++)
	{
		/* no activity on this FD, skip */
		if (cur_pollfd->revents == 0)
			continue;

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET &&
			(cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
		{
			/* There's data in the self-pipe, clear it. */
			drain();

			if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH &&
				 (cur_pollfd->revents & (POLLIN | POLLHUP | POLLERR | POLLNVAL)))
		{
			/*
			 * We expect a POLLHUP when the remote end is closed, but because
			 * we don't expect the pipe to become readable or to have any
			 * errors either, treat those cases as postmaster death, too.
			 *
			 * Be paranoid about a spurious event signaling the postmaster as
			 * being dead.  There have been reports about that happening with
			 * older primitives (select(2) to be specific), and a spurious
			 * WL_POSTMASTER_DEATH event would be painful. Re-checking doesn't
			 * cost much.
			 */
			if (!PostmasterIsAliveInternal())
			{
				if (set->exit_on_postmaster_death)
					proc_exit(1);
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_POSTMASTER_DEATH;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events & (WL_SOCKET_READABLE |
									  WL_SOCKET_WRITEABLE |
									  WL_SOCKET_CLOSED))
		{
			int			errflags = POLLHUP | POLLERR | POLLNVAL;

			Assert(cur_event->fd >= PGINVALID_SOCKET);

			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(cur_pollfd->revents & (POLLIN | errflags)))
			{
				/* data available in socket, or EOF */
				occurred_events->events |= WL_SOCKET_READABLE;
			}

			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(cur_pollfd->revents & (POLLOUT | errflags)))
			{
				/* writeable, or EOF */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}

#ifdef POLLRDHUP
			if ((cur_event->events & WL_SOCKET_CLOSED) &&
				(cur_pollfd->revents & (POLLRDHUP | errflags)))
			{
				/* remote peer closed, or error */
				occurred_events->events |= WL_SOCKET_CLOSED;
			}
#endif

			if (occurred_events->events != 0)
			{
				occurred_events->fd = cur_event->fd;
				occurred_events++;
				returned_events++;
			}
		}
	}
	return returned_events;
}

#elif defined(WAIT_USE_WIN32)

/*
 * Wait using Windows' WaitForMultipleObjects().  Each call only "consumes" one
 * event, so we keep calling until we've filled up our output buffer to match
 * the behavior of the other implementations.
 *
 * https://blogs.msdn.microsoft.com/oldnewthing/20150409-00/?p=44273
 */
static inline int
WaitEventSetWaitBlock(WaitEventSet *set, int cur_timeout,
					  WaitEvent *occurred_events, int nevents)
{
	int			returned_events = 0;
	DWORD		rc;
	WaitEvent  *cur_event;

	/* Reset any wait events that need it */
	for (cur_event = set->events;
		 cur_event < (set->events + set->nevents);
		 cur_event++)
	{
		if (cur_event->reset)
		{
			WaitEventAdjustWin32(set, cur_event);
			cur_event->reset = false;
		}

		/*
		 * We associate the socket with a new event handle for each
		 * WaitEventSet.  FD_CLOSE is only generated once if the other end
		 * closes gracefully.  Therefore we might miss the FD_CLOSE
		 * notification, if it was delivered to another event after we stopped
		 * waiting for it.  Close that race by peeking for EOF after setting
		 * up this handle to receive notifications, and before entering the
		 * sleep.
		 *
		 * XXX If we had one event handle for the lifetime of a socket, we
		 * wouldn't need this.
		 */
		if (cur_event->events & WL_SOCKET_READABLE)
		{
			char		c;
			WSABUF		buf;
			DWORD		received;
			DWORD		flags;

			buf.buf = &c;
			buf.len = 1;
			flags = MSG_PEEK;
			if (WSARecv(cur_event->fd, &buf, 1, &received, &flags, NULL, NULL) == 0)
			{
				occurred_events->pos = cur_event->pos;
				occurred_events->user_data = cur_event->user_data;
				occurred_events->events = WL_SOCKET_READABLE;
				occurred_events->fd = cur_event->fd;
				return 1;
			}
		}

		/*
		 * Windows does not guarantee to log an FD_WRITE network event
		 * indicating that more data can be sent unless the previous send()
		 * failed with WSAEWOULDBLOCK.  While our caller might well have made
		 * such a call, we cannot assume that here.  Therefore, if waiting for
		 * write-ready, force the issue by doing a dummy send().  If the dummy
		 * send() succeeds, assume that the socket is in fact write-ready, and
		 * return immediately.  Also, if it fails with something other than
		 * WSAEWOULDBLOCK, return a write-ready indication to let our caller
		 * deal with the error condition.
		 */
		if (cur_event->events & WL_SOCKET_WRITEABLE)
		{
			char		c;
			WSABUF		buf;
			DWORD		sent;
			int			r;

			buf.buf = &c;
			buf.len = 0;

			r = WSASend(cur_event->fd, &buf, 1, &sent, 0, NULL, NULL);
			if (r == 0 || WSAGetLastError() != WSAEWOULDBLOCK)
			{
				occurred_events->pos = cur_event->pos;
				occurred_events->user_data = cur_event->user_data;
				occurred_events->events = WL_SOCKET_WRITEABLE;
				occurred_events->fd = cur_event->fd;
				return 1;
			}
		}
	}

	/*
	 * Sleep.
	 *
	 * Need to wait for ->nevents + 1, because signal handle is in [0].
	 */
	rc = WaitForMultipleObjects(set->nevents + 1, set->handles, FALSE,
								cur_timeout);

	/* Check return code */
	if (rc == WAIT_FAILED)
		elog(ERROR, "WaitForMultipleObjects() failed: error code %lu",
			 GetLastError());
	else if (rc == WAIT_TIMEOUT)
	{
		/* timeout exceeded */
		return -1;
	}

	if (rc == WAIT_OBJECT_0)
	{
		/* Service newly-arrived signals */
		pgwin32_dispatch_queued_signals();
		return 0;				/* retry */
	}

	/*
	 * With an offset of one, due to the always present pgwin32_signal_event,
	 * the handle offset directly corresponds to a wait event.
	 */
	cur_event = (WaitEvent *) &set->events[rc - WAIT_OBJECT_0 - 1];

	for (;;)
	{
		int			next_pos;
		int			count;

		occurred_events->pos = cur_event->pos;
		occurred_events->user_data = cur_event->user_data;
		occurred_events->events = 0;

		if (cur_event->events == WL_LATCH_SET)
		{
			/*
			 * We cannot use set->latch->event to reset the fired event if we
			 * aren't waiting on this latch now.
			 */
			if (!ResetEvent(set->handles[cur_event->pos + 1]))
				elog(ERROR, "ResetEvent failed: error code %lu", GetLastError());

			if (set->latch && set->latch->maybe_sleeping && set->latch->is_set)
			{
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_LATCH_SET;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events == WL_POSTMASTER_DEATH)
		{
			/*
			 * Postmaster apparently died.  Since the consequences of falsely
			 * returning WL_POSTMASTER_DEATH could be pretty unpleasant, we
			 * take the trouble to positively verify this with
			 * PostmasterIsAlive(), even though there is no known reason to
			 * think that the event could be falsely set on Windows.
			 */
			if (!PostmasterIsAliveInternal())
			{
				if (set->exit_on_postmaster_death)
					proc_exit(1);
				occurred_events->fd = PGINVALID_SOCKET;
				occurred_events->events = WL_POSTMASTER_DEATH;
				occurred_events++;
				returned_events++;
			}
		}
		else if (cur_event->events & WL_SOCKET_MASK)
		{
			WSANETWORKEVENTS resEvents;
			HANDLE		handle = set->handles[cur_event->pos + 1];

			Assert(cur_event->fd);

			occurred_events->fd = cur_event->fd;

			ZeroMemory(&resEvents, sizeof(resEvents));
			if (WSAEnumNetworkEvents(cur_event->fd, handle, &resEvents) != 0)
				elog(ERROR, "failed to enumerate network events: error code %d",
					 WSAGetLastError());
			if ((cur_event->events & WL_SOCKET_READABLE) &&
				(resEvents.lNetworkEvents & FD_READ))
			{
				/* data available in socket */
				occurred_events->events |= WL_SOCKET_READABLE;

				/*------
				 * WaitForMultipleObjects doesn't guarantee that a read event
				 * will be returned if the latch is set at the same time.  Even
				 * if it did, the caller might drop that event expecting it to
				 * reoccur on next call.  So, we must force the event to be
				 * reset if this WaitEventSet is used again in order to avoid
				 * an indefinite hang.
				 *
				 * Refer
				 * https://msdn.microsoft.com/en-us/library/windows/desktop/ms741576(v=vs.85).aspx
				 * for the behavior of socket events.
				 *------
				 */
				cur_event->reset = true;
			}
			if ((cur_event->events & WL_SOCKET_WRITEABLE) &&
				(resEvents.lNetworkEvents & FD_WRITE))
			{
				/* writeable */
				occurred_events->events |= WL_SOCKET_WRITEABLE;
			}
			if ((cur_event->events & WL_SOCKET_CONNECTED) &&
				(resEvents.lNetworkEvents & FD_CONNECT))
			{
				/* connected */
				occurred_events->events |= WL_SOCKET_CONNECTED;
			}
			if ((cur_event->events & WL_SOCKET_ACCEPT) &&
				(resEvents.lNetworkEvents & FD_ACCEPT))
			{
				/* incoming connection could be accepted */
				occurred_events->events |= WL_SOCKET_ACCEPT;
			}
			if (resEvents.lNetworkEvents & FD_CLOSE)
			{
				/* EOF/error, so signal all caller-requested socket flags */
				occurred_events->events |= (cur_event->events & WL_SOCKET_MASK);
			}

			if (occurred_events->events != 0)
			{
				occurred_events++;
				returned_events++;
			}
		}

		/* Is the output buffer full? */
		if (returned_events == nevents)
			break;

		/* Have we run out of possible events? */
		next_pos = cur_event->pos + 1;
		if (next_pos == set->nevents)
			break;

		/*
		 * Poll the rest of the event handles in the array starting at
		 * next_pos being careful to skip over the initial signal handle too.
		 * This time we use a zero timeout.
		 */
		count = set->nevents - next_pos;
		rc = WaitForMultipleObjects(count,
									set->handles + 1 + next_pos,
									false,
									0);

		/*
		 * We don't distinguish between errors and WAIT_TIMEOUT here because
		 * we already have events to report.
		 */
		if (rc < WAIT_OBJECT_0 || rc >= WAIT_OBJECT_0 + count)
			break;

		/* We have another event to decode. */
		cur_event = &set->events[next_pos + (rc - WAIT_OBJECT_0)];
	}

	return returned_events;
}
#endif

/*
 * Return whether the current build options can report WL_SOCKET_CLOSED.
 */
bool
WaitEventSetCanReportClosed(void)
{
#if (defined(WAIT_USE_POLL) && defined(POLLRDHUP)) || \
	defined(WAIT_USE_EPOLL) || \
	defined(WAIT_USE_KQUEUE)
	return true;
#else
	return false;
#endif
}
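
/*
 * Illustration (added in editing, not part of the original file): a caller
 * would typically probe this before relying on WL_SOCKET_CLOSED, e.g.:
 *
 *	if (WaitEventSetCanReportClosed())
 *		AddWaitEventToSet(wes, WL_SOCKET_CLOSED, sock, NULL, NULL);
 *	else
 *		(fall back to some other way of noticing disconnect)
 */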

/*
 * Get the number of wait events registered in a given WaitEventSet.
 */
int
GetNumRegisteredWaitEvents(WaitEventSet *set)
{
	return set->nevents;
}

#if defined(WAIT_USE_SELF_PIPE)

/*
 * SetLatch uses SIGURG to wake up the process waiting on the latch.
 *
 * Wake up WaitLatch, if we're waiting.
 */
static void
latch_sigurg_handler(SIGNAL_ARGS)
{
	if (waiting)
		sendSelfPipeByte();
}

/* Send one byte to the self-pipe, to wake up WaitLatch */
static void
sendSelfPipeByte(void)
{
	int			rc;
	char		dummy = 0;

retry:
	rc = write(selfpipe_writefd, &dummy, 1);
	if (rc < 0)
	{
		/* If interrupted by signal, just retry */
		if (errno == EINTR)
			goto retry;

		/*
		 * If the pipe is full, we don't need to retry, the data that's there
		 * already is enough to wake up WaitLatch.
		 */
		if (errno == EAGAIN || errno == EWOULDBLOCK)
			return;

		/*
		 * Oops, the write() failed for some other reason.  We might be in a
		 * signal handler, so it's not safe to elog().  We have no choice but
		 * to silently ignore the error.
		 */
		return;
	}
}

#endif
1934
1935#if defined(WAIT_USE_SELF_PIPE) || defined(WAIT_USE_SIGNALFD)
1936
1937/*
1938 * Read all available data from self-pipe or signalfd.
1939 *
1940 * Note: this is only called when waiting = true. If it fails and doesn't
1941 * return, it must reset that flag first (though ideally, this will never
1942 * happen).
1943 */
1944static void
1946{
1947 char buf[1024];
1948 int rc;
1949 int fd;
1950
1951#ifdef WAIT_USE_SELF_PIPE
1952 fd = selfpipe_readfd;
1953#else
1954 fd = signal_fd;
1955#endif
1956
1957 for (;;)
1958 {
1959 rc = read(fd, buf, sizeof(buf));
1960 if (rc < 0)
1961 {
1962 if (errno == EAGAIN || errno == EWOULDBLOCK)
1963 break; /* the descriptor is empty */
1964 else if (errno == EINTR)
1965 continue; /* retry */
1966 else
1967 {
1968 waiting = false;
1969#ifdef WAIT_USE_SELF_PIPE
1970 elog(ERROR, "read() on self-pipe failed: %m");
1971#else
1972 elog(ERROR, "read() on signalfd failed: %m");
1973#endif
1974 }
1975 }
1976 else if (rc == 0)
1977 {
1978 waiting = false;
1979#ifdef WAIT_USE_SELF_PIPE
1980 elog(ERROR, "unexpected EOF on self-pipe");
1981#else
1982 elog(ERROR, "unexpected EOF on signalfd");
1983#endif
1984 }
1985 else if (rc < sizeof(buf))
1986 {
1987 /* we successfully drained the pipe; no need to read() again */
1988 break;
1989 }
1990 /* else buffer wasn't big enough, so read again */
1991 }
1992}
1993
1994#endif
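
To make the EAGAIN/EWOULDBLOCK handling above concrete for the signalfd path, a Linux-only sketch of how such a descriptor can be created; make_sigurg_fd is a hypothetical name, and the file's real setup happens in InitializeWaitEventSupport(). The signal stays blocked, so it is only ever consumed by read()ing the descriptor, and each pending SIGURG comes back as a struct signalfd_siginfo record.

#ifdef HAVE_SYS_SIGNALFD_H
#include <signal.h>
#include <sys/signalfd.h>

static int
make_sigurg_fd(void)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGURG);
	(void) sigprocmask(SIG_BLOCK, &mask, NULL); /* never delivered async */

	/* SFD_NONBLOCK makes the EAGAIN check in drain() meaningful */
	return signalfd(-1, &mask, SFD_NONBLOCK | SFD_CLOEXEC);
}
#endif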
1995
1996static void
1997ResOwnerReleaseWaitEventSet(Datum res)
1998{
1999 WaitEventSet *set = (WaitEventSet *) DatumGetPointer(res);
2000
2001 Assert(set->owner != NULL);
2002 set->owner = NULL;
2003 FreeWaitEventSet(set);
2004}
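
A hedged sketch of the lifecycle this callback backstops, using this file's public API; wait_latch_events_sketch is hypothetical and the zero wait_event_info is a placeholder. If an error aborts the transaction before the explicit FreeWaitEventSet(), the resource owner machinery calls ResOwnerReleaseWaitEventSet() and the set is freed anyway.

static void
wait_latch_events_sketch(Latch *latch)
{
	WaitEventSet *set;
	WaitEvent event;

	set = CreateWaitEventSet(CurrentResourceOwner, 2);
	AddWaitEventToSet(set, WL_LATCH_SET, PGINVALID_SOCKET, latch, NULL);
	AddWaitEventToSet(set, WL_EXIT_ON_PM_DEATH, PGINVALID_SOCKET, NULL, NULL);

	/* -1 means no timeout; 0 stands in for a real wait_event_info */
	(void) WaitEventSetWait(set, -1, &event, 1, 0);

	FreeWaitEventSet(set);		/* also forgets the resource owner entry */
}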
2005
2006#ifndef WIN32
2007/*
2008 * Wake up my process if it's currently sleeping in WaitEventSetWaitBlock()
2009 *
2010 * NB: if calling this from a signal handler, be sure to save and restore
2011 * errno around it. (That's standard practice in most signal handlers, of
2012 * course, but we used to omit it in handlers that only set a flag.)
2013 *
2014 * NB: this function is called from critical sections and signal handlers,
2015 * so throwing an error is not a good idea.
2016 *
2017 * On Windows, Latch uses SetEvent directly and this is not used.
2018 */
2019void
2020WakeupMyProc(void)
2021{
2022#if defined(WAIT_USE_SELF_PIPE)
2023 if (waiting)
2024 sendSelfPipeByte();
2025#else
2026 if (waiting)
2027 kill(MyProcPid, SIGURG);
2028#endif
2029}
2030
2031/* Similar to WakeupMyProc, but wake up another process */
2032void
2033WakeupOtherProc(int pid)
2034{
2035 kill(pid, SIGURG);
2036}
2037#endif
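
Finally, a small sketch of how a SetLatch()-style caller might choose between these two primitives, assuming the declarations above; wake_waiter and owner_pid are hypothetical names. Sending SIGURG is async-signal-safe, and the race with a concurrent WaitEventSetWait() is closed by the machinery in this file: the self-pipe byte, or the pending SIGURG consumed via signalfd/kqueue, survives until the waiter observes it.

#ifndef WIN32
static void
wake_waiter(int owner_pid)
{
	if (owner_pid == MyProcPid)
		WakeupMyProc();			/* self-pipe byte or self-directed SIGURG */
	else
		WakeupOtherProc(owner_pid); /* kill(owner_pid, SIGURG) */
}
#endif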