Here's a diff to call WSAWaitForMultipleEvents() repeatedly to check
for events other than the first one returned. I've also added a
map of io_t to events[] index to avoid the need for splay_search().
The value of event_count will change if a callback adds an event
so we need to handle that.
I've added a generation number to the splay tree head that gets
incremented by io_del(). I'm undecided whether it should also be
incremented by io_add().
Does this seem like the right direction?
- todd
diff --git a/src/event.c b/src/event.c
index 33d205a..b60a6d7 100644
--- a/src/event.c
+++ b/src/event.c
@@ -387,71 +389,89 @@ bool event_loop(void) {
Note that technically FD_CLOSE has the same problem, but it's okay
because user code does not rely on
this event being fired again if ignored.
*/
- io_t *writeable_io = NULL;
+ unsigned int curgen = io_tree.generation;
- for splay_each(io_t, io, &io_tree)
+ for splay_each(io_t, io, &io_tree) {
if(io->flags & IO_WRITE && send(io->fd, NULL, 0, 0) == 0)
{
- writeable_io = io;
- break;
+ io->cb(io->data, IO_WRITE);
+
+ if(curgen != io_tree.generation) {
+ break;
+ }
}
+ }
- if(writeable_io) {
- writeable_io->cb(writeable_io->data, IO_WRITE);
- continue;
+ if(event_count > WSA_MAXIMUM_WAIT_EVENTS) {
+ WSASetLastError(WSA_INVALID_PARAMETER);
+ return(false);
}
- WSAEVENT *events = xmalloc(event_count * sizeof(*events));
+ WSAEVENT events[WSA_MAXIMUM_WAIT_EVENTS];
+ io_t *io_map[WSA_MAXIMUM_WAIT_EVENTS];
DWORD event_index = 0;
for splay_each(io_t, io, &io_tree) {
events[event_index] = io->event;
+ io_map[event_index] = io;
event_index++;
}
- DWORD result = WSAWaitForMultipleEvents(event_count, events, FALSE,
timeout_ms, FALSE);
+ /*
+ * If the generation number changes due to event removal
+ * by a callback we restart the loop.
+ * Note that event_count may be changed by callbacks.
+ */
+ curgen = io_tree.generation;
+ DWORD num_events = event_count;
- WSAEVENT event;
+ for(DWORD event_offset = 0; event_offset < num_events;) {
+ DWORD result = WSAWaitForMultipleEvents(num_events - event_offset,
&events[event_offset], FALSE, timeout_ms, FALSE);
- if(result >= WSA_WAIT_EVENT_0 && result < WSA_WAIT_EVENT_0 +
event_count) {
- event = events[result - WSA_WAIT_EVENT_0];
- }
+ if(result == WSA_WAIT_TIMEOUT) {
+ break;
+ }
+
+ if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 +
num_events - event_offset) {
+ return(false);
+ }
- free(events);
+ /* Look up io in the map by index. */
+ event_index = result - WSA_WAIT_EVENT_0 + event_offset;
+ io_t *io = io_map[event_index];
- if(result == WSA_WAIT_TIMEOUT) {
- continue;
- }
+ if(io->fd == -1) {
+ io->cb(io->data, 0);
- if(result < WSA_WAIT_EVENT_0 || result >= WSA_WAIT_EVENT_0 +
event_count) {
- return false;
- }
+ if(curgen != io_tree.generation) {
+ break;
+ }
+ } else {
+ WSANETWORKEVENTS network_events;
- io_t *io = splay_search(&io_tree, &((io_t) {
- .event = event
- }));
+ if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0)
{
+ return(false);
+ }
- if(!io) {
- abort();
- }
+ if(network_events.lNetworkEvents & READ_EVENTS) {
+ io->cb(io->data, IO_READ);
- if(io->fd == -1) {
- io->cb(io->data, 0);
- } else {
- WSANETWORKEVENTS network_events;
+ if(curgen != io_tree.generation) {
+ break;
+ }
+ }
- if(WSAEnumNetworkEvents(io->fd, io->event, &network_events) != 0)
{
- return false;
+ /*
+ The fd might be available for write too. However, if we already fired
the read callback, that
+ callback might have deleted the io (e.g. through
terminate_connection()), so we can't fire the
+ write callback here. Instead, we loop back and let the writable io loop
above handle it.
+ */
}
- if(network_events.lNetworkEvents & READ_EVENTS) {
- io->cb(io->data, IO_READ);
- }
+ /* Continue checking the rest of the events. */
+ event_offset = event_index + 1;
- /*
- The fd might be available for write too. However, if we already fired
the read callback, that
- callback might have deleted the io (e.g. through
terminate_connection()), so we can't fire the
- write callback here. Instead, we loop back and let the writable io loop
above handle it.
- */
+ /* Just poll the next time through. */
+ timeout_ms = 0;
}
}
diff --git a/src/splay_tree.c b/src/splay_tree.c
index 2b6186f..ca83022 100644
--- a/src/splay_tree.c
+++ b/src/splay_tree.c
@@ -581,6 +581,7 @@ void splay_unlink_node(splay_tree_t *tree, splay_node_t
*node) {
}
tree->count--;
+ tree->generation++;
}
void splay_delete_node(splay_tree_t *tree, splay_node_t *node) {
diff --git a/src/splay_tree.h b/src/splay_tree.h
index 06367bf..d5ab742 100644
--- a/src/splay_tree.h
+++ b/src/splay_tree.h
@@ -57,7 +57,8 @@ typedef struct splay_tree_t {
splay_compare_t compare;
splay_action_t delete;
- int count;
+ unsigned int count;
+ unsigned int generation;
} splay_tree_t;