From 51bc214379c02f97efc63c93a69f2c8e8ebc4f91 Mon Sep 17 00:00:00 2001
From: Davin McCall
Date: Mon, 18 Dec 2017 09:28:56 +0000
Subject: [PATCH] Upgrade bundled Dasynq to 1.0.1.

---
 src/dasynq/dasynq-basewatchers.h |   7 +-
 src/dasynq/dasynq-naryheap.h     |  47 ++++----
 src/dasynq/dasynq-stableheap.h   | 122 ++++++++++++++++++++
 src/dasynq/dasynq-svec.h         | 184 +++++++++++++++++++++++++++++++
 src/dasynq/dasynq-timerbase.h    |   2 +-
 src/dasynq/dasynq-timerfd.h      |   4 +-
 src/dasynq/dasynq.h              |  19 +++-
 src/dinit.cc                     |   2 +-
 8 files changed, 357 insertions(+), 30 deletions(-)
 create mode 100644 src/dasynq/dasynq-stableheap.h
 create mode 100644 src/dasynq/dasynq-svec.h

diff --git a/src/dasynq/dasynq-basewatchers.h b/src/dasynq/dasynq-basewatchers.h
index cbabe91..2dd0a1d 100644
--- a/src/dasynq/dasynq-basewatchers.h
+++ b/src/dasynq/dasynq-basewatchers.h
@@ -14,12 +14,12 @@ namespace dprivate {
     // specialised to call one or the other depending on the mutex type:
     template <typename T_Mutex> void sigmaskf(int how, const sigset_t *set, sigset_t *oset)
     {
-        sigprocmask(how, set, oset);
+        pthread_sigmask(how, set, oset);
     }
 
     template <> inline void sigmaskf<null_mutex>(int how, const sigset_t *set, sigset_t *oset)
     {
-        pthread_sigmask(how, set, oset);
+        sigprocmask(how, set, oset);
     }
 
 }
@@ -51,7 +51,8 @@ namespace dprivate {
     // (non-public API)
 
     class base_watcher;
-    using prio_queue = NaryHeap<dprivate::base_watcher *, int>;
+    template <typename T, typename P> using nary_heap_def = nary_heap<T, P>;
+    using prio_queue = stable_heap<nary_heap_def, dprivate::base_watcher *, int>;
 
     template class fd_watcher;
     template class bidi_fd_watcher;
diff --git a/src/dasynq/dasynq-naryheap.h b/src/dasynq/dasynq-naryheap.h
index 7713b81..df3a4db 100644
--- a/src/dasynq/dasynq-naryheap.h
+++ b/src/dasynq/dasynq-naryheap.h
@@ -1,7 +1,7 @@
 #ifndef DASYNC_NARYHEAP_H_INCLUDED
 #define DASYNC_NARYHEAP_H_INCLUDED
 
-#include <vector>
+#include "dasynq-svec.h"
 #include
 #include
 #include
@@ -28,7 +28,7 @@ namespace dasynq {
  * Compare : functional object type to compare priorities
  */
 template <typename T, typename P, typename Compare = std::less<P>, int N = 16>
-class NaryHeap
+class nary_heap
 {
     public:
     struct handle_t;
@@ -51,7 +51,7 @@ class NaryHeap
         HeapNode() { }
     };
 
-    std::vector<HeapNode> hvec;
+    svector<HeapNode> hvec;
 
     using hindex_t = typename decltype(hvec)::size_type;
 
@@ -65,7 +65,14 @@ class NaryHeap
     // separate container, and have the handle be an index into that container).
     struct handle_t
     {
-        T hd;
+        union hd_u_t {
+            // The data member is kept in a union so it doesn't get constructed/destructed
+            // automatically, and we can construct it lazily.
+            public:
+            hd_u_t() { }
+            ~hd_u_t() { }
+            T hd;
+        } hd_u;
         hindex_t heap_index;
     };
 
@@ -225,21 +232,15 @@ class NaryHeap
 
     void remove_h(hindex_t hidx)
     {
-        // bvec[hvec[hidx].data_index].heap_index = -1;
         hvec[hidx].hnd_p->heap_index = -1;
         if (hvec.size() != hidx + 1) {
-            // replace the first element with the last:
-            // bvec[hvec.back().data_index].heap_index = hidx;
-
-            //hvec.back().hnd_p->heap_index = hidx;
-            //hvec[hidx] = hvec.back();
-            //hvec.pop_back();
+            // replace the removed element with the last:
+            hvec[hidx] = hvec.back();
+            hvec[hidx].hnd_p->heap_index = hidx;
+            hvec.pop_back();
 
             // Now bubble up:
-            //bubble_up(hidx);
-
-            bubble_up(hidx, hvec.size() - 2, *(hvec.back().hnd_p), hvec.back().data);
-            hvec.pop_back();
+            bubble_up(hidx);
         }
         else {
             hvec.pop_back();
@@ -250,14 +251,14 @@ class NaryHeap
 
     T & node_data(handle_t & index) noexcept
    {
-        return index.hd;
+        return index.hd_u.hd;
     }
 
     // Allocate a slot, but do not incorporate into the heap:
     //  u... : parameters for data constructor T::T(...)
     template <typename ...U> void allocate(handle_t & hnd, U... u)
     {
-        new (& hnd.hd) T(u...);
+        new (& hnd.hd_u.hd) T(u...);
         hnd.heap_index = -1;
 
         hindex_t max_allowed = hvec.max_size();
@@ -287,6 +288,7 @@ class NaryHeap
     void deallocate(handle_t & index) noexcept
     {
         num_nodes--;
+        index.hd_u.~hd_u_t();
 
         // shrink the capacity of hvec if num_nodes is sufficiently less than its current capacity. Why
         // capacity/4? Because in general, capacity must be at least doubled when it is exceeded to get
@@ -295,7 +297,7 @@ class NaryHeap
         //
         // repeatedly as nodes get added and removed.
         if (num_nodes < hvec.capacity() / 4) {
-            hvec.shrink_to_fit();
+            hvec.shrink_to(num_nodes * 2);
         }
     }
 
@@ -337,7 +339,7 @@ class NaryHeap
         return hvec.empty();
     }
 
-    bool is_queued(handle_t hnd)
+    bool is_queued(handle_t & hnd)
     {
         return hnd.heap_index != (hindex_t) -1;
     }
@@ -360,6 +362,13 @@ class NaryHeap
             return bubble_down(heap_index);
         }
     }
+
+    nary_heap()
+    {
+        // Nothing required
+    }
+
+    nary_heap(const nary_heap &) = delete;
 };
 
 }
diff --git a/src/dasynq/dasynq-stableheap.h b/src/dasynq/dasynq-stableheap.h
new file mode 100644
index 0000000..a93ae79
--- /dev/null
+++ b/src/dasynq/dasynq-stableheap.h
@@ -0,0 +1,122 @@
+#ifndef DASYNQ_STABLE_HEAP_INCLUDED
+#define DASYNQ_STABLE_HEAP_INCLUDED 1
+
+// Convert an "unstable" priority queue (which doesn't use FIFO ordering for same-priority elements)
+// into a "stable" queue (which does deliver same-priority elements in FIFO order). This is done by
+// adding a generation counter to each element added to the queue, and using it as a second-order
+// priority key (after the original key).
+//
+// The generation counter is a 64-bit integer and can not realistically overflow.
+
+#include
+
+namespace dasynq {
+
+template <typename P>
+class stable_prio
+{
+    public:
+    P p;
+    uint64_t order;
+
+    template <typename ...U>
+    stable_prio(uint64_t o, U... u) : p(u...), order(o)
+    {
+    }
+
+    // zero-argument constructor should not really be needed, but some
+    // heap implementations aren't yet perfect.
+    stable_prio()
+    {
+    }
+};
+
+template <typename P, typename C>
+class compare_stable_prio
+{
+    public:
+    bool operator()(const stable_prio<P> &a, const stable_prio<P> &b)
+    {
+        C lt;
+        if (lt(a.p, b.p)) {
+            return true;
+        }
+        if (lt(b.p, a.p)) {
+            return false;
+        }
+
+        return a.order < b.order;
+    }
+};
+
+
+template
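The stable-heap technique introduced above (a per-element generation counter used as a tie-breaker) can be illustrated outside of Dasynq. The following sketch is not part of the patch: it substitutes std::priority_queue for Dasynq's n-ary heap, and the names ordered_prio, compare_ordered_prio and generation are invented for the example. It shows the same idea as stable_prio and compare_stable_prio, comparing on the real priority first and falling back to insertion order when priorities are equal.

    #include <cstdint>
    #include <iostream>
    #include <queue>
    #include <vector>

    // Illustrative only: pair each value's priority with an insertion-order
    // counter so that equal priorities are delivered FIFO, mirroring the
    // stable_prio/compare_stable_prio pair in the patch above.
    struct ordered_prio {
        int prio;             // primary key (smaller value = higher priority here)
        std::uint64_t order;  // generation counter assigned when the element is pushed
    };

    // std::priority_queue pops the "largest" element first, so the comparator
    // returns true when 'a' should be popped after 'b': a larger prio value loses,
    // and on a tie the later insertion (larger order) loses.
    struct compare_ordered_prio {
        bool operator()(const ordered_prio &a, const ordered_prio &b) const {
            if (a.prio != b.prio) return a.prio > b.prio;
            return a.order > b.order;
        }
    };

    int main() {
        std::priority_queue<ordered_prio, std::vector<ordered_prio>, compare_ordered_prio> q;
        std::uint64_t generation = 0;
        for (int p : {1, 0, 1, 0}) {
            q.push({p, generation++});   // tag each element with the next counter value
        }
        while (!q.empty()) {
            std::cout << q.top().prio << " (inserted #" << q.top().order << ")\n";
            q.pop();
        }
        // Output: 0 (inserted #1), 0 (inserted #3), 1 (inserted #0), 1 (inserted #2)
        // - same-priority elements come out in insertion (FIFO) order.
        return 0;
    }

Running it prints the two priority-0 elements in the order they were pushed, then the two priority-1 elements in the order they were pushed, which is the FIFO behaviour that the stable_heap wrapper layers on top of nary_heap.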