libs/corosio/include/boost/corosio/tcp_server.hpp
79.9% Lines (107/134)
91.7% Functions (33/36)
60.0% Branches (21/35)
| Line | Branch | Hits | Source Code |
|---|---|---|---|
| 1 | // | ||
| 2 | // Copyright (c) 2026 Vinnie Falco (vinnie dot falco at gmail dot com) | ||
| 3 | // | ||
| 4 | // Distributed under the Boost Software License, Version 1.0. (See accompanying | ||
| 5 | // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) | ||
| 6 | // | ||
| 7 | // Official repository: https://github.com/cppalliance/corosio | ||
| 8 | // | ||
| 9 | |||
| 10 | #ifndef BOOST_COROSIO_TCP_SERVER_HPP | ||
| 11 | #define BOOST_COROSIO_TCP_SERVER_HPP | ||
| 12 | |||
| 13 | #include <boost/corosio/detail/config.hpp> | ||
| 14 | #include <boost/corosio/detail/except.hpp> | ||
| 15 | #include <boost/corosio/tcp_acceptor.hpp> | ||
| 16 | #include <boost/corosio/tcp_socket.hpp> | ||
| 17 | #include <boost/corosio/io_context.hpp> | ||
| 18 | #include <boost/corosio/endpoint.hpp> | ||
| 19 | #include <boost/capy/task.hpp> | ||
| 20 | #include <boost/capy/concept/execution_context.hpp> | ||
| 21 | #include <boost/capy/concept/io_awaitable.hpp> | ||
| 22 | #include <boost/capy/concept/executor.hpp> | ||
| 23 | #include <boost/capy/ex/any_executor.hpp> | ||
| 24 | #include <boost/capy/ex/run_async.hpp> | ||
| 25 | |||
| 26 | #include <coroutine> | ||
| 27 | #include <memory> | ||
| 28 | #include <ranges> | ||
| 29 | #include <vector> | ||
| 30 | |||
| 31 | namespace boost::corosio { | ||
| 32 | |||
| 33 | #ifdef _MSC_VER | ||
| 34 | #pragma warning(push) | ||
| 35 | #pragma warning(disable: 4251) // class needs to have dll-interface | ||
| 36 | #endif | ||
| 37 | |||
| 38 | /** TCP server with pooled workers. | ||
| 39 | |||
| 40 | This class manages a pool of reusable worker objects that handle | ||
| 41 | incoming connections. When a connection arrives, an idle worker | ||
| 42 | is dispatched to handle it. After the connection completes, the | ||
| 43 | worker returns to the pool for reuse, avoiding allocation overhead | ||
| 44 | per connection. | ||
| 45 | |||
| 46 | Workers are set via @ref set_workers as a forward range of | ||
| 47 | pointer-like objects (e.g., `unique_ptr<worker_base>`). The server | ||
| 48 | takes ownership of the container via type erasure. | ||
| 49 | |||
| 50 | @par Thread Safety | ||
| 51 | Distinct objects: Safe. | ||
| 52 | Shared objects: Unsafe. | ||
| 53 | |||
| 54 | @par Lifecycle | ||
| 55 | The server operates in three states: | ||
| 56 | |||
| 57 | - **Stopped**: Initial state, or after @ref join completes. | ||
| 58 | - **Running**: After @ref start, actively accepting connections. | ||
| 59 | - **Stopping**: After @ref stop, draining active work. | ||
| 60 | |||
| 61 | State transitions: | ||
| 62 | @code | ||
| 63 | [Stopped] --start()--> [Running] --stop()--> [Stopping] --join()--> [Stopped] | ||
| 64 | @endcode | ||
| 65 | |||
| 66 | @par Running the Server | ||
| 67 | @code | ||
| 68 | io_context ioc; | ||
| 69 | tcp_server srv(ioc, ioc.get_executor()); | ||
| 70 | srv.set_workers(make_workers(ioc, 100)); | ||
| 71 | srv.bind(endpoint{address_v4::any(), 8080}); | ||
| 72 | srv.start(); | ||
| 73 | ioc.run(); // Blocks until all work completes | ||
| 74 | @endcode | ||
| 75 | |||
| 76 | @par Graceful Shutdown | ||
| 77 | To shut down gracefully, call @ref stop then drain the io_context: | ||
| 78 | @code | ||
| 79 | // From a signal handler or timer callback: | ||
| 80 | srv.stop(); | ||
| 81 | |||
| 82 | // ioc.run() returns after pending work drains. | ||
| 83 | // Then from the thread that called ioc.run(): | ||
| 84 | srv.join(); // Wait for accept loops to finish | ||
| 85 | @endcode | ||
| 86 | |||
| 87 | @par Restart After Stop | ||
| 88 | The server can be restarted after a complete shutdown cycle. | ||
| 89 | You must drain the io_context and call @ref join before restarting: | ||
| 90 | @code | ||
| 91 | srv.start(); | ||
| 92 | ioc.run_for( 10s ); // Run for a while | ||
| 93 | srv.stop(); // Signal shutdown | ||
| 94 | ioc.run(); // REQUIRED: drain pending completions | ||
| 95 | srv.join(); // REQUIRED: wait for accept loops | ||
| 96 | |||
| 97 | // Now safe to restart | ||
| 98 | srv.start(); | ||
| 99 | ioc.run(); | ||
| 100 | @endcode | ||
| 101 | |||
| 102 | @par WARNING: What NOT to Do | ||
| 103 | - Do NOT call @ref join from inside a worker coroutine (deadlock). | ||
| 104 | - Do NOT call @ref join from a thread running `ioc.run()` (deadlock). | ||
| 105 | - Do NOT call @ref start without completing @ref join after @ref stop. | ||
| 106 | - Do NOT call `ioc.stop()` for graceful shutdown; use @ref stop instead. | ||
| 107 | |||
| 108 | @par Example | ||
| 109 | @code | ||
| 110 | class my_worker : public tcp_server::worker_base | ||
| 111 | { | ||
| 112 | corosio::tcp_socket sock_; | ||
| 113 | capy::any_executor ex_; | ||
| 114 | public: | ||
| 115 | my_worker(io_context& ctx) | ||
| 116 | : sock_(ctx) | ||
| 117 | , ex_(ctx.get_executor()) | ||
| 118 | { | ||
| 119 | } | ||
| 120 | |||
| 121 | corosio::tcp_socket& socket() override { return sock_; } | ||
| 122 | |||
| 123 | void run(launcher launch) override | ||
| 124 | { | ||
| 125 | launch(ex_, [](corosio::tcp_socket* sock) -> capy::task<> | ||
| 126 | { | ||
| 127 | // handle connection using sock | ||
| 128 | co_return; | ||
| 129 | }(&sock_)); | ||
| 130 | } | ||
| 131 | }; | ||
| 132 | |||
| 133 | auto make_workers(io_context& ctx, int n) | ||
| 134 | { | ||
| 135 | std::vector<std::unique_ptr<tcp_server::worker_base>> v; | ||
| 136 | v.reserve(n); | ||
| 137 | for(int i = 0; i < n; ++i) | ||
| 138 | v.push_back(std::make_unique<my_worker>(ctx)); | ||
| 139 | return v; | ||
| 140 | } | ||
| 141 | |||
| 142 | io_context ioc; | ||
| 143 | tcp_server srv(ioc, ioc.get_executor()); | ||
| 144 | srv.set_workers(make_workers(ioc, 100)); | ||
| 145 | @endcode | ||
| 146 | |||
| 147 | @see worker_base, set_workers, launcher | ||
| 148 | */ | ||
| 149 | class BOOST_COROSIO_DECL | ||
| 150 | tcp_server | ||
| 151 | { | ||
| 152 | public: | ||
| 153 | class worker_base; ///< Abstract base for connection handlers. | ||
| 154 | class launcher; ///< Move-only handle to launch worker coroutines. | ||
| 155 | |||
| 156 | private: | ||
| 157 | struct waiter | ||
| 158 | { | ||
| 159 | waiter* next; | ||
| 160 | std::coroutine_handle<> h; | ||
| 161 | worker_base* w; | ||
| 162 | }; | ||
| 163 | |||
| 164 | struct impl; | ||
| 165 | |||
| 166 | static impl* make_impl(capy::execution_context& ctx); | ||
| 167 | |||
| 168 | impl* impl_; | ||
| 169 | capy::any_executor ex_; | ||
| 170 | waiter* waiters_ = nullptr; | ||
| 171 | worker_base* idle_head_ = nullptr; // Forward list: available workers | ||
| 172 | worker_base* active_head_ = nullptr; // Doubly linked: workers handling connections | ||
| 173 | worker_base* active_tail_ = nullptr; // Tail for O(1) push_back | ||
| 174 | std::size_t active_accepts_ = 0; // Number of active do_accept coroutines | ||
| 175 | std::shared_ptr<void> storage_; // Owns the worker container (type-erased) | ||
| 176 | bool running_ = false; | ||
| 177 | |||
| 178 | // Idle list (forward/singly linked) - push front, pop front | ||
| 179 | 37 | void idle_push(worker_base* w) noexcept | |
| 180 | { | ||
| 181 | 37 | w->next_ = idle_head_; | |
| 182 | 37 | idle_head_ = w; | |
| 183 | 37 | } | |
| 184 | |||
| 185 | 9 | worker_base* idle_pop() noexcept | |
| 186 | { | ||
| 187 | 9 | auto* w = idle_head_; | |
| 188 | 1/2 ✓ Branch 0 taken 9 times. ✗ Branch 1 not taken. | 9 | if(w) idle_head_ = w->next_; |
| 189 | 9 | return w; | |
| 190 | } | ||
| 191 | |||
| 192 | 9 | bool idle_empty() const noexcept { return idle_head_ == nullptr; } | |
| 193 | |||
| 194 | // Active list (doubly linked) - push back, remove anywhere | ||
| 195 | 3 | void active_push(worker_base* w) noexcept | |
| 196 | { | ||
| 197 | 3 | w->next_ = nullptr; | |
| 198 | 3 | w->prev_ = active_tail_; | |
| 199 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | if(active_tail_) |
| 200 | ✗ | active_tail_->next_ = w; | |
| 201 | else | ||
| 202 | 3 | active_head_ = w; | |
| 203 | 3 | active_tail_ = w; | |
| 204 | 3 | } | |
| 205 | |||
| 206 | 9 | void active_remove(worker_base* w) noexcept | |
| 207 | { | ||
| 208 | // Skip if not in active list (e.g., after failed accept) | ||
| 209 | 3/4 ✓ Branch 0 taken 6 times. ✓ Branch 1 taken 3 times. ✓ Branch 2 taken 6 times. ✗ Branch 3 not taken. | 9 | if(w != active_head_ && w->prev_ == nullptr) |
| 210 | 6 | return; | |
| 211 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | if(w->prev_) |
| 212 | ✗ | w->prev_->next_ = w->next_; | |
| 213 | else | ||
| 214 | 3 | active_head_ = w->next_; | |
| 215 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | if(w->next_) |
| 216 | ✗ | w->next_->prev_ = w->prev_; | |
| 217 | else | ||
| 218 | 3 | active_tail_ = w->prev_; | |
| 219 | 3 | w->prev_ = nullptr; // Mark as not in active list | |
| 220 | } | ||
| 221 | |||
| 222 | template<capy::Executor Ex> | ||
| 223 | struct launch_wrapper | ||
| 224 | { | ||
| 225 | struct promise_type | ||
| 226 | { | ||
| 227 | Ex ex; // Stored directly in frame, no allocation | ||
| 228 | std::stop_token st; | ||
| 229 | |||
| 230 | // For regular coroutines: first arg is executor, second is stop token | ||
| 231 | template<class E, class S, class... Args> | ||
| 232 | requires capy::Executor<std::decay_t<E>> | ||
| 233 | promise_type(E e, S s, Args&&...) | ||
| 234 | : ex(std::move(e)) | ||
| 235 | , st(std::move(s)) | ||
| 236 | { | ||
| 237 | } | ||
| 238 | |||
| 239 | // For lambda coroutines: first arg is closure, second is executor, third is stop token | ||
| 240 | template<class Closure, class E, class S, class... Args> | ||
| 241 | requires (!capy::Executor<std::decay_t<Closure>> && | ||
| 242 | capy::Executor<std::decay_t<E>>) | ||
| 243 | 3 | promise_type(Closure&&, E e, S s, Args&&...) | |
| 244 | 3 | : ex(std::move(e)) | |
| 245 | 3 | , st(std::move(s)) | |
| 246 | { | ||
| 247 | 3 | } | |
| 248 | |||
| 249 | 3 | launch_wrapper get_return_object() noexcept { | |
| 250 | 3 | return {std::coroutine_handle<promise_type>::from_promise(*this)}; | |
| 251 | } | ||
| 252 | 3 | std::suspend_always initial_suspend() noexcept { return {}; } | |
| 253 | 3 | std::suspend_never final_suspend() noexcept { return {}; } | |
| 254 | 3 | void return_void() noexcept {} | |
| 255 | ✗ | void unhandled_exception() { std::terminate(); } | |
| 256 | |||
| 257 | // Pass through simple awaitables, inject executor/stop_token for IoAwaitable | ||
| 258 | template<class Awaitable> | ||
| 259 | 6 | auto await_transform(Awaitable&& a) | |
| 260 | { | ||
| 261 | using AwaitableT = std::decay_t<Awaitable>; | ||
| 262 | // Simple awaitable: has await_suspend(coroutine_handle<>) but not IoAwaitable | ||
| 263 | if constexpr ( | ||
| 264 | requires { a.await_suspend(std::declval<std::coroutine_handle<>>()); } && | ||
| 265 | !capy::IoAwaitable<AwaitableT>) | ||
| 266 | { | ||
| 267 | return std::forward<Awaitable>(a); | ||
| 268 | } | ||
| 269 | else | ||
| 270 | { | ||
| 271 | struct adapter | ||
| 272 | { | ||
| 273 | AwaitableT aw; | ||
| 274 | Ex* ex_ptr; | ||
| 275 | std::stop_token* st_ptr; | ||
| 276 | |||
| 277 | bool await_ready() { return aw.await_ready(); } | ||
| 278 | decltype(auto) await_resume() { return aw.await_resume(); } | ||
| 279 | |||
| 280 | auto await_suspend(std::coroutine_handle<promise_type> h) | ||
| 281 | { | ||
| 282 | static_assert(capy::IoAwaitable<AwaitableT>); | ||
| 283 | return aw.await_suspend(h, *ex_ptr, *st_ptr); | ||
| 284 | } | ||
| 285 | }; | ||
| 286 | 9 | return adapter{std::forward<Awaitable>(a), &ex, &st}; | |
| 287 | } | ||
| 288 | 3 | } | |
| 289 | }; | ||
| 290 | |||
| 291 | std::coroutine_handle<promise_type> h; | ||
| 292 | |||
| 293 | 3 | launch_wrapper(std::coroutine_handle<promise_type> handle) noexcept | |
| 294 | 3 | : h(handle) | |
| 295 | { | ||
| 296 | 3 | } | |
| 297 | |||
| 298 | 3 | ~launch_wrapper() | |
| 299 | { | ||
| 300 | 1/2 ✗ Branch 1 not taken. ✓ Branch 2 taken 3 times. | 3 | if(h) |
| 301 | ✗ | h.destroy(); | |
| 302 | 3 | } | |
| 303 | |||
| 304 | launch_wrapper(launch_wrapper&& o) noexcept | ||
| 305 | : h(std::exchange(o.h, nullptr)) | ||
| 306 | { | ||
| 307 | } | ||
| 308 | |||
| 309 | launch_wrapper(launch_wrapper const&) = delete; | ||
| 310 | launch_wrapper& operator=(launch_wrapper const&) = delete; | ||
| 311 | launch_wrapper& operator=(launch_wrapper&&) = delete; | ||
| 312 | }; | ||
| 313 | |||
| 314 | // Named functor to avoid incomplete lambda type in coroutine promise | ||
| 315 | template<class Executor> | ||
| 316 | struct launch_coro | ||
| 317 | { | ||
| 318 | 1/1 ✓ Branch 1 taken 3 times. | 3 | launch_wrapper<Executor> operator()( |
| 319 | Executor, | ||
| 320 | std::stop_token, | ||
| 321 | tcp_server* self, | ||
| 322 | capy::task<void> t, | ||
| 323 | worker_base* wp) | ||
| 324 | { | ||
| 325 | // Executor and stop token stored in promise via constructor | ||
| 326 | co_await std::move(t); | ||
| 327 | co_await self->push(*wp); | ||
| 328 | 6 | } | |
| 329 | }; | ||
| 330 | |||
| 331 | class push_awaitable | ||
| 332 | { | ||
| 333 | tcp_server& self_; | ||
| 334 | worker_base& w_; | ||
| 335 | |||
| 336 | public: | ||
| 337 | 9 | push_awaitable( | |
| 338 | tcp_server& self, | ||
| 339 | worker_base& w) noexcept | ||
| 340 | 9 | : self_(self) | |
| 341 | 9 | , w_(w) | |
| 342 | { | ||
| 343 | 9 | } | |
| 344 | |||
| 345 | 9 | bool await_ready() const noexcept | |
| 346 | { | ||
| 347 | 9 | return false; | |
| 348 | } | ||
| 349 | |||
| 350 | template<class Ex> | ||
| 351 | std::coroutine_handle<> | ||
| 352 | 9 | await_suspend( | |
| 353 | std::coroutine_handle<> h, | ||
| 354 | Ex const&, std::stop_token) noexcept | ||
| 355 | { | ||
| 356 | // Dispatch to server's executor before touching shared state | ||
| 357 | 9 | self_.ex_.dispatch(h); | |
| 358 | 9 | return std::noop_coroutine(); | |
| 359 | } | ||
| 360 | |||
| 361 | 9 | void await_resume() noexcept | |
| 362 | { | ||
| 363 | // Running on server executor - safe to modify lists | ||
| 364 | // Remove from active (if present), then wake waiter or add to idle | ||
| 365 | 9 | self_.active_remove(&w_); | |
| 366 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 9 times. | 9 | if(self_.waiters_) |
| 367 | { | ||
| 368 | ✗ | auto* wait = self_.waiters_; | |
| 369 | ✗ | self_.waiters_ = wait->next; | |
| 370 | ✗ | wait->w = &w_; | |
| 371 | ✗ | self_.ex_.post(wait->h); | |
| 372 | } | ||
| 373 | else | ||
| 374 | { | ||
| 375 | 9 | self_.idle_push(&w_); | |
| 376 | } | ||
| 377 | 9 | } | |
| 378 | }; | ||
| 379 | |||
| 380 | class pop_awaitable | ||
| 381 | { | ||
| 382 | tcp_server& self_; | ||
| 383 | waiter wait_; | ||
| 384 | |||
| 385 | public: | ||
| 386 | 9 | pop_awaitable(tcp_server& self) noexcept | |
| 387 | 9 | : self_(self) | |
| 388 | 9 | , wait_{} | |
| 389 | { | ||
| 390 | 9 | } | |
| 391 | |||
| 392 | 9 | bool await_ready() const noexcept | |
| 393 | { | ||
| 394 | 9 | return !self_.idle_empty(); | |
| 395 | } | ||
| 396 | |||
| 397 | template<class Ex> | ||
| 398 | bool | ||
| 399 | ✗ | await_suspend( | |
| 400 | std::coroutine_handle<> h, | ||
| 401 | Ex const&, std::stop_token) noexcept | ||
| 402 | { | ||
| 403 | // Running on server executor (do_accept runs there) | ||
| 404 | ✗ | wait_.h = h; | |
| 405 | ✗ | wait_.w = nullptr; | |
| 406 | ✗ | wait_.next = self_.waiters_; | |
| 407 | ✗ | self_.waiters_ = &wait_; | |
| 408 | ✗ | return true; | |
| 409 | } | ||
| 410 | |||
| 411 | 9 | worker_base& await_resume() noexcept | |
| 412 | { | ||
| 413 | // Running on server executor | ||
| 414 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 9 times. | 9 | if(wait_.w) |
| 415 | ✗ | return *wait_.w; // Woken by push_awaitable | |
| 416 | 9 | return *self_.idle_pop(); | |
| 417 | } | ||
| 418 | }; | ||
| 419 | |||
| 420 | 9 | push_awaitable push(worker_base& w) | |
| 421 | { | ||
| 422 | 9 | return push_awaitable{*this, w}; | |
| 423 | } | ||
| 424 | |||
| 425 | // Synchronous version for destructor/guard paths | ||
| 426 | // Must be called from server executor context | ||
| 427 | ✗ | void push_sync(worker_base& w) noexcept | |
| 428 | { | ||
| 429 | ✗ | active_remove(&w); | |
| 430 | ✗ | if(waiters_) | |
| 431 | { | ||
| 432 | ✗ | auto* wait = waiters_; | |
| 433 | ✗ | waiters_ = wait->next; | |
| 434 | ✗ | wait->w = &w; | |
| 435 | ✗ | ex_.post(wait->h); | |
| 436 | } | ||
| 437 | else | ||
| 438 | { | ||
| 439 | ✗ | idle_push(&w); | |
| 440 | } | ||
| 441 | ✗ | } | |
| 442 | |||
| 443 | 9 | pop_awaitable pop() | |
| 444 | { | ||
| 445 | 9 | return pop_awaitable{*this}; | |
| 446 | } | ||
| 447 | |||
| 448 | capy::task<void> do_accept(tcp_acceptor& acc); | ||
| 449 | |||
| 450 | public: | ||
| 451 | /** Abstract base class for connection handlers. | ||
| 452 | |||
| 453 | Derive from this class to implement custom connection handling. | ||
| 454 | Each worker owns a socket and is reused across multiple | ||
| 455 | connections to avoid per-connection allocation. | ||
| 456 | |||
| 457 | @see tcp_server, launcher | ||
| 458 | */ | ||
| 459 | class BOOST_COROSIO_DECL | ||
| 460 | worker_base | ||
| 461 | { | ||
| 462 | // Ordered largest to smallest for optimal packing | ||
| 463 | std::stop_source stop_; // ~16 bytes | ||
| 464 | worker_base* next_ = nullptr; // 8 bytes - used by idle and active lists | ||
| 465 | worker_base* prev_ = nullptr; // 8 bytes - used only by active list | ||
| 466 | |||
| 467 | friend class tcp_server; | ||
| 468 | |||
| 469 | public: | ||
| 470 | /// Destroy the worker. | ||
| 471 | 28 | virtual ~worker_base() = default; | |
| 472 | |||
| 473 | /** Handle an accepted connection. | ||
| 474 | |||
| 475 | Called when this worker is dispatched to handle a new | ||
| 476 | connection. The implementation must invoke the launcher | ||
| 477 | exactly once to start the handling coroutine. | ||
| 478 | |||
| 479 | @param launch Handle to launch the connection coroutine. | ||
| 480 | */ | ||
| 481 | virtual void run(launcher launch) = 0; | ||
| 482 | |||
| 483 | /// Return the socket used for connections. | ||
| 484 | virtual corosio::tcp_socket& socket() = 0; | ||
| 485 | }; | ||
| 486 | |||
| 487 | /** Move-only handle to launch a worker coroutine. | ||
| 488 | |||
| 489 | Passed to @ref worker_base::run to start the connection-handling | ||
| 490 | coroutine. The launcher ensures the worker returns to the idle | ||
| 491 | pool when the coroutine completes or if launching fails. | ||
| 492 | |||
| 493 | The launcher may be invoked at most once via `operator()`; | ||
| 494 | invoking it a second time throws. If destroyed without being | ||
| 495 | invoked, the worker is returned to the idle pool automatically. | ||
| 496 | |||
| 497 | @see worker_base::run | ||
| 498 | */ | ||
| 499 | class BOOST_COROSIO_DECL | ||
| 500 | launcher | ||
| 501 | { | ||
| 502 | tcp_server* srv_; | ||
| 503 | worker_base* w_; | ||
| 504 | |||
| 505 | friend class tcp_server; | ||
| 506 | |||
| 507 | 3 | launcher(tcp_server& srv, worker_base& w) noexcept | |
| 508 | 3 | : srv_(&srv) | |
| 509 | 3 | , w_(&w) | |
| 510 | { | ||
| 511 | 3 | } | |
| 512 | |||
| 513 | public: | ||
| 514 | /// Return the worker to the pool if not launched. | ||
| 515 | 3 | ~launcher() | |
| 516 | { | ||
| 517 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | if(w_) |
| 518 | ✗ | srv_->push_sync(*w_); | |
| 519 | 3 | } | |
| 520 | |||
| 521 | launcher(launcher&& o) noexcept | ||
| 522 | : srv_(o.srv_) | ||
| 523 | , w_(std::exchange(o.w_, nullptr)) | ||
| 524 | { | ||
| 525 | } | ||
| 526 | launcher(launcher const&) = delete; | ||
| 527 | launcher& operator=(launcher const&) = delete; | ||
| 528 | launcher& operator=(launcher&&) = delete; | ||
| 529 | |||
| 530 | /** Launch the connection-handling coroutine. | ||
| 531 | |||
| 532 | Starts the given coroutine on the specified executor. When | ||
| 533 | the coroutine completes, the worker is automatically returned | ||
| 534 | to the idle pool. | ||
| 535 | |||
| 536 | @param ex The executor to run the coroutine on. | ||
| 537 | @param task The coroutine to execute. | ||
| 538 | |||
| 539 | @throws std::logic_error If this launcher was already invoked. | ||
| 540 | */ | ||
| 541 | template<class Executor> | ||
| 542 | 3 | void operator()(Executor const& ex, capy::task<void> task) | |
| 543 | { | ||
| 544 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | if(! w_) |
| 545 | ✗ | detail::throw_logic_error(); // launcher already invoked | |
| 546 | |||
| 547 | 3 | auto* w = std::exchange(w_, nullptr); | |
| 548 | |||
| 549 | // Worker is being dispatched - add to active list | ||
| 550 | 3 | srv_->active_push(w); | |
| 551 | |||
| 552 | // Return worker to pool if coroutine setup throws | ||
| 553 | struct guard_t { | ||
| 554 | tcp_server* srv; | ||
| 555 | worker_base* w; | ||
| 556 | 1/2 ✗ Branch 0 not taken. ✓ Branch 1 taken 3 times. | 3 | ~guard_t() { if(w) srv->push_sync(*w); } |
| 557 | 3 | } guard{srv_, w}; | |
| 558 | |||
| 559 | // Reset worker's stop source for this connection | ||
| 560 | 1/1 ✓ Branch 1 taken 3 times. | 3 | w->stop_ = {}; |
| 561 | 3 | auto st = w->stop_.get_token(); | |
| 562 | |||
| 563 | 1/1 ✓ Branch 3 taken 3 times. | 3 | auto wrapper = launch_coro<Executor>{}( |
| 564 | 3 | ex, st, srv_, std::move(task), w); | |
| 565 | |||
| 566 | // Executor and stop token stored in promise via constructor | ||
| 567 | 1/1 ✓ Branch 3 taken 3 times. | 3 | ex.post(std::exchange(wrapper.h, nullptr)); // Release before post |
| 568 | 3 | guard.w = nullptr; // Success - dismiss guard | |
| 569 | 3 | } | |
| 570 | }; | ||
| 571 | |||
| 572 | /** Construct a TCP server. | ||
| 573 | |||
| 574 | @tparam Ctx Execution context type satisfying ExecutionContext. | ||
| 575 | @tparam Ex Executor type satisfying Executor. | ||
| 576 | |||
| 577 | @param ctx The execution context for socket operations. | ||
| 578 | @param ex The executor for dispatching coroutines. | ||
| 579 | |||
| 580 | @par Example | ||
| 581 | @code | ||
| 582 | tcp_server srv(ctx, ctx.get_executor()); | ||
| 583 | srv.set_workers(make_workers(ctx, 100)); | ||
| 584 | srv.bind(endpoint{...}); | ||
| 585 | srv.start(); | ||
| 586 | @endcode | ||
| 587 | */ | ||
| 588 | template< | ||
| 589 | capy::ExecutionContext Ctx, | ||
| 590 | capy::Executor Ex> | ||
| 591 | 7 | tcp_server(Ctx& ctx, Ex ex) | |
| 592 | 7 | : impl_(make_impl(ctx)) | |
| 593 | 7 | , ex_(std::move(ex)) | |
| 594 | { | ||
| 595 | 7 | } | |
| 596 | |||
| 597 | public: | ||
| 598 | ~tcp_server(); | ||
| 599 | tcp_server(tcp_server const&) = delete; | ||
| 600 | tcp_server& operator=(tcp_server const&) = delete; | ||
| 601 | tcp_server(tcp_server&& o) noexcept; | ||
| 602 | tcp_server& operator=(tcp_server&& o) noexcept; | ||
| 603 | |||
| 604 | /** Bind to a local endpoint. | ||
| 605 | |||
| 606 | Creates an acceptor listening on the specified endpoint. | ||
| 607 | Multiple endpoints can be bound by calling this method | ||
| 608 | multiple times before @ref start. | ||
| 609 | |||
| 610 | @param ep The local endpoint to bind to. | ||
| 611 | |||
| 612 | @return The error code if binding fails. | ||
| 613 | */ | ||
| 614 | std::error_code | ||
| 615 | bind(endpoint ep); | ||
| 616 | |||
| 617 | /** Set the worker pool. | ||
| 618 | |||
| 619 | Replaces any existing workers with the given range. Any | ||
| 620 | previous workers are released and the idle/active lists | ||
| 621 | are cleared before populating with new workers. | ||
| 622 | |||
| 623 | @tparam Range Forward range of pointer-like objects to worker_base. | ||
| 624 | |||
| 625 | @param workers Range of workers to manage. Each element must | ||
| 626 | support `std::to_address()` yielding `worker_base*`. | ||
| 627 | |||
| 628 | @par Example | ||
| 629 | @code | ||
| 630 | std::vector<std::unique_ptr<my_worker>> workers; | ||
| 631 | for(int i = 0; i < 100; ++i) | ||
| 632 | workers.push_back(std::make_unique<my_worker>(ctx)); | ||
| 633 | srv.set_workers(std::move(workers)); | ||
| 634 | @endcode | ||
| 635 | */ | ||
| 636 | template<std::ranges::forward_range Range> | ||
| 637 | requires std::convertible_to< | ||
| 638 | decltype(std::to_address( | ||
| 639 | std::declval<std::ranges::range_value_t<Range>&>())), | ||
| 640 | worker_base*> | ||
| 641 | void | ||
| 642 | 7 | set_workers(Range&& workers) | |
| 643 | { | ||
| 644 | // Clear existing state | ||
| 645 | 7 | storage_.reset(); | |
| 646 | 7 | idle_head_ = nullptr; | |
| 647 | 7 | active_head_ = nullptr; | |
| 648 | 7 | active_tail_ = nullptr; | |
| 649 | |||
| 650 | // Take ownership and populate idle list | ||
| 651 | using StorageType = std::decay_t<Range>; | ||
| 652 | 7 | auto* p = new StorageType(std::forward<Range>(workers)); | |
| 653 | 1/1 ✓ Branch 1 taken 7 times. | 7 | storage_ = std::shared_ptr<void>(p, [](void* ptr) { |
| 654 | 1/2 ✓ Branch 0 taken 7 times. ✗ Branch 1 not taken. | 7 | delete static_cast<StorageType*>(ptr); |
| 655 | }); | ||
| 656 | 2/2 ✓ Branch 5 taken 28 times. ✓ Branch 6 taken 7 times. | 35 | for(auto&& elem : *static_cast<StorageType*>(p)) |
| 657 | 28 | idle_push(std::to_address(elem)); | |
| 658 | 7 | } | |
| 659 | |||
| 660 | /** Start accepting connections. | ||
| 661 | |||
| 662 | Launches accept loops for all bound endpoints. Incoming | ||
| 663 | connections are dispatched to idle workers from the pool. | ||
| 664 | |||
| 665 | Calling `start()` on an already-running server has no effect. | ||
| 666 | |||
| 667 | @par Preconditions | ||
| 668 | - At least one endpoint bound via @ref bind. | ||
| 669 | - Workers provided via @ref set_workers. | ||
| 670 | - If restarting, @ref join must have completed first. | ||
| 671 | |||
| 672 | @par Effects | ||
| 673 | Creates one accept coroutine per bound endpoint. Each coroutine | ||
| 674 | runs on the server's executor, waiting for connections and | ||
| 675 | dispatching them to idle workers. | ||
| 676 | |||
| 677 | @par Restart Sequence | ||
| 678 | To restart after stopping, complete the full shutdown cycle: | ||
| 679 | @code | ||
| 680 | srv.start(); | ||
| 681 | ioc.run_for( 1s ); | ||
| 682 | srv.stop(); // 1. Signal shutdown | ||
| 683 | ioc.run(); // 2. Drain remaining completions | ||
| 684 | srv.join(); // 3. Wait for accept loops | ||
| 685 | |||
| 686 | // Now safe to restart | ||
| 687 | srv.start(); | ||
| 688 | ioc.run(); | ||
| 689 | @endcode | ||
| 690 | |||
| 691 | @par Thread Safety | ||
| 692 | Not thread safe. | ||
| 693 | |||
| 694 | @throws std::logic_error If a previous session has not been | ||
| 695 | joined (accept loops still active). | ||
| 696 | */ | ||
| 697 | void start(); | ||
| 698 | |||
| 699 | /** Stop accepting connections. | ||
| 700 | |||
| 701 | Signals all listening ports to stop accepting new connections | ||
| 702 | and requests cancellation of active workers via their stop tokens. | ||
| 703 | |||
| 704 | This function returns immediately; it does not wait for workers | ||
| 705 | to finish. Pending I/O operations complete asynchronously. | ||
| 706 | |||
| 707 | Calling `stop()` on a non-running server has no effect. | ||
| 708 | |||
| 709 | @par Effects | ||
| 710 | - Closes all acceptors (pending accepts complete with error). | ||
| 711 | - Requests stop on each active worker's stop token. | ||
| 712 | - Workers observing their stop token should exit promptly. | ||
| 713 | |||
| 714 | @par Postconditions | ||
| 715 | No new connections will be accepted. Active workers continue | ||
| 716 | until they observe their stop token or complete naturally. | ||
| 717 | |||
| 718 | @par What Happens Next | ||
| 719 | After calling `stop()`: | ||
| 720 | 1. Let `ioc.run()` return (drains pending completions). | ||
| 721 | 2. Call @ref join to wait for accept loops to finish. | ||
| 722 | 3. Only then is it safe to restart or destroy the server. | ||
| 723 | |||
| 724 | @par Thread Safety | ||
| 725 | Not thread safe. | ||
| 726 | |||
| 727 | @see join, start | ||
| 728 | */ | ||
| 729 | void stop(); | ||
| 730 | |||
| 731 | /** Block until all accept loops complete. | ||
| 732 | |||
| 733 | Blocks the calling thread until all accept coroutines launched | ||
| 734 | by @ref start have finished executing. This synchronizes the | ||
| 735 | shutdown sequence, ensuring the server is fully stopped before | ||
| 736 | restarting or destroying it. | ||
| 737 | |||
| 738 | @par Preconditions | ||
| 739 | @ref stop has been called and `ioc.run()` has returned. | ||
| 740 | |||
| 741 | @par Postconditions | ||
| 742 | All accept loops have completed. The server is in the stopped | ||
| 743 | state and may be restarted via @ref start. | ||
| 744 | |||
| 745 | @par Example (Correct Usage) | ||
| 746 | @code | ||
| 747 | // main thread | ||
| 748 | srv.start(); | ||
| 749 | ioc.run(); // Blocks until work completes | ||
| 750 | srv.join(); // Safe: called after ioc.run() returns | ||
| 751 | @endcode | ||
| 752 | |||
| 753 | @par WARNING: Deadlock Scenarios | ||
| 754 | Calling `join()` from the wrong context causes deadlock: | ||
| 755 | |||
| 756 | @code | ||
| 757 | // WRONG: calling join() from inside a worker coroutine | ||
| 758 | void run( launcher launch ) override | ||
| 759 | { | ||
| 760 | launch( ex, [this]() -> capy::task<> | ||
| 761 | { | ||
| 762 | srv_.join(); // DEADLOCK: blocks the executor | ||
| 763 | co_return; | ||
| 764 | }()); | ||
| 765 | } | ||
| 766 | |||
| 767 | // WRONG: calling join() while ioc.run() is still active | ||
| 768 | std::thread t( [&]{ ioc.run(); } ); | ||
| 769 | srv.stop(); | ||
| 770 | srv.join(); // DEADLOCK: ioc.run() still running in thread t | ||
| 771 | @endcode | ||
| 772 | |||
| 773 | @par Thread Safety | ||
| 774 | May be called from any thread, but will deadlock if called | ||
| 775 | from within the io_context event loop or from a worker coroutine. | ||
| 776 | |||
| 777 | @see stop, start | ||
| 778 | */ | ||
| 779 | void join(); | ||
| 780 | |||
| 781 | private: | ||
| 782 | capy::task<> do_stop(); | ||
| 783 | }; | ||
| 784 | |||
| 785 | #ifdef _MSC_VER | ||
| 786 | #pragma warning(pop) | ||
| 787 | #endif | ||
| 788 | |||
| 789 | } // namespace boost::corosio | ||
| 790 | |||
| 791 | #endif | ||
| 792 |
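
Most of the uncovered branches above share a single cause: in the runs captured here the idle pool is never exhausted and every launcher is invoked, so `pop_awaitable::await_suspend`, the `waiters_` wake-up in `push_awaitable::await_resume`, `push_sync`, the launcher destructor fallback, and the multi-worker cases in `active_push`/`active_remove` never execute. The sketch below is one way a manual test might reach the waiter path. It is an assumption, not part of the library or its test suite, and it only uses calls shown in this header's own documentation; `make_workers` (and the `my_worker` it creates) are the helpers from the class example, and the endpoint and address names follow that example. The client side is omitted because this header does not document a connect API, so the setup assumes at least two clients connect concurrently, with the second arriving while the single worker is still busy. The multi-worker branches in `active_push`/`active_remove` would additionally need a pool of two or more workers with overlapping connections.

```cpp
// Sketch only: a server deliberately configured with one worker so that a
// second overlapping connection forces the accept loop to wait for a worker.
#include <boost/corosio/tcp_server.hpp>
#include <boost/corosio/io_context.hpp>
#include <boost/corosio/endpoint.hpp>

using namespace boost;

// Assumes my_worker and make_workers as defined in the tcp_server class
// documentation example are available in this translation unit.

int main()
{
    corosio::io_context ioc;
    corosio::tcp_server srv(ioc, ioc.get_executor());

    // A pool of one forces contention as soon as two connections overlap.
    srv.set_workers(make_workers(ioc, 1));

    srv.bind(corosio::endpoint{corosio::address_v4::any(), 8080});
    srv.start();

    // While this runs, open two client connections at the same time. The
    // accept loop should then suspend in pop_awaitable::await_suspend and
    // resume through the waiters_ branch of push_awaitable::await_resume.
    // Shut down as in the Graceful Shutdown section (stop() from a signal
    // handler or timer), after which run() returns and join() completes.
    ioc.run();
    srv.join();
}
```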