From 37ebb8662f25636754b552c533914057f440d0d3 Mon Sep 17 00:00:00 2001 From: Vinnie Falco Date: Sun, 11 Jan 2026 11:02:12 -0800 Subject: [PATCH 1/2] Add execution_context, executor utilities, intrusive containers, and async support --- CLAUDE.md | 1 + doc/modules/ROOT/nav.adoc | 9 + doc/modules/ROOT/pages/advanced.adoc | 383 +++++ .../ROOT/pages/concepts/affine_awaitable.adoc | 107 ++ .../ROOT/pages/concepts/dispatcher.adoc | 94 ++ doc/modules/ROOT/pages/concepts/executor.adoc | 153 ++ .../ROOT/pages/concepts/frame_allocator.adoc | 128 ++ .../pages/concepts/is_execution_context.adoc | 123 ++ .../pages/concepts/stoppable_awaitable.adoc | 135 ++ doc/modules/ROOT/pages/coroutines.adoc | 390 ++--- doc/modules/ROOT/pages/execution.adoc | 314 ++++ include/boost/capy.hpp | 18 +- include/boost/capy/affine.hpp | 262 +++- include/boost/capy/async_op.hpp | 12 +- include/boost/capy/async_run.hpp | 489 ++++++ include/boost/capy/config.hpp | 15 + include/boost/capy/coro.hpp | 32 + include/boost/capy/detail/config.hpp | 30 +- .../capy/detail/recycling_frame_allocator.hpp | 153 ++ include/boost/capy/execution_context.hpp | 645 ++++++++ include/boost/capy/executor.hpp | 789 +--------- include/boost/capy/executor_work_guard.hpp | 253 +++ include/boost/capy/frame_allocator.hpp | 421 +++++ include/boost/capy/intrusive_list.hpp | 196 +++ include/boost/capy/intrusive_queue.hpp | 170 ++ include/boost/capy/make_affine.hpp | 24 + include/boost/capy/path.hpp | 1389 +++++++++++++++++ include/boost/capy/run_on.hpp | 139 ++ include/boost/capy/task.hpp | 854 ++-------- include/boost/capy/thread_local_ptr.hpp | 200 +++ include/boost/capy/thread_pool.hpp | 12 - src/execution_context.cpp | 144 ++ src/executor.cpp | 9 - src/thread_pool.cpp | 32 +- test/unit/affine.cpp | 25 - test/unit/async_op.cpp | 460 ------ test/unit/execution_context.cpp | 387 +++++ test/unit/executor.cpp | 803 ++-------- test/unit/executor_work_guard.cpp | 222 +++ test/unit/intrusive_list.cpp | 308 ++++ test/unit/intrusive_queue.cpp | 228 +++ test/unit/task.cpp | 1210 ++++++-------- test/unit/thread_local_ptr.cpp | 158 ++ test/unit/thread_pool.cpp | 198 --- 44 files changed, 8317 insertions(+), 3807 deletions(-) create mode 100644 doc/modules/ROOT/pages/advanced.adoc create mode 100644 doc/modules/ROOT/pages/concepts/affine_awaitable.adoc create mode 100644 doc/modules/ROOT/pages/concepts/dispatcher.adoc create mode 100644 doc/modules/ROOT/pages/concepts/executor.adoc create mode 100644 doc/modules/ROOT/pages/concepts/frame_allocator.adoc create mode 100644 doc/modules/ROOT/pages/concepts/is_execution_context.adoc create mode 100644 doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc create mode 100644 doc/modules/ROOT/pages/execution.adoc create mode 100644 include/boost/capy/async_run.hpp create mode 100644 include/boost/capy/config.hpp create mode 100644 include/boost/capy/coro.hpp create mode 100644 include/boost/capy/detail/recycling_frame_allocator.hpp create mode 100644 include/boost/capy/execution_context.hpp create mode 100644 include/boost/capy/executor_work_guard.hpp create mode 100644 include/boost/capy/frame_allocator.hpp create mode 100644 include/boost/capy/intrusive_list.hpp create mode 100644 include/boost/capy/intrusive_queue.hpp create mode 100644 include/boost/capy/make_affine.hpp create mode 100644 include/boost/capy/path.hpp create mode 100644 include/boost/capy/run_on.hpp create mode 100644 include/boost/capy/thread_local_ptr.hpp create mode 100644 src/execution_context.cpp delete mode 100644 src/executor.cpp create mode 
100644 test/unit/execution_context.cpp create mode 100644 test/unit/executor_work_guard.cpp create mode 100644 test/unit/intrusive_list.cpp create mode 100644 test/unit/intrusive_queue.cpp create mode 100644 test/unit/thread_local_ptr.cpp diff --git a/CLAUDE.md b/CLAUDE.md index e69de29b..cac8c23f 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -0,0 +1 @@ +Look in context/ for information diff --git a/doc/modules/ROOT/nav.adoc b/doc/modules/ROOT/nav.adoc index e37acb55..c851c760 100644 --- a/doc/modules/ROOT/nav.adoc +++ b/doc/modules/ROOT/nav.adoc @@ -1,2 +1,11 @@ * xref:coroutines.adoc[Coroutines] +* xref:execution.adoc[Execution Model] +* xref:advanced.adoc[Advanced Topics] +* Concepts +** xref:concepts/dispatcher.adoc[dispatcher] +** xref:concepts/affine_awaitable.adoc[affine_awaitable] +** xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] +** xref:concepts/executor.adoc[executor] +** xref:concepts/frame_allocator.adoc[frame_allocator] +** xref:concepts/is_execution_context.adoc[is_execution_context] * xref:reference:boost/capy.adoc[Reference] diff --git a/doc/modules/ROOT/pages/advanced.adoc b/doc/modules/ROOT/pages/advanced.adoc new file mode 100644 index 00000000..7cce0f87 --- /dev/null +++ b/doc/modules/ROOT/pages/advanced.adoc @@ -0,0 +1,383 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Advanced Topics + +This page covers customization points for power users: frame allocation, +cancellation with stop tokens, and implementing custom affine awaitables. + +== Frame Allocation + +Every coroutine requires memory for its *frame*—the compiler-generated +structure holding local variables, parameters, and suspension state. +By default, frames are allocated with `::operator new`. For +high-frequency coroutine creation, custom allocators can significantly +reduce allocation overhead. + +=== The frame_allocator Concept + +A type satisfying xref:concepts/frame_allocator.adoc[`frame_allocator`] +provides: + +[source,cpp] +---- +void* allocate(std::size_t n); +void deallocate(void* p, std::size_t n); +---- + +Frame allocators must be cheaply copyable handles to an underlying +memory resource. + +=== Custom Allocators with async_run + +Pass a custom allocator as the second argument to `async_run`: + +[source,cpp] +---- +#include + +my_pool_allocator alloc{pool}; + +async_run(ex, alloc)(my_task()); +---- + +The allocator is used for all coroutine frames in the launched call +tree. The library embeds the allocator in the first frame, making it +available for child coroutines. + +=== The Recycling Frame Allocator + +By default, `async_run` uses a *recycling frame allocator* that caches +deallocated frames for reuse. This eliminates most allocation overhead +for typical coroutine patterns where frames are created and destroyed +in LIFO order. + +The recycling allocator: + +* Maintains a thread-local free list +* Reuses frames of matching size +* Falls back to global new/delete for mismatched sizes + +For custom allocation strategies, implement the `frame_allocator` +concept and pass your allocator to `async_run`. 
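+
+As an illustration of implementing the concept, here is a minimal sketch
+of a free-list allocator in the spirit of the recycling allocator. The
+names `frame_pool` and `recycling_allocator_sketch` are invented for this
+example, and the real implementation is thread-local and more elaborate:
+
+[source,cpp]
+----
+struct frame_pool
+{
+    struct node { node* next; };
+    node* free = nullptr;     // cached frames, all of size `size`
+    std::size_t size = 0;     // frame size currently being recycled
+};
+
+class recycling_allocator_sketch
+{
+    frame_pool* pool_;        // cheaply copyable handle to the resource
+
+public:
+    explicit recycling_allocator_sketch(frame_pool& p) : pool_(&p) {}
+
+    void* allocate(std::size_t n)
+    {
+        // Reuse a cached frame only when the size matches
+        if (pool_->free && n == pool_->size)
+        {
+            auto* p = pool_->free;
+            pool_->free = p->next;
+            return p;
+        }
+        return ::operator new(n);
+    }
+
+    void deallocate(void* p, std::size_t n)
+    {
+        // Adopt a frame size while the list is empty; cache matching
+        // frames, release everything else
+        if (!pool_->free)
+            pool_->size = n;
+        if (n == pool_->size && n >= sizeof(frame_pool::node))
+        {
+            auto* q = static_cast<frame_pool::node*>(p);
+            q->next = pool_->free;
+            pool_->free = q;
+            return;
+        }
+        ::operator delete(p);
+    }
+};
+----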
+ +=== Memory Layout + +Coroutine frames have this layout: + +---- +First frame: [coroutine frame | tagged_ptr | allocator_wrapper] +Child frames: [coroutine frame | ptr] +---- + +The pointer at the end of each frame enables correct deallocation +regardless of which allocator was active at allocation time. A tag +bit distinguishes the first frame (with embedded wrapper) from child +frames (with pointer to wrapper). + +== Stop Token Propagation + +Capy coroutines support cooperative cancellation through `std::stop_token`. +When a task is launched with stop support, the token propagates through +the entire call chain. + +=== Receiving Stop Tokens + +Awaitables that support cancellation implement the +xref:concepts/stoppable_awaitable.adoc[`stoppable_awaitable`] concept. +Their `await_suspend` receives both a dispatcher and a stop token: + +[source,cpp] +---- +template +auto await_suspend( + std::coroutine_handle<> h, + Dispatcher const& d, + std::stop_token token) +{ + if (token.stop_requested()) + { + // Handle cancellation + return d(h); // Resume immediately + } + + // Start async operation with cancellation support + start_async([h, &d, token] { + if (token.stop_requested()) + { + // Cancelled during operation + } + d(h); + }); + return std::noop_coroutine(); +} +---- + +=== Stop Token in Tasks + +The `task` type automatically propagates stop tokens through its +`await_transform`. When you `co_await` a stoppable awaitable inside +a task, the task's stop token is forwarded: + +[source,cpp] +---- +task cancellable_work() +{ + // If this task has a stop token, it's automatically + // passed to any stoppable awaitables we co_await + co_await some_stoppable_operation(); +} +---- + +== The Affine Awaitable Protocol + +The *affine awaitable protocol* enables zero-overhead dispatcher +propagation. Awaitables that implement this protocol receive the +caller's dispatcher in `await_suspend`, allowing them to resume +through the correct execution context. + +=== The dispatcher Concept + +A xref:concepts/dispatcher.adoc[`dispatcher`] is a callable that +schedules coroutine resumption: + +[source,cpp] +---- +template +concept dispatcher = requires(D const& d, std::coroutine_handle
<P>
h) { + { d(h) } -> std::convertible_to; +}; +---- + +Calling `d(h)` schedules `h` for resumption and returns a handle +suitable for symmetric transfer. The dispatcher may resume inline +(returning `h`) or queue the work (returning `std::noop_coroutine()`). + +=== The affine_awaitable Concept + +An xref:concepts/affine_awaitable.adoc[`affine_awaitable`] provides +an extended `await_suspend` that receives the dispatcher: + +[source,cpp] +---- +template +concept affine_awaitable = + dispatcher && + requires(A a, std::coroutine_handle
<P>
h, D const& d) { + a.await_suspend(h, d); + }; +---- + +=== Implementing a Custom Affine Awaitable + +Here's a complete example of an affine awaitable that wraps an +asynchronous timer: + +[source,cpp] +---- +struct async_timer +{ + std::chrono::milliseconds duration_; + + bool await_ready() const noexcept + { + return duration_.count() <= 0; + } + + template + auto await_suspend( + std::coroutine_handle<> h, + Dispatcher const& d) + { + // Start timer, resume through dispatcher when done + start_timer(duration_, [h, &d] { + d(h); // Resume via dispatcher + }); + return std::noop_coroutine(); + } + + void await_resume() const noexcept + { + // Timer completed, nothing to return + } +}; + +// Usage in a task: +task delayed_work() +{ + co_await async_timer{std::chrono::seconds{1}}; + // Resumes on the task's executor after 1 second +} +---- + +=== Type-Erased Dispatchers + +The `any_dispatcher` class provides type erasure for dispatchers, +enabling runtime polymorphism without virtual functions: + +[source,cpp] +---- +#include + +using boost::capy::any_dispatcher; + +void store_dispatcher(any_dispatcher d) +{ + // Can store any dispatcher type uniformly + d(some_handle); // Invoke through type-erased interface +} +---- + +`task` uses `any_dispatcher` internally to store the inherited +dispatcher, enabling tasks to work with any executor type. + +== The Stoppable Awaitable Protocol + +The xref:concepts/stoppable_awaitable.adoc[`stoppable_awaitable`] +protocol extends `affine_awaitable` with stop token support: + +[source,cpp] +---- +template +concept stoppable_awaitable = + affine_awaitable && + requires(A a, std::coroutine_handle
<P>
h, D const& d, std::stop_token token) { + a.await_suspend(h, d, token); + }; +---- + +A stoppable awaitable provides _both_ overloads of `await_suspend`. +The task's `await_transform` automatically selects the appropriate +overload based on whether a stop token is available. + +=== Example: Stoppable Timer + +[source,cpp] +---- +struct stoppable_timer +{ + std::chrono::milliseconds duration_; + + bool await_ready() const noexcept + { + return duration_.count() <= 0; + } + + // Affine path (no cancellation) + template + auto await_suspend( + std::coroutine_handle<> h, + Dispatcher const& d) + { + start_timer(duration_, [h, &d] { d(h); }); + return std::noop_coroutine(); + } + + // Stoppable path (with cancellation) + template + auto await_suspend( + std::coroutine_handle<> h, + Dispatcher const& d, + std::stop_token token) + { + if (token.stop_requested()) + { + cancelled_ = true; + return d(h); // Resume immediately + } + + auto timer = start_timer(duration_, [h, &d] { d(h); }); + + // Register stop callback + stop_callback_ = std::stop_callback(token, [timer] { + timer.cancel(); + }); + + return std::noop_coroutine(); + } + + void await_resume() const + { + if (cancelled_) + throw operation_cancelled{}; + } + +private: + bool cancelled_ = false; + std::optional> stop_callback_; +}; +---- + +== Legacy Awaitable Compatibility + +Not all awaitables implement the affine protocol. For standard library +awaitables or third-party types, capy provides automatic compatibility +through a *trampoline coroutine*. + +When `await_transform` encounters an awaitable that doesn't satisfy +`affine_awaitable`, it wraps it in `make_affine`: + +[source,cpp] +---- +// Inside task's await_transform: +template +auto await_transform(Awaitable&& a) +{ + using A = std::decay_t; + if constexpr (affine_awaitable) + { + // Zero-overhead path + return transform_awaiter{std::forward(a), this}; + } + else + { + // Trampoline fallback + return make_affine(std::forward(a), ex_); + } +} +---- + +The trampoline: + +1. Awaits the legacy awaitable normally +2. After completion, dispatches through the executor +3. Resumes the caller on the correct context + +This adds one extra coroutine frame but ensures correct affinity +for any awaitable type. Prefer implementing the affine protocol +for performance-critical awaitables. + +== Summary + +[cols="1,2"] +|=== +| Feature | Use Case + +| Custom frame allocator +| High-frequency coroutine creation, memory pools + +| Stop token propagation +| Cooperative cancellation of async operations + +| Affine awaitable protocol +| Zero-overhead dispatcher propagation + +| Stoppable awaitable protocol +| Cancellable async operations + +| Legacy compatibility +| Using non-affine awaitables in tasks +|=== + +== See Also + +* xref:concepts/dispatcher.adoc[dispatcher concept] +* xref:concepts/affine_awaitable.adoc[affine_awaitable concept] +* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable concept] +* xref:concepts/frame_allocator.adoc[frame_allocator concept] diff --git a/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc b/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc new file mode 100644 index 00000000..5de62537 --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc @@ -0,0 +1,107 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += affine_awaitable + +An awaitable is affine if it participates in the affine awaitable +protocol by accepting a dispatcher in its `await_suspend` method. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept affine_awaitable = + dispatcher && + requires(A a, std::coroutine_handle
<P>
h, D const& d) { + a.await_suspend(h, d); + }; + +} // namespace boost::capy +---- + +== Description + +The affine awaitable protocol enables zero-overhead scheduler affinity +without requiring the full sender/receiver protocol. When an awaitable +is affine, it receives the caller's dispatcher in `await_suspend` and +uses it to resume the caller on the correct execution context. + +The awaitable must use the dispatcher `d` to resume the caller when +the operation completes. Typically this looks like `return d(h);` for +symmetric transfer or calling `d(h)` before returning +`std::noop_coroutine()`. + +== Valid Expressions + +Given: + +* `a` — a value of type `A` +* `h` — a value of type `std::coroutine_handle
<P>
` +* `d` — a const value of type `D` satisfying `dispatcher` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `a.await_ready()` +| `bool` +| Returns `true` if the operation has already completed + +| `a.await_suspend(h, d)` +| (unspecified) +| Suspends and starts the async operation, using `d` for resumption + +| `a.await_resume()` +| (unspecified) +| Returns the operation result or rethrows any exception +|=== + +== Example + +[source,cpp] +---- +struct my_async_op +{ + bool await_ready() const noexcept + { + return false; + } + + template + auto await_suspend(coro h, Dispatcher const& d) + { + start_async([h, &d] { + // Operation completed, resume through dispatcher + d(h); + }); + return std::noop_coroutine(); + } + + int await_resume() + { + return result_; + } + +private: + int result_ = 42; +}; + +static_assert(affine_awaitable); +---- + +== See Also + +* xref:concepts/dispatcher.adoc[dispatcher] +* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] +* xref:advanced.adoc[Advanced Topics] diff --git a/doc/modules/ROOT/pages/concepts/dispatcher.adoc b/doc/modules/ROOT/pages/concepts/dispatcher.adoc new file mode 100644 index 00000000..ef123cbd --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/dispatcher.adoc @@ -0,0 +1,94 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += dispatcher + +A dispatcher is a callable object that accepts a coroutine handle and +schedules it for resumption. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept dispatcher = requires(D const& d, std::coroutine_handle
<P>
h) { + { d(h) } -> std::convertible_to; +}; + +} // namespace boost::capy +---- + +== Description + +A dispatcher encapsulates the rules for how and where a coroutine +resumes. When invoked with a coroutine handle, the dispatcher +schedules the handle for resumption and returns a handle suitable +for symmetric transfer. + +Dispatchers must be const-callable, enabling thread-safe concurrent +dispatch from multiple coroutines. The dispatcher may resume the +handle inline (by returning the handle itself) or queue it for later +execution (by returning `std::noop_coroutine()`). + +Since `coro` (an alias for `std::coroutine_handle`) has +`operator()` which invokes `resume()`, the handle itself is callable +and can be dispatched directly. + +== Valid Expressions + +Given: + +* `d` — a const value of type `D` +* `h` — a value of type `std::coroutine_handle
<P>
` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `d(h)` +| convertible to `coro` +| Schedules `h` for resumption and returns a handle for symmetric transfer +|=== + +== Example + +[source,cpp] +---- +struct inline_dispatcher +{ + coro operator()(coro h) const + { + return h; // Resume inline via symmetric transfer + } +}; + +struct queuing_dispatcher +{ + work_queue* queue_; + + coro operator()(coro h) const + { + queue_->push(h); + return std::noop_coroutine(); // Caller returns to event loop + } +}; + +static_assert(dispatcher); +static_assert(dispatcher); +---- + +== See Also + +* xref:concepts/affine_awaitable.adoc[affine_awaitable] +* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] +* xref:advanced.adoc[Advanced Topics] diff --git a/doc/modules/ROOT/pages/concepts/executor.adoc b/doc/modules/ROOT/pages/concepts/executor.adoc new file mode 100644 index 00000000..bdc29d90 --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/executor.adoc @@ -0,0 +1,153 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += executor + +An executor provides mechanisms for scheduling work for execution. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept executor = + std::copy_constructible && + std::equality_comparable && + requires(E& e, E const& ce, std::coroutine_handle<> h) { + { ce.context() } -> std::same_as; + { ce.on_work_started() } noexcept; + { ce.on_work_finished() } noexcept; + { ce.dispatch(h) } -> std::convertible_to>; + { ce.post(h) }; + { ce.defer(h) }; + }; + +} // namespace boost::capy +---- + +== Description + +A type meeting the executor requirements embodies a set of rules for +determining how submitted coroutines are to be executed. An executor +is a lightweight, copyable handle to an execution context such as a +thread pool, I/O context, or strand. + +The executor provides three scheduling operations: + +* **dispatch** — Run inline if allowed, else queue. Cheapest path. +* **post** — Always queue, never inline. Guaranteed asynchrony. +* **defer** — Always queue with continuation hint. Enables optimizations. + +=== No-Throw Guarantee + +The following operations shall not exit via an exception: constructors, +comparison operators, copy/move operations, swap, `context()`, +`on_work_started()`, and `on_work_finished()`. + +=== Thread Safety + +The executor copy constructor, comparison operators, and other member +functions shall not introduce data races as a result of concurrent +calls from different threads. + +=== Executor Validity + +Let `ctx` be the execution context returned by `context()`. An executor +becomes invalid when the first call to `ctx.shutdown()` returns. The +effect of calling `dispatch`, `post`, or `defer` on an invalid executor +is undefined. + +== Valid Expressions + +Given: + +* `e` — a value of type `E` +* `ce` — a const value of type `E` +* `h` — a value of type `std::coroutine_handle<>` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `ce.context()` +| `Context&` +| Returns a reference to the associated execution context + +| `ce.on_work_started()` +| — +| Informs the executor that work is beginning. Must not throw. + +| `ce.on_work_finished()` +| — +| Informs the executor that work has completed. 
Must not throw. + +| `ce.dispatch(h)` +| convertible to `std::coroutine_handle<>` +| Execute inline if permitted, otherwise queue + +| `ce.post(h)` +| — +| Queue for later execution, never inline + +| `ce.defer(h)` +| — +| Queue with continuation hint for optimization +|=== + +== Example + +[source,cpp] +---- +class my_executor +{ + my_context* ctx_; + +public: + my_executor(my_context& ctx) : ctx_(&ctx) {} + + my_context& context() const noexcept { return *ctx_; } + + void on_work_started() const noexcept { ctx_->work_++; } + void on_work_finished() const noexcept { ctx_->work_--; } + + std::coroutine_handle<> dispatch(std::coroutine_handle<> h) const + { + if (ctx_->running_in_this_thread()) + return h; // Inline execution + post(h); + return std::noop_coroutine(); + } + + void post(std::coroutine_handle<> h) const + { + ctx_->queue_.push(h); + } + + void defer(std::coroutine_handle<> h) const + { + ctx_->local_queue_.push(h); // Thread-local optimization + } + + bool operator==(my_executor const& other) const noexcept + { + return ctx_ == other.ctx_; + } +}; + +static_assert(executor); +---- + +== See Also + +* xref:concepts/is_execution_context.adoc[is_execution_context] +* xref:execution.adoc[Execution Model] diff --git a/doc/modules/ROOT/pages/concepts/frame_allocator.adoc b/doc/modules/ROOT/pages/concepts/frame_allocator.adoc new file mode 100644 index 00000000..04a99fed --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/frame_allocator.adoc @@ -0,0 +1,128 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += frame_allocator + +A frame allocator provides memory allocation for coroutine frames. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept frame_allocator = + std::copy_constructible && + requires(A& a, void* p, std::size_t n) { + { a.allocate(n) } -> std::same_as; + { a.deallocate(p, n) }; + }; + +} // namespace boost::capy +---- + +== Description + +Frame allocators provide memory for coroutine frames—the compiler-generated +structures holding local variables, parameters, and suspension state. +A frame allocator must be a cheaply copyable handle to an underlying +memory resource (e.g., a pointer to a pool). + +The library copies the allocator into the first coroutine frame for +lifetime safety. Subsequent frames in the call tree use the embedded +allocator for both allocation and deallocation. + +=== Default Frame Allocator + +The library provides `default_frame_allocator` which passes through +to `::operator new` and `::operator delete`: + +[source,cpp] +---- +struct default_frame_allocator +{ + void* allocate(std::size_t n) + { + return ::operator new(n); + } + + void deallocate(void* p, std::size_t) + { + ::operator delete(p); + } +}; +---- + +=== Recycling Frame Allocator + +By default, `async_run` uses a recycling frame allocator that caches +deallocated frames for reuse, eliminating most allocation overhead +for typical coroutine patterns. 
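+
+For example, both launches below work; the first relies on the default
+recycling allocator while the second supplies a custom allocator (here
+`my_pool_allocator` is a hypothetical type satisfying this concept):
+
+[source,cpp]
+----
+async_run(ex)(my_task());           // default recycling allocator
+
+my_pool_allocator alloc{pool};
+async_run(ex, alloc)(my_task());    // custom frame allocator
+----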
+ +== Valid Expressions + +Given: + +* `a` — a value of type `A` +* `p` — a value of type `void*` +* `n` — a value of type `std::size_t` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `a.allocate(n)` +| `void*` +| Allocates `n` bytes for a coroutine frame + +| `a.deallocate(p, n)` +| — +| Deallocates memory previously allocated with `allocate(n)` +|=== + +== Example + +[source,cpp] +---- +class pool_frame_allocator +{ + memory_pool* pool_; + +public: + explicit pool_frame_allocator(memory_pool& pool) + : pool_(&pool) + { + } + + void* allocate(std::size_t n) + { + return pool_->allocate(n); + } + + void deallocate(void* p, std::size_t n) + { + pool_->deallocate(p, n); + } +}; + +static_assert(frame_allocator); + +// Usage with async_run: +memory_pool pool; +pool_frame_allocator alloc{pool}; + +async_run(ex, alloc)(my_task()); +---- + +== See Also + +* xref:advanced.adoc#_frame_allocation[Frame Allocation] diff --git a/doc/modules/ROOT/pages/concepts/is_execution_context.adoc b/doc/modules/ROOT/pages/concepts/is_execution_context.adoc new file mode 100644 index 00000000..550574de --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/is_execution_context.adoc @@ -0,0 +1,123 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += is_execution_context + +A type satisfies `is_execution_context` if it derives from +`execution_context` and provides an associated executor type. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept is_execution_context = + std::derived_from && + requires { typename X::executor_type; } && + executor && + requires(X& x) { + { x.get_executor() } -> std::same_as; + }; + +} // namespace boost::capy +---- + +== Description + +An execution context represents a place where function objects are +executed. It provides: + +* A service registry for polymorphic services +* An associated executor type for scheduling work +* Lifecycle management (shutdown, destroy) + +Derived classes such as `io_context` extend `execution_context` to +provide execution facilities like event loops and thread pools. + +=== Service Management + +Execution contexts own services that provide extensible functionality. +Services are created on first use via `use_service()` or explicitly +via `make_service()`. During destruction, services are shut down +and deleted in reverse order of creation. + +=== Destructor Requirements + +The destructor must destroy all unexecuted work that was submitted +via an executor object associated with the execution context. This +is a semantic requirement that cannot be verified at compile time. + +== Valid Expressions + +Given: + +* `x` — a value of type `X` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `X::executor_type` +| type +| The associated executor type, satisfying `executor` + +| `x.get_executor()` +| `X::executor_type` +| Returns an executor for scheduling work on this context +|=== + +== Example + +[source,cpp] +---- +class io_context : public execution_context +{ +public: + class executor_type + { + io_context* ctx_; + + public: + executor_type(io_context& ctx) : ctx_(&ctx) {} + + io_context& context() const noexcept { return *ctx_; } + + void on_work_started() const noexcept { /* ... 
*/ } + void on_work_finished() const noexcept { /* ... */ } + + std::coroutine_handle<> dispatch(std::coroutine_handle<> h) const; + void post(std::coroutine_handle<> h) const; + void defer(std::coroutine_handle<> h) const; + + bool operator==(executor_type const&) const noexcept = default; + }; + + executor_type get_executor() + { + return executor_type{*this}; + } + + ~io_context() + { + shutdown(); + destroy(); + } +}; + +static_assert(is_execution_context); +---- + +== See Also + +* xref:concepts/executor.adoc[executor] +* xref:execution.adoc[Execution Model] diff --git a/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc b/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc new file mode 100644 index 00000000..5615d594 --- /dev/null +++ b/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc @@ -0,0 +1,135 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += stoppable_awaitable + +An awaitable is stoppable if it participates in the stoppable awaitable +protocol by accepting both a dispatcher and a stop token in its +`await_suspend` method. + +== Synopsis + +Defined in header `` + +[source,cpp] +---- +namespace boost::capy { + +template +concept stoppable_awaitable = + affine_awaitable && + requires(A a, std::coroutine_handle
<P>
h, D const& d, std::stop_token token) { + a.await_suspend(h, d, token); + }; + +} // namespace boost::capy +---- + +== Description + +The stoppable awaitable protocol extends `affine_awaitable` to enable +automatic stop token propagation through coroutine chains. When a task +has a stop token, it passes the token to any stoppable awaitables it +awaits. + +A stoppable awaitable must provide _both_ overloads of `await_suspend`: + +* `await_suspend(h, d)` — for callers without stop tokens +* `await_suspend(h, d, token)` — for callers with stop tokens + +The awaitable should use the stop token to support cancellation of +the underlying operation. + +== Valid Expressions + +Given: + +* `a` — a value of type `A` +* `h` — a value of type `std::coroutine_handle
<P>
` +* `d` — a const value of type `D` satisfying `dispatcher` +* `token` — a value of type `std::stop_token` + +[cols="2,1,3"] +|=== +| Expression | Return Type | Description + +| `a.await_ready()` +| `bool` +| Returns `true` if the operation has already completed + +| `a.await_suspend(h, d)` +| (unspecified) +| Suspends without cancellation support + +| `a.await_suspend(h, d, token)` +| (unspecified) +| Suspends with cancellation support via the stop token + +| `a.await_resume()` +| (unspecified) +| Returns the operation result or rethrows any exception +|=== + +== Example + +[source,cpp] +---- +struct stoppable_timer +{ + std::chrono::milliseconds duration_; + bool cancelled_ = false; + + bool await_ready() const noexcept + { + return duration_.count() <= 0; + } + + // Affine path (no cancellation) + template + auto await_suspend(coro h, Dispatcher const& d) + { + start_timer(duration_, [h, &d] { d(h); }); + return std::noop_coroutine(); + } + + // Stoppable path (with cancellation) + template + auto await_suspend(coro h, Dispatcher const& d, std::stop_token token) + { + if (token.stop_requested()) + { + cancelled_ = true; + return d(h); // Resume immediately + } + + auto timer_handle = start_timer(duration_, [h, &d] { d(h); }); + + // Cancel timer if stop requested + std::stop_callback cb(token, [timer_handle] { + cancel_timer(timer_handle); + }); + + return std::noop_coroutine(); + } + + void await_resume() + { + if (cancelled_) + throw std::runtime_error("cancelled"); + } +}; + +static_assert(stoppable_awaitable); +---- + +== See Also + +* xref:concepts/dispatcher.adoc[dispatcher] +* xref:concepts/affine_awaitable.adoc[affine_awaitable] +* xref:advanced.adoc[Advanced Topics] diff --git a/doc/modules/ROOT/pages/coroutines.adoc b/doc/modules/ROOT/pages/coroutines.adoc index 49e55917..6592f9de 100644 --- a/doc/modules/ROOT/pages/coroutines.adoc +++ b/doc/modules/ROOT/pages/coroutines.adoc @@ -9,337 +9,255 @@ = Coroutines -== Introduction - -Capy provides lightweight coroutine support for C++20, enabling -asynchronous code that reads like synchronous code. The library -offers two awaitable types: `task` for lazy coroutine-based -operations, and `async_op` for bridging callback-based -APIs into the coroutine world. - -This section covers the awaitable types provided by the library, -demonstrates their usage patterns, and presents practical examples -showing how to integrate coroutines into your applications. +This page teaches you how to write coroutine functions using `task` and +launch them for execution using `async_run`. -NOTE: Coroutine features are only available when compiling with -C++20 or later. +NOTE: Coroutine features require C++20 or later. -== Awaitables - -=== task - -xref:reference:boost/capy/task.adoc[`task`] is a lazy coroutine type that produces a value of type `T`. -The coroutine does not begin execution when created; it remains -suspended until awaited. This lazy evaluation enables structured -concurrency where parent coroutines naturally await their children. +== Introduction -A `task` owns its coroutine handle and destroys it automatically. -Exceptions thrown within the coroutine are captured and rethrown -when the result is retrieved via `co_await`. +Capy provides lightweight coroutine support for C++20, enabling asynchronous +code that reads like synchronous code. The primary building blocks are: -Tasks support scheduler affinity through the `on()` method, which -binds the task to an executor. 
When a task has affinity, all -internal `co_await` expressions resume on the specified executor, -ensuring consistent execution context. +* `task` — A lazy coroutine type that produces a value of type `T` +* `async_run` — Launches a task for detached execution on an executor -The `task` specialization is used for coroutines that perform -work but do not produce a value. These coroutines use `co_return;` -with no argument. +Tasks are _lazy_: they do not begin execution when created. A task remains +suspended until it is either awaited by another coroutine or launched +explicitly with `async_run`. This lazy evaluation enables structured +composition where parent coroutines naturally await their children. -=== async_op +== The task Type -xref:reference:boost/capy/async_op.adoc[`async_op`] bridges traditional callback-based asynchronous -APIs with coroutines. It wraps a deferred operation—a callable that -accepts a completion handler, starts an asynchronous operation, and -invokes the handler with the result. +A `task` represents an asynchronous operation that will eventually +produce a value of type `T`. You create a task by writing a coroutine +function—one that uses `co_await` or `co_return`: -The key advantage of `async_op` is its type-erased design. The -implementation details are hidden behind an abstract interface, -allowing runtime-specific code such as Boost.Asio to be confined -to source files. Headers that return `async_op` do not need -to include Asio or other heavyweight dependencies, keeping compile -times low and interfaces clean. +[source,cpp] +---- +#include -Use xref:reference:boost/capy/make_async_op.adoc[`make_async_op()`] to create an `async_op` from any -callable that follows the deferred operation pattern. +using boost::capy::task; -The `async_op` specialization is used for operations that -signal completion without producing a value, such as timers, write -operations, or connection establishment. The completion handler -takes no arguments. +task compute() +{ + co_return 42; +} +---- -== Usage +The function `compute()` returns immediately with a suspended coroutine. +No code inside the function body executes until the task is started. -=== When to use task +=== Returning Values -Return `task` from a coroutine function—one that uses `co_await` -or `co_return`. The function body contains coroutine logic and the -return type tells the compiler to generate the appropriate coroutine -machinery. +Use `co_return` to produce the task's result: [source,cpp] ---- -task compute() +task greet(std::string name) { - int a = co_await step_one(); - int b = co_await step_two(a); - co_return a + b; + co_return "Hello, " + name + "!"; } ---- -Use `task` when composing asynchronous operations purely within the -coroutine world. Tasks can await other tasks, forming a tree of -dependent operations. - -=== When to use async_op +=== Void Tasks -Return `async_op` from a regular (non-coroutine) function that -wraps an existing callback-based API. The function does not use -`co_await` or `co_return`; instead it constructs and returns an -`async_op` using `make_async_op()`. +For operations that perform work without producing a value, use +`task`: [source,cpp] ---- -async_op async_read(socket& s, buffer& b) +task log_message(std::string msg) { - return make_async_op( - [&](auto handler) { - s.async_read(b, std::move(handler)); - }); + std::cout << msg << std::endl; + co_return; } ---- -Use `async_op` at the boundary between callback-based code and -coroutines. 
It serves as an adapter that lets coroutines `co_await` -operations implemented with traditional completion handlers. - -=== Choosing between them - -* Writing new asynchronous logic? Use `task`. -* Wrapping an existing callback API? Use `async_op`. -* Composing multiple awaitable operations? Use `task`. -* Exposing a library function without leaking dependencies? Use - `async_op` with the implementation in a source file. +The explicit `co_return;` (or simply reaching the end of the function) +completes a void task. -In practice, application code is primarily `task`-based, while -`async_op` appears at integration points with I/O libraries -and other callback-driven systems. +=== Awaiting Other Tasks -== Examples - -=== Chaining tasks - -This example demonstrates composing multiple tasks into a pipeline. -Each step awaits the previous one, and the final result propagates -back to the caller. +Tasks can await other tasks using `co_await`. The calling coroutine +suspends until the awaited task completes: [source,cpp] ---- -#include -#include - -using boost::capy::task; - -task parse_header(std::string const& data) +task step_one() { - // Extract content length from header - auto pos = data.find("Content-Length: "); - if (pos == std::string::npos) - co_return 0; - co_return std::stoi(data.substr(pos + 16)); + co_return 10; } -task fetch_data() +task step_two(int x) { - // Simulated network response - co_return std::string("Content-Length: 42\r\n\r\nHello"); + co_return x * 2; } -task get_content_length() +task pipeline() { - std::string response = co_await fetch_data(); - int length = co_await parse_header(response); - co_return length; + int a = co_await step_one(); + int b = co_await step_two(a); + co_return a + b; // 10 + 20 = 30 } ---- -=== Wrapping a callback API +Each `co_await` suspends the current coroutine, starts the child task, +and resumes when the child completes. The child's return value becomes +the result of the `co_await` expression. + +== Launching Tasks -This example shows how to wrap a hypothetical callback-based -timer into an awaitable. The implementation details stay in -the source file. +Tasks are lazy and require a driver to execute. The `async_run` function +launches a task for detached execution on an executor: [source,cpp] ---- -// timer.hpp - public header, no Asio includes -#ifndef TIMER_HPP -#define TIMER_HPP +#include -#include +using boost::capy::async_run; -namespace mylib { +void start(executor ex) +{ + async_run(ex)(compute()); +} +---- -// Returns the number of milliseconds actually elapsed -boost::capy::async_op -async_wait(int milliseconds); +The syntax `async_run(ex)(task)` creates a runner bound to the executor, +then immediately launches the task. The task begins executing when the +executor schedules it; if inline execution is permitted, the task runs +synchronously until it suspends on an I/O operation. 
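+
+For completeness, here is one way to obtain an executor and launch the
+task from `main`. This sketch assumes `thread_pool` is default
+constructible and exposes `get_executor()`, as in the execution-model
+examples:
+
+[source,cpp]
+----
+#include <boost/capy/async_run.hpp>
+#include <boost/capy/thread_pool.hpp>
+
+using namespace boost::capy;
+
+int main()
+{
+    thread_pool pool;                            // execution context
+    async_run(pool.get_executor())(compute());   // launch on the pool
+    // pool is assumed to join its threads on destruction
+}
+----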
-} // namespace mylib +=== Fire and Forget -#endif ----- +The simplest form discards the result: [source,cpp] ---- -// timer.cpp - implementation, Asio details hidden here -#include "timer.hpp" -#include - -namespace mylib { - -boost::capy::async_op -async_wait(int milliseconds) -{ - return boost::capy::make_async_op( - [milliseconds](auto handler) - { - // In a real implementation, this would use - // a shared io_context and steady_timer - auto timer = std::make_shared( - get_io_context(), - std::chrono::milliseconds(milliseconds)); - - timer->async_wait( - [timer, milliseconds, h = std::move(handler)] - (boost::system::error_code) mutable - { - h(milliseconds); - }); - }); -} - -} // namespace mylib +async_run(ex)(compute()); ---- -=== Void operations +If the task throws an exception, it is rethrown on the executor's thread. +This is appropriate for top-level tasks where errors should propagate. + +=== Handling Results -This example shows `task` and `async_op` for -operations that complete without producing a value. +To receive the task's result, provide a completion handler: [source,cpp] ---- -#include -#include +async_run(ex)(compute(), [](int result) { + std::cout << "Got: " << result << "\n"; +}); +---- -using boost::capy::task; -using boost::capy::async_op; -using boost::capy::make_async_op; +The handler is called when the task completes successfully. If the task +throws, the exception is rethrown (default behavior). -// Wrap a callback-based timer (void result) -async_op async_sleep(int milliseconds) -{ - return make_async_op( - [milliseconds](auto on_done) - { - // In real code, this would start a timer - // and call on_done() when it expires - start_timer(milliseconds, std::move(on_done)); - }); -} +=== Handling Errors -// A void task that performs work without returning a value -task log_with_delay(std::string message) -{ - co_await async_sleep(100); - std::cout << message << std::endl; - co_return; -} +To handle both success and failure, provide a handler that accepts +`std::exception_ptr`: -task run_sequence() -{ - co_await log_with_delay("Step 1"); - co_await log_with_delay("Step 2"); - co_await log_with_delay("Step 3"); - co_return; -} +[source,cpp] +---- +async_run(ex)(compute(), overloaded{ + [](int result) { + std::cout << "Success: " << result << "\n"; + }, + [](std::exception_ptr ep) { + try { + if (ep) std::rethrow_exception(ep); + } catch (std::exception const& e) { + std::cerr << "Error: " << e.what() << "\n"; + } + } +}); ---- -=== Spawning tasks on an executor - -Tasks are lazy and require a driver to execute. The `spawn()` function -starts a task on an executor and delivers the result to a completion -handler. This is useful for launching tasks from non-coroutine code -or integrating tasks into callback-based systems. +Alternatively, use separate handlers for success and error: [source,cpp] ---- -#include -#include +async_run(ex)(compute(), + [](int result) { std::cout << result << "\n"; }, + [](std::exception_ptr ep) { /* handle error */ } +); +---- -using boost::capy::task; -using boost::capy::executor; -using boost::capy::spawn; +== Exception Handling -task compute() +Exceptions thrown within a task are captured and stored. 
When the task +is awaited, the exception is rethrown in the awaiting coroutine: + +[source,cpp] +---- +task might_fail() { - co_return 42; + throw std::runtime_error("oops"); + co_return 0; // never reached } -void start_computation(executor ex) +task caller() { - // Spawn a task on the executor with a completion handler - spawn(ex, compute(), [](auto result) { - if (result.has_value()) - std::cout << "Result: " << *result << std::endl; - else - std::cerr << "Error occurred\n"; - }); + try { + int x = co_await might_fail(); + } catch (std::exception const& e) { + std::cerr << "Caught: " << e.what() << "\n"; + } } ---- -The `spawn()` function takes an executor, a task, and a completion handler. -The handler receives `system::result` which holds -either the task's return value or any exception thrown during execution. -The task runs to completion on the executor with proper scheduler affinity. +This enables natural exception handling across coroutine boundaries. -=== Complete request handler +== Complete Example -This example combines tasks and async_op to implement a -request handler that reads a request, processes it, and sends -a response. +This example demonstrates a typical pattern: a chain of tasks that +process data and produce a final result. [source,cpp] ---- #include -#include +#include #include +#include using boost::capy::task; -using boost::capy::async_op; +using boost::capy::async_run; -// Forward declarations - implementations use async_op -// to wrap the underlying I/O library -async_op async_read(int fd); -async_op async_write(int fd, std::string data); - -// Pure coroutine logic using task -task process_request(std::string const& request) +// Simulate fetching data +task fetch_data() { - // Transform the request into a response - co_return "HTTP/1.1 200 OK\r\n\r\nHello, " + request; + co_return "Content-Length: 42\r\n\r\nHello"; } -task handle_connection(int fd) +// Parse content length from headers +task parse_content_length(std::string const& data) { - // Read the incoming request - std::string request = co_await async_read(fd); - - // Process it - std::string response = co_await process_request(request); + auto pos = data.find("Content-Length: "); + if (pos == std::string::npos) + co_return 0; + co_return std::stoi(data.substr(pos + 16)); +} - // Send the response - std::size_t bytes_written = co_await async_write(fd, response); +// Compose the operations +task get_content_length() +{ + std::string data = co_await fetch_data(); + int length = co_await parse_content_length(data); + co_return length; +} - co_return static_cast(bytes_written); +void run_example(executor ex) +{ + async_run(ex)(get_content_length(), [](int length) { + std::cout << "Content length: " << length << "\n"; + }); } ---- +== Next Steps + +Now that you can write and launch coroutines, the next page explains +xref:execution.adoc[where they execute]—how executor affinity works +and how to control which thread or context runs your code. diff --git a/doc/modules/ROOT/pages/execution.adoc b/doc/modules/ROOT/pages/execution.adoc new file mode 100644 index 00000000..787d0d78 --- /dev/null +++ b/doc/modules/ROOT/pages/execution.adoc @@ -0,0 +1,314 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Execution Model + +This page explains where your coroutines execute, how executor affinity +propagates through call chains, and how to control execution context. + +== Executors and Contexts + +An *executor* is to coroutines what an allocator is to memory. It +encapsulates the rules for where, when, and how a coroutine resumes. +An executor is a lightweight, copyable handle to an *execution context* +such as a thread pool, I/O context, or strand. + +When you launch a task with `async_run(ex)`, the executor `ex` determines: + +* Which thread runs the coroutine +* When resumption occurs (inline vs. queued) +* What ordering guarantees apply + +== Executor Operations + +Executors provide three fundamental operations for scheduling work: + +[cols="1,3"] +|=== +| Operation | Behavior + +| `dispatch` +| Run inline if the executor permits, otherwise queue. This is the +cheapest path and is used when crossing execution context boundaries. + +| `post` +| Always queue, never inline. Use when guaranteed asynchrony is required. + +| `defer` +| Always queue, but hints "this is my continuation." Enables thread-local +optimizations for same-context resumption. +|=== + +In a pure coroutine model, *symmetric transfer* handles most continuation +chaining directly—the compiler generates tail calls between frames with +zero executor involvement. The executor operations become relevant when +crossing context boundaries or enforcing ordering constraints. + +== Flow Diagrams + +To reason about where code executes, we use a compact notation called +*flow diagrams*. These diagrams show the call chain of coroutines and +I/O operations: + +[cols="1,3"] +|=== +| Symbol | Meaning + +| `c`, `c1`, `c2` +| Coroutines (lazy tasks) + +| `io` +| I/O operation on an `io_object` + +| `->` +| `co_await` leading to a coroutine or I/O + +| `!` +| Coroutine with explicit executor affinity + +| `ex`, `ex1`, `ex2` +| Executors +|=== + +=== Simple Chain + +The diagram: + +---- +c -> io +---- + +represents: + +[source,cpp] +---- +task c(io_object& io) +{ + co_await io.async_read(); +} +---- + +The coroutine `c` awaits an I/O operation. When the I/O completes, +`c` resumes. + +=== Nested Coroutines + +The diagram: + +---- +c1 -> c2 -> io +---- + +represents: + +[source,cpp] +---- +task c1(io_object& io) +{ + co_await c2(io); +} + +task c2(io_object& io) +{ + co_await io.async_read(); +} +---- + +When `c1` awaits `c2`, control transfers to `c2`. When the I/O completes, +`c2` resumes and completes, then `c1` resumes. + +== Executor Affinity + +*Affinity* means a coroutine is bound to a specific executor. When a +coroutine has affinity to executor `ex`, all of its resumptions occur +through `ex`. + +In flow diagrams, a `!` prefix indicates explicit affinity: + +---- +!c1 -> io +---- + +This means `c1` has affinity to some executor `ex`. When the I/O +completes, `c1` is resumed through `ex`. + +You establish affinity when launching a task: + +[source,cpp] +---- +async_run(ex)(my_task()); // my_task has affinity to ex +---- + +=== Affinity Propagation + +Affinity propagates forward through `co_await` chains. 
When a coroutine +with affinity awaits a child task, the child inherits the same affinity: + +---- +!c1 -> c2 -> io +---- + +Here: + +* `c1` has explicit affinity to `ex` +* `c2` inherits affinity from `c1` +* The I/O captures `ex` and resumes through it +* When `c2` completes, `c1` resumes via symmetric transfer (same executor) + +The mechanism is the *affine awaitable protocol*: each `co_await` passes +the current dispatcher to the awaited operation, which stores it and +uses it for resumption. + +=== Why Affinity Matters + +Affinity provides important guarantees: + +1. **Predictable execution context** — Your code always runs where you expect +2. **Thread safety** — No surprise thread hops mid-operation +3. **Strand compatibility** — Strands enforce ordering; affinity ensures + resumption goes through the strand + +Without affinity, an I/O completion might resume your coroutine on an +arbitrary I/O thread, requiring explicit synchronization. + +== Changing Affinity with run_on + +Sometimes you need a child coroutine to run on a _different_ executor. +The `run_on` function changes affinity for a subtree of the call chain: + +[source,cpp] +---- +#include + +using boost::capy::run_on; + +task parent() +{ + // This task runs on ex1 (inherited) + + // Run child on ex2 instead + co_await run_on(ex2, child_task()); + + // Back on ex1 after child completes +} +---- + +In flow diagram notation: + +---- +!c1 -> c2 -> !c3 -> io +---- + +This represents: + +[source,cpp] +---- +task c1(io_object& io) // affinity: ex1 +{ + co_await c2(io); +} + +task c2(io_object& io) // affinity: ex1 (inherited) +{ + co_await run_on(ex2, c3(io)); +} + +task c3(io_object& io) // affinity: ex2 (explicit) +{ + co_await io.async_read(); +} +---- + +The execution sequence: + +1. `c1` launches on `ex1` +2. `c2` continues on `ex1` (inherited) +3. `run_on` binds `c3` to `ex2` +4. I/O captures `ex2` +5. I/O completes → `c3` resumes through `ex2` +6. `c3` completes → `c2` resumes through `ex1` (caller's executor) +7. `c2` completes → `c1` resumes via symmetric transfer (same executor) + +== Symmetric Transfer + +When a child coroutine completes, it must resume its caller. If both +share the same executor, *symmetric transfer* provides a direct tail +call with zero overhead—no executor involvement, no queuing. + +The decision logic: + +1. **Same executor, no constraints** → symmetric transfer +2. **Different executors** → dispatch through caller's executor +3. **Same executor, strand required** → defer through executor + +Symmetric transfer is automatic. The library detects when caller and +callee share the same dispatcher (pointer equality) and optimizes +accordingly. 
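+
+The mechanism is the handle-returning form of `await_suspend`. The sketch
+below (illustrative only, not the library's actual final awaiter) shows
+the shape of the fast path:
+
+[source,cpp]
+----
+struct final_awaiter_sketch
+{
+    std::coroutine_handle<> continuation;   // the awaiting coroutine, if any
+
+    bool await_ready() const noexcept { return false; }
+
+    std::coroutine_handle<>
+    await_suspend(std::coroutine_handle<>) const noexcept
+    {
+        // Returning a handle here is a tail call: control transfers
+        // directly to that frame with no executor involvement
+        if (continuation)
+            return continuation;            // same-dispatcher fast path
+        return std::noop_coroutine();       // nothing to resume
+    }
+
+    void await_resume() const noexcept {}
+};
+----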
+ +== Execution Guarantees + +Capy's coroutine model provides these guarantees: + +[cols="1,3"] +|=== +| Guarantee | Description + +| Affinity preservation +| A coroutine with affinity always resumes through its executor + +| Forward progress +| `post` and `defer` never block; work is queued for later execution + +| Exception safety +| Exceptions propagate cleanly to the awaiting coroutine + +| Completion order +| For a single executor, work completes in submission order (FIFO) +|=== + +== Example: Multi-Context Pipeline + +This example shows a pipeline where different stages run on different +executors: + +[source,cpp] +---- +#include +#include + +// CPU-bound work runs on compute pool +task process(Data input) +{ + // Heavy computation here + co_return transform(input); +} + +// I/O runs on I/O context +task handle_request(io_context& ioc, thread_pool& pool) +{ + auto io_ex = ioc.get_executor(); + auto cpu_ex = pool.get_executor(); + + // Read request (I/O executor) + Data request = co_await read_request(); + + // Process on compute pool + Data response = co_await run_on(cpu_ex, process(request)); + + // Back on I/O executor, write response + co_await write_response(response); +} +---- + +== Next Steps + +You now understand where coroutines execute and how to control execution +context. The next page covers xref:advanced.adoc[advanced topics] including +custom frame allocators, stop token propagation, and implementing your +own affine-aware awaitables. diff --git a/include/boost/capy.hpp b/include/boost/capy.hpp index f80d7429..2f450763 100644 --- a/include/boost/capy.hpp +++ b/include/boost/capy.hpp @@ -13,16 +13,32 @@ #include #include #include +#include +#include +#include +#include +#include #include #include +#include +#include #include -#include +#include #include +#include +#include +#include +#include +#include #include +#include #include #include +#include #include #include +#include #include +#include #endif diff --git a/include/boost/capy/affine.hpp b/include/boost/capy/affine.hpp index 7594de9e..430775cb 100644 --- a/include/boost/capy/affine.hpp +++ b/include/boost/capy/affine.hpp @@ -10,91 +10,261 @@ #ifndef BOOST_CAPY_AFFINE_HPP #define BOOST_CAPY_AFFINE_HPP -#include - -#ifdef BOOST_CAPY_HAS_CORO +#include #include #include #include #include +#include #include #include namespace boost { namespace capy { -/** Concept for types that can dispatch coroutine resumption. +/** Concept for dispatcher types. + + A dispatcher is a callable object that accepts a coroutine handle + and schedules it for resumption. The dispatcher is responsible for + ensuring the handle is eventually resumed on the appropriate execution + context. - A dispatcher is a callable that accepts a coroutine handle - and arranges for it to be resumed on the target execution - context. Since std::coroutine_handle has operator() which - calls resume(), the dispatcher can invoke the handle directly. + @tparam D The dispatcher type. + @tparam P The promise type (defaults to void). + + @par Requirements + @li `d(h)` must be valid where `h` is `std::coroutine_handle
<P>
<P>` and + `d` is a const reference to `D` + @li `d(h)` must return a `coro` (or convertible type) + to enable symmetric transfer + @li Calling `d(h)` schedules `h` for resumption (typically by scheduling + it on a specific execution context) and returns a coroutine handle + that the caller may use for symmetric transfer + @li The dispatcher must be const-callable (logical constness), enabling + thread-safe concurrent dispatch from multiple coroutines + + @note Since `coro` has `operator()` which invokes `resume()`, the handle + itself is callable and can be dispatched directly. +*/ +template<class D, class P = void> +concept dispatcher = requires(D const& d, std::coroutine_handle<P>
h) { + { d(h) } -> std::convertible_to<coro>; +}; + +/** Concept for affine awaitable types. + + An awaitable is affine if it participates in the affine awaitable protocol + by accepting a dispatcher in its `await_suspend` method. This enables + zero-overhead scheduler affinity without requiring the full sender/receiver + protocol. + + @tparam A The awaitable type. + @tparam D The dispatcher type. + @tparam P The promise type (defaults to void). + + @par Requirements + @li `D` must satisfy `dispatcher` + @li `A` must provide `await_suspend(std::coroutine_handle<P>
h, D const& d)` + @li The awaitable must use the dispatcher `d` to resume the caller, + e.g. `return d(h);` + @li The dispatcher returns a coroutine handle that `await_suspend` may + return for symmetric transfer @par Example @code - struct my_dispatcher + struct my_async_op { - void operator()(std::coroutine_handle<> h) const + template + auto await_suspend(coro h, Dispatcher const& d) { - // Queue h for execution on target context - thread_pool_.post([h] { h(); }); + start_async([h, &d] { + d(h); // Schedule resumption through dispatcher + }); + return std::noop_coroutine(); // Or return d(h) for symmetric transfer } + // ... await_ready, await_resume ... }; @endcode - - @tparam D The dispatcher type to check. - @tparam P The promise type for the coroutine handle (default void). */ -template -concept dispatcher = requires(D d, std::coroutine_handle

<P> h) { d(h); }; +template<class A, class D, class P = void> +concept affine_awaitable = + dispatcher<D, P> && + requires(A a, std::coroutine_handle<P>
h, D const& d) { + a.await_suspend(h, d); + }; + +/** Concept for stoppable awaitable types. + + An awaitable is stoppable if it participates in the stoppable awaitable + protocol by accepting both a dispatcher and a stop_token in its + `await_suspend` method. This extends the affine awaitable protocol to + enable automatic stop token propagation through coroutine chains. + + @tparam A The awaitable type. + @tparam D The dispatcher type. + @tparam P The promise type (defaults to void). + + @par Requirements + @li `A` must satisfy `affine_awaitable` + @li `A` must provide `await_suspend(std::coroutine_handle<P>
h, D const& d, + std::stop_token token)` + @li The awaitable should use the stop_token to support cancellation + @li The awaitable must use the dispatcher `d` to resume the caller @par Example @code - struct affine_async_op + struct my_stoppable_op { - int result_; - - bool await_ready() const noexcept { return false; } - template - void await_suspend(std::coroutine_handle<> h, Dispatcher& d) const + auto await_suspend(coro h, Dispatcher const& d, std::stop_token token) { - // Start async work, then resume via dispatcher - start_async([h, &d]() { - d(h); + start_async([h, &d, token] { + if (token.stop_requested()) { + // Handle cancellation + } + d(h); // Schedule resumption through dispatcher }); + return std::noop_coroutine(); } - - int await_resume() const noexcept { return result_; } + // ... await_ready, await_resume ... }; @endcode - @tparam A The awaitable type to check. - @tparam D The dispatcher type. - @tparam P The promise type for the coroutine handle (default void). + @see affine_awaitable + @see dispatcher */ template -concept affine_awaitable = - dispatcher && - requires(A a, std::coroutine_handle

<P> h, D& d) { - a.await_suspend(h, d); +concept stoppable_awaitable = + affine_awaitable<A, D, P> && + requires(A a, std::coroutine_handle<P>
h, D const& d, std::stop_token token) { + a.await_suspend(h, d, token); }; +/** A type-erased wrapper for dispatcher objects. + + This class provides type erasure for any type satisfying the `dispatcher` + concept, enabling runtime polymorphism without virtual functions. It stores + a pointer to the original dispatcher and a function pointer to invoke it, + allowing dispatchers of different types to be stored uniformly. + + @par Thread Safety + The `any_dispatcher` itself is not thread-safe for concurrent modification, + but `operator()` is const and safe to call concurrently if the underlying + dispatcher supports concurrent dispatch. + + @par Lifetime + The `any_dispatcher` stores a pointer to the original dispatcher object. + The caller must ensure the referenced dispatcher outlives the `any_dispatcher` + instance. This is typically satisfied when the dispatcher is an executor + stored in a coroutine promise or service provider. + + @par Example + @code + void store_dispatcher(any_dispatcher d) + { + // Can store any dispatcher type uniformly + auto h = d(some_coroutine); // Invoke through type-erased interface + } + + executor_base const& ex = get_executor(); + store_dispatcher(ex); // Implicitly converts to any_dispatcher + @endcode + + @see dispatcher + @see executor_base +*/ +class any_dispatcher +{ + void const* d_ = nullptr; + coro(*f_)(void const*, coro) = nullptr; + +public: + /** Default constructor. + + Constructs an empty `any_dispatcher`. Calling `operator()` on a + default-constructed instance results in undefined behavior. + */ + any_dispatcher() = default; + + /** Copy constructor. + + Copies the internal pointer and function, preserving identity. + This enables the same-dispatcher optimization when passing + any_dispatcher through coroutine chains. + */ + any_dispatcher(any_dispatcher const&) = default; + + /** Copy assignment operator. */ + any_dispatcher& operator=(any_dispatcher const&) = default; + + /** Constructs from any dispatcher type. + + Captures a reference to the given dispatcher and stores a type-erased + invocation function. The dispatcher must remain valid for the lifetime + of this `any_dispatcher` instance. + + @param d The dispatcher to wrap. Must satisfy the `dispatcher` concept. + A pointer to this object is stored internally; the dispatcher + must outlive this wrapper. + */ + template + requires (!std::same_as, any_dispatcher>) + any_dispatcher(D const& d) + : d_(&d) + , f_([](void const* pd, coro h) { + return static_cast(pd)->operator()(h); + }) + { + } + + /** Returns true if this instance holds a valid dispatcher. + + @return `true` if constructed with a dispatcher, `false` if + default-constructed. + */ + explicit operator bool() const noexcept + { + return d_ != nullptr; + } + + /** Compares two dispatchers for identity. + + Two `any_dispatcher` instances are equal if they wrap the same + underlying dispatcher object (pointer equality). This enables + the affinity optimization: when `caller_dispatcher == my_dispatcher`, + symmetric transfer can proceed without a `running_in_this_thread()` + check. + + @param other The dispatcher to compare against. + + @return `true` if both wrap the same dispatcher object. + */ + bool operator==(any_dispatcher const& other) const noexcept + { + return d_ == other.d_; + } + + /** Dispatches a coroutine handle through the wrapped dispatcher. + + Invokes the stored dispatcher with the given coroutine handle, + returning a handle suitable for symmetric transfer. 
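@par Illustrative sketch
A hypothetical awaiter, not part of the library, showing how the handle
returned here is typically handed back to the compiler for symmetric
transfer (`resume_via` and its use are assumptions for illustration only):
@code
struct resume_via
{
    any_dispatcher d_; // assumed to hold a valid dispatcher

    bool await_ready() const noexcept { return false; }

    // Give the caller's handle to the dispatcher; the handle it returns
    // (possibly std::noop_coroutine()) is used for symmetric transfer.
    coro await_suspend(coro h) const { return d_(h); }

    void await_resume() const noexcept {}
};
@endcode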
+ + @param h The coroutine handle to dispatch for resumption. + + @return A coroutine handle that the caller may use for symmetric + transfer, or `std::noop_coroutine()` if the dispatcher + posted the work for later execution. + + @pre This instance was constructed with a valid dispatcher + (not default-constructed). + */ + coro operator()(coro h) const + { + return f_(d_, h); + } +}; + /** Wrapper that bridges affine awaitables to standard coroutine machinery. This adapter wraps an affine_awaitable and provides the standard @@ -673,5 +843,3 @@ auto make_affine(Awaitable&& awaitable, Dispatcher& dispatcher) } // boost #endif - -#endif diff --git a/include/boost/capy/async_op.hpp b/include/boost/capy/async_op.hpp index 065e549f..f324347e 100644 --- a/include/boost/capy/async_op.hpp +++ b/include/boost/capy/async_op.hpp @@ -12,8 +12,6 @@ #include -#ifdef BOOST_CAPY_HAS_CORO - #include #include #include @@ -195,9 +193,9 @@ class async_op */ template void - await_suspend(std::coroutine_handle<> h, Dispatcher& dispatcher) + await_suspend(std::coroutine_handle<> h, Dispatcher const& dispatcher) { - impl_->start([h, &dispatcher]{ dispatcher(h); }); + impl_->start([h, &dispatcher]{ dispatcher(h).resume(); }); } /** Return the result after completion. @@ -305,9 +303,9 @@ class async_op */ template void - await_suspend(std::coroutine_handle<> h, Dispatcher& dispatcher) + await_suspend(std::coroutine_handle<> h, Dispatcher const& dispatcher) { - impl_->start([h, &dispatcher]{ dispatcher(h); }); + impl_->start([h, &dispatcher]{ dispatcher(h).resume(); }); } /** Complete the await and check for exceptions. @@ -401,5 +399,3 @@ make_async_op(DeferredOp&& op) } // boost #endif - -#endif diff --git a/include/boost/capy/async_run.hpp b/include/boost/capy/async_run.hpp new file mode 100644 index 00000000..671ddd4a --- /dev/null +++ b/include/boost/capy/async_run.hpp @@ -0,0 +1,489 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_ASYNC_RUN_HPP +#define BOOST_CAPY_ASYNC_RUN_HPP + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace boost { +namespace capy { + +namespace detail { + +// Discards the result on success, rethrows on exception. +struct default_handler +{ + template + void operator()(T&&) const noexcept + { + } + + void operator()() const noexcept + { + } + + void operator()(std::exception_ptr ep) const + { + if(ep) + std::rethrow_exception(ep); + } +}; + +// Combines two handlers into one: h1 for success, h2 for exception. +template +struct handler_pair +{ + H1 h1_; + H2 h2_; + + template + void operator()(T&& v) + { + h1_(std::forward(v)); + } + + void operator()() + { + h1_(); + } + + void operator()(std::exception_ptr ep) + { + h2_(ep); + } +}; + +template +struct async_run_task_result +{ + std::optional result_; + + template + void return_value(V&& value) + { + result_ = std::forward(value); + } +}; + +template<> +struct async_run_task_result +{ + void return_void() + { + } +}; + +// Lifetime storage for the Dispatcher value. +// The Allocator is embedded in the user's coroutine frame. 
+template< + dispatcher Dispatcher, + typename T, + typename Handler> +struct async_run_task +{ + struct promise_type + : frame_allocating_base + , async_run_task_result + { + Dispatcher d_; + Handler handler_; + std::exception_ptr ep_; + + template + promise_type(D&& d, H&& h, Args&&...) + : d_(std::forward(d)) + , handler_(std::forward(h)) + { + } + + async_run_task get_return_object() + { + return {std::coroutine_handle::from_promise(*this)}; + } + + /** Suspend initially. + + The frame allocator is already set in TLS by the + embedding_frame_allocator when the user's task was created. + No action needed here. + */ + std::suspend_always initial_suspend() noexcept + { + return {}; + } + + auto final_suspend() noexcept + { + struct awaiter + { + promise_type* p_; + + bool await_ready() const noexcept + { + return false; + } + + coro await_suspend(coro h) const noexcept + { + // Save before destroy + auto handler = std::move(p_->handler_); + auto ep = p_->ep_; + + // Clear thread-local before destroy to avoid dangling pointer + frame_allocating_base::clear_frame_allocator(); + + // For non-void, we need to get the result before destroy + if constexpr (!std::is_void_v) + { + auto result = std::move(p_->result_); + h.destroy(); + if(ep) + handler(ep); + else + handler(std::move(*result)); + } + else + { + h.destroy(); + if(ep) + handler(ep); + else + handler(); + } + return std::noop_coroutine(); + } + + void await_resume() const noexcept + { + } + }; + return awaiter{this}; + } + + void unhandled_exception() + { + ep_ = std::current_exception(); + } + + template + struct transform_awaiter + { + std::decay_t a_; + promise_type* p_; + + bool await_ready() + { + return a_.await_ready(); + } + + auto await_resume() + { + return a_.await_resume(); + } + + template + auto await_suspend(std::coroutine_handle h) + { + return a_.await_suspend(h, p_->d_); + } + }; + + template + auto await_transform(Awaitable&& a) + { + using A = std::decay_t; + if constexpr (affine_awaitable) + { + // Zero-overhead path for affine awaitables + return transform_awaiter{ + std::forward(a), this}; + } + else + { + // Trampoline fallback for legacy awaitables + return make_affine(std::forward(a), d_); + } + } + }; + + std::coroutine_handle h_; + + void release() + { + h_ = nullptr; + } + + ~async_run_task() + { + if(h_) + h_.destroy(); + } +}; + +template< + dispatcher Dispatcher, + typename T, + typename Handler> +async_run_task +make_async_run_task(Dispatcher, Handler handler, task t) +{ + if constexpr (std::is_void_v) + co_await std::move(t); + else + co_return co_await std::move(t); +} + +/** Runs the root task with the given dispatcher and handler. +*/ +template< + dispatcher Dispatcher, + typename T, + typename Handler> +void +run_async_run_task(Dispatcher d, task t, Handler handler) +{ + auto root = make_async_run_task( + std::move(d), std::move(handler), std::move(t)); + root.h_.promise().d_(coro{root.h_}).resume(); + root.release(); +} + +/** Runner object returned by async_run(dispatcher). + + Provides operator() overloads to launch tasks with various + handler configurations. The dispatcher is captured and used + to schedule the task execution. + + @par Frame Allocator Activation + The constructor sets the thread-local frame allocator, enabling + coroutine frame recycling for tasks created after construction. + This requires the single-expression usage pattern. 
+ + @par Required Usage Pattern + @code + // CORRECT: Single expression - allocator active when task created + async_run(ex)(make_task()); + async_run(ex)(make_task(), handler); + + // INCORRECT: Split pattern - allocator may be changed between lines + auto runner = async_run(ex); // Sets TLS + // ... other code may change TLS here ... + runner(make_task()); // Won't compile (deleted move) + @endcode + + @par Enforcement Mechanisms + Multiple layers ensure correct usage: + + @li Deleted copy/move constructors - Relies on C++17 guaranteed + copy elision. The runner can only exist as a prvalue constructed + directly at the call site. If this compiles, elision occurred. + + @li Rvalue-qualified operator() - All operator() overloads are + &&-qualified, meaning they can only be called on rvalues. This + forces the idiom `async_run(ex)(task)` as a single expression. + + @see async_run +*/ +template< + dispatcher Dispatcher, + frame_allocator Allocator = detail::recycling_frame_allocator> +struct async_run_awaitable +{ + Dispatcher d_; + detail::embedding_frame_allocator embedder_; + + /** Construct runner and activate frame allocator. + + Sets the thread-local frame allocator to enable recycling + for coroutines created after this call. + + @param d The dispatcher for task execution. + @param a The frame allocator (default: recycling_frame_allocator). + */ + async_run_awaitable(Dispatcher d, Allocator a) + : d_(std::move(d)) + , embedder_(std::move(a)) + { + frame_allocating_base::set_frame_allocator(embedder_); + } + + // Enforce C++17 guaranteed copy elision. + // If this compiles, elision occurred and &embedder_ is stable. + async_run_awaitable(async_run_awaitable const&) = delete; + async_run_awaitable(async_run_awaitable&&) = delete; + async_run_awaitable& operator=(async_run_awaitable const&) = delete; + async_run_awaitable& operator=(async_run_awaitable&&) = delete; + + /** Launch task with default handler (fire-and-forget). + + Uses default_handler which discards results and rethrows + exceptions. + + @param t The task to execute. + */ + template + void operator()(task t) && + { + // Note: TLS now points to embedded wrapper in user's task frame, + // not to embedder_. This is expected behavior. + run_async_run_task( + std::move(d_), std::move(t), default_handler{}); + } + + /** Launch task with completion handler. + + The handler is called on success with the result value (non-void) + or no arguments (void tasks). If the handler also provides an + overload for `std::exception_ptr`, it handles exceptions directly. + Otherwise, exceptions are automatically rethrown (default behavior). + + @code + // Success-only handler (exceptions rethrow automatically) + async_run(ex)(my_task(), [](int result) { + std::cout << result; + }); + + // Full handler with exception support + async_run(ex)(my_task(), overloaded{ + [](int result) { std::cout << result; }, + [](std::exception_ptr) { } + }); + @endcode + + @param t The task to execute. + @param h The completion handler. + */ + template + void operator()(task t, Handler h) && + { + if constexpr (std::is_invocable_v) + { + // Handler handles exceptions itself + run_async_run_task( + std::move(d_), std::move(t), std::move(h)); + } + else + { + // Handler only handles success - pair with default exception handler + using combined = handler_pair; + run_async_run_task( + std::move(d_), std::move(t), + combined{std::move(h), default_handler{}}); + } + } + + /** Launch task with separate success/error handlers. + + @param t The task to execute. 
+ @param h1 Handler called on success with the result value + (or no args for void tasks). + @param h2 Handler called on error with exception_ptr. + */ + template + void operator()(task t, H1 h1, H2 h2) && + { + using combined = handler_pair; + run_async_run_task( + std::move(d_), std::move(t), + combined{std::move(h1), std::move(h2)}); + } +}; + +} // namespace detail + +/** Creates a runner to launch lazy tasks for detached execution. + + Returns an async_run_awaitable that captures the dispatcher and provides + operator() overloads to launch tasks. This is analogous to Asio's + `co_spawn`. The task begins executing when the dispatcher schedules + it; if the dispatcher permits inline execution, the task runs + immediately until it awaits an I/O operation. + + The dispatcher controls where and how the task resumes after each + suspension point. Tasks deal only with type-erased dispatchers + (`coro(coro)` signature), not typed executors. This leverages the + coroutine handle's natural type erasure. + + @par Dispatcher Behavior + The dispatcher is invoked to start the task and propagated through + the coroutine chain via the affine awaitable protocol. When the task + completes, the handler runs on the same dispatcher context. If inline + execution is permitted, the call chain proceeds synchronously until + an I/O await suspends execution. + + @par Usage + @code + io_context ioc; + auto ex = ioc.get_executor(); + + // Fire and forget (uses default_handler) + async_run(ex)(my_coroutine()); + + // Single overloaded handler + async_run(ex)(compute_value(), overload{ + [](int result) { std::cout << "Got: " << result << "\n"; }, + [](std::exception_ptr) { } + }); + + // Separate handlers: h1 for value, h2 for exception + async_run(ex)(compute_value(), + [](int result) { std::cout << result; }, + [](std::exception_ptr ep) { if (ep) std::rethrow_exception(ep); } + ); + + // Donate thread to run queued work + ioc.run(); + @endcode + + @param d The dispatcher that schedules and resumes the task. + + @return An async_run_awaitable object with operator() to launch tasks. + + @see async_run_awaitable + @see task + @see dispatcher +*/ +template +[[nodiscard]] auto async_run(Dispatcher d) +{ + return detail::async_run_awaitable{std::move(d), {}}; +} + +/** Creates a runner with an explicit frame allocator. + + @param d The dispatcher that schedules and resumes the task. + @param alloc The allocator for coroutine frame allocation. + + @return An async_run_awaitable object with operator() to launch tasks. + + @see async_run_awaitable +*/ +template< + dispatcher Dispatcher, + frame_allocator Allocator> +[[nodiscard]] auto async_run(Dispatcher d, Allocator alloc) +{ + return detail::async_run_awaitable< + Dispatcher, Allocator>{std::move(d), std::move(alloc)}; +} + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/config.hpp b/include/boost/capy/config.hpp new file mode 100644 index 00000000..7e08560e --- /dev/null +++ b/include/boost/capy/config.hpp @@ -0,0 +1,15 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_CONFIG_HPP +#define BOOST_CAPY_CONFIG_HPP + +#include + +#endif diff --git a/include/boost/capy/coro.hpp b/include/boost/capy/coro.hpp new file mode 100644 index 00000000..57210cbe --- /dev/null +++ b/include/boost/capy/coro.hpp @@ -0,0 +1,32 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_CORO_HPP +#define BOOST_CAPY_CORO_HPP + +#include +#include + +#if defined(__clang__) && !defined(__apple_build_version__) && __clang_major__ >= 20 +#define BOOST_CAPY_CORO_AWAIT_ELIDABLE [[clang::coro_await_elidable]] +#else +#define BOOST_CAPY_CORO_AWAIT_ELIDABLE +#endif + +namespace boost { +namespace capy { + +/** Alias for a type-erased coroutine handle +*/ +using coro = std::coroutine_handle<>; + +} // capy +} // boost + +#endif diff --git a/include/boost/capy/detail/config.hpp b/include/boost/capy/detail/config.hpp index d8974fa5..70a75ed2 100644 --- a/include/boost/capy/detail/config.hpp +++ b/include/boost/capy/detail/config.hpp @@ -16,6 +16,28 @@ # include #endif +// Detect thread-local storage mechanism +// Cascade: compiler keyword > thread_local > OS API +#if !defined(BOOST_CAPY_TLS_KEYWORD) +# if defined(_MSC_VER) +# define BOOST_CAPY_TLS_KEYWORD __declspec(thread) +# elif defined(__GNUC__) || defined(__clang__) +# define BOOST_CAPY_TLS_KEYWORD __thread +# endif +#endif + +#if !defined(BOOST_CAPY_HAS_THREAD_LOCAL) +# if defined(_MSC_VER) && _MSC_VER >= 1900 +# define BOOST_CAPY_HAS_THREAD_LOCAL 1 +# elif defined(__clang__) && __has_feature(cxx_thread_local) +# define BOOST_CAPY_HAS_THREAD_LOCAL 1 +# elif defined(__GNUC__) && __GNUC__ >= 5 +# define BOOST_CAPY_HAS_THREAD_LOCAL 1 +# else +# define BOOST_CAPY_HAS_THREAD_LOCAL 0 +# endif +#endif + namespace boost { namespace capy { @@ -44,14 +66,6 @@ namespace capy { //------------------------------------------------ -#if defined(__cpp_lib_coroutine) && __cpp_lib_coroutine >= 201902L -# define BOOST_CAPY_HAS_CORO 1 -#elif defined(__cpp_impl_coroutine) && __cpp_impl_coroutines >= 201902L -# define BOOST_CAPY_HAS_CORO 1 -#endif - -//------------------------------------------------ - // Add source location to error codes #ifdef BOOST_CAPY_NO_SOURCE_LOCATION # define BOOST_CAPY_ERR(ev) (::boost::system::error_code(ev)) diff --git a/include/boost/capy/detail/recycling_frame_allocator.hpp b/include/boost/capy/detail/recycling_frame_allocator.hpp new file mode 100644 index 00000000..e58f16a2 --- /dev/null +++ b/include/boost/capy/detail/recycling_frame_allocator.hpp @@ -0,0 +1,153 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/corosio +// + +#ifndef BOOST_CAPY_DETAIL_RECYCLING_FRAME_ALLOCATOR_HPP +#define BOOST_CAPY_DETAIL_RECYCLING_FRAME_ALLOCATOR_HPP + +#include + +#include +#include + +namespace boost { +namespace capy { +namespace detail { + +/** Recycling frame allocator with thread-local and global pools. 
+ + This allocator recycles memory blocks to reduce allocation overhead. + It maintains a thread-local pool for fast lock-free access and a + global pool for cross-thread block sharing. + + Blocks are tracked by size to avoid returning undersized blocks. + + This type satisfies the frame_allocator concept and is cheaply + copyable (all instances share the same static pools). +*/ +class recycling_frame_allocator +{ + struct block + { + block* next; + std::size_t size; + }; + + struct global_pool + { + std::mutex mtx; + block* head = nullptr; + + ~global_pool() + { + while(head) + { + auto p = head; + head = head->next; + ::operator delete(p); + } + } + + void push(block* b) + { + std::lock_guard lock(mtx); + b->next = head; + head = b; + } + + block* pop(std::size_t n) + { + std::lock_guard lock(mtx); + block** pp = &head; + while(*pp) + { + // block->size stores total allocated size (including header) + if((*pp)->size >= n + sizeof(block)) + { + block* p = *pp; + *pp = p->next; + return p; + } + pp = &(*pp)->next; + } + return nullptr; + } + }; + + struct local_pool + { + block* head = nullptr; + + void push(block* b) + { + b->next = head; + head = b; + } + + block* pop(std::size_t n) + { + block** pp = &head; + while(*pp) + { + // block->size stores total allocated size (including header) + if((*pp)->size >= n + sizeof(block)) + { + block* p = *pp; + *pp = p->next; + return p; + } + pp = &(*pp)->next; + } + return nullptr; + } + }; + + static local_pool& local() + { + static thread_local local_pool local; + return local; + } + + static global_pool& global() + { + static global_pool pool; + return pool; + } + +public: + void* allocate(std::size_t n) + { + std::size_t total = n + sizeof(block); + + if(auto* b = local().pop(n)) + return static_cast(static_cast(b)) + sizeof(block); + + if(auto* b = global().pop(n)) + return static_cast(static_cast(b)) + sizeof(block); + + auto* b = static_cast(::operator new(total)); + b->next = nullptr; + b->size = total; + return static_cast(static_cast(b)) + sizeof(block); + } + + void deallocate(void* p, std::size_t) + { + auto* b = static_cast(static_cast(static_cast(p) - sizeof(block))); + b->next = nullptr; + local().push(b); + } +}; + +static_assert(frame_allocator); + +} // namespace detail +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/execution_context.hpp b/include/boost/capy/execution_context.hpp new file mode 100644 index 00000000..8b115339 --- /dev/null +++ b/include/boost/capy/execution_context.hpp @@ -0,0 +1,645 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_EXECUTION_CONTEXT_HPP +#define BOOST_CAPY_EXECUTION_CONTEXT_HPP + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace boost { +namespace capy { + +/** Base class for I/O object containers providing service management. + + An execution context represents a place where function objects are + executed. It provides a service registry where polymorphic services + can be stored and retrieved by type. Each service type may be stored + at most once. Services may specify a nested `key_type` to enable + lookup by a base class type. 
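@par On-demand services (sketch)
A brief illustration of `use_service()` creating a service the first time
it is requested and returning the same instance afterwards; `logging_service`
is a hypothetical type, and the `key_type` mechanism is shown in the example
further below:
@code
struct logging_service : execution_context::service
{
    explicit logging_service(execution_context&) {}

protected:
    void shutdown() override {}
};

void configure(execution_context& ctx)
{
    auto& a = ctx.use_service<logging_service>(); // created on first use
    auto& b = ctx.use_service<logging_service>(); // same instance returned
    assert(&a == &b);
    assert(ctx.has_service<logging_service>());
}
@endcode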
+ + Derived classes such as `io_context` extend this to provide + execution facilities like event loops and thread pools. Derived + class destructors must call `shutdown()` and `destroy()` to ensure + proper service cleanup before member destruction. + + @par Service Lifecycle + Services are created on first use via `use_service()` or explicitly + via `make_service()`. During destruction, `shutdown()` is called on + each service in reverse order of creation, then `destroy()` deletes + them. Both functions are idempotent. + + @par Thread Safety + Service registration and lookup functions are thread-safe. + The `shutdown()` and `destroy()` functions are not thread-safe + and must only be called during destruction. + + @par Example + @code + struct file_service : execution_context::service + { + protected: + void shutdown() override {} + }; + + struct posix_file_service : file_service + { + using key_type = file_service; + + explicit posix_file_service(execution_context&) {} + }; + + class io_context : public execution_context + { + public: + ~io_context() + { + shutdown(); + destroy(); + } + }; + + io_context ctx; + ctx.make_service(); + ctx.find_service(); // returns posix_file_service* + ctx.find_service(); // also works + @endcode + + @see service, is_execution_context +*/ +class BOOST_CAPY_DECL + execution_context +{ + template + struct get_key : std::false_type + {}; + + template + struct get_key> : std::true_type + { + using type = typename T::key_type; + }; + +public: + //------------------------------------------------ + + /** Abstract base class for services owned by an execution context. + + Services provide extensible functionality to an execution context. + Each service type can be registered at most once. Services are + created via `use_service()` or `make_service()` and are owned by + the execution context for their lifetime. + + Derived classes must implement the pure virtual `shutdown()` member + function, which is called when the owning execution context is + being destroyed. The `shutdown()` function should release resources + and cancel outstanding operations without blocking. + + @par Deriving from service + @li Implement `shutdown()` to perform cleanup. + @li Accept `execution_context&` as the first constructor parameter. + @li Optionally define `key_type` to enable base-class lookup. + + @par Example + @code + struct my_service : execution_context::service + { + explicit my_service(execution_context&) {} + + protected: + void shutdown() override + { + // Cancel pending operations, release resources + } + }; + @endcode + + @see execution_context + */ + class service + { + public: + virtual ~service() = default; + + protected: + service() = default; + + /** Called when the owning execution context shuts down. + + Implementations should release resources and cancel any + outstanding asynchronous operations. This function must + not block and must not throw exceptions. Services are + shut down in reverse order of creation. + + @par Exception Safety + No-throw guarantee. + */ + virtual void shutdown() = 0; + + private: + friend class execution_context; + + service* next_ = nullptr; + std::type_index t0_ = typeid(void); + std::type_index t1_ = typeid(void); + }; + + //------------------------------------------------ + + /** Abstract base class for completion handlers. + + Handlers are continuations that execute after an asynchronous + operation completes. They can be queued for deferred invocation, + allowing callbacks and coroutine resumptions to be posted to an + executor. 
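@par Caller-side sketch
A minimal, hypothetical drain loop illustrating the caller's side of the
ownership contract described below: each queued handler is either invoked
exactly once or discarded with `destroy()`, and never deleted directly:
@code
void drain(execution_context::queue& q)
{
    while(auto* h = q.pop())
        (*h)();        // invoke; the handler performs its own cleanup
}

void discard(execution_context::queue& q)
{
    while(auto* h = q.pop())
        h->destroy();  // never invoked; discard during shutdown or unwinding
}
@endcode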
+ + Handlers should execute quickly - typically just initiating + another I/O operation or suspending on a foreign task. Heavy + computation should be avoided in handlers to prevent blocking + the event loop. + + Handlers may be heap-allocated or may be data members of an + enclosing object. The allocation strategy is determined by the + creator of the handler. + + @par Ownership Contract + + Callers must invoke exactly ONE of `operator()` or `destroy()`, + never both: + + @li `operator()` - Invokes the handler. The handler is + responsible for its own cleanup (typically `delete this` + for heap-allocated handlers). The caller must not call + `destroy()` after invoking this. + + @li `destroy()` - Destroys an uninvoked handler. This is + called when a queued handler must be discarded without + execution, such as during shutdown or exception cleanup. + For heap-allocated handlers, this typically calls + `delete this`. + + @par Exception Safety + + Implementations of `operator()` must perform cleanup before + any operation that might throw. This ensures that if the handler + throws, the exception propagates cleanly to the caller of + `run()` without leaking resources. Typical pattern: + + @code + void operator()() override + { + auto coro = coro_; + delete this; // cleanup FIRST + coro.resume(); // then resume (may throw) + } + @endcode + + This "delete-before-invoke" pattern also enables memory + recycling - the handler's memory can be reused immediately + by subsequent allocations. + + @note Callers must never delete handlers directly with `delete`; + use `operator()` for normal invocation or `destroy()` for cleanup. + + @note Heap-allocated handlers are typically allocated with + custom allocators to minimize allocation overhead in + high-frequency async operations. + + @note Some handlers (such as those owned by containers like + `std::unique_ptr` or embedded as data members) are not meant to + be destroyed and should implement both functions as no-ops + (for `operator()`, invoke the continuation but don't delete). + + @see queue + */ + class handler : public intrusive_queue::node + { + public: + virtual void operator()() = 0; + virtual void destroy() = 0; + + /** Returns the user-defined data pointer. + + Derived classes may set this to store auxiliary data + such as a pointer to the most-derived object. + + @par Postconditions + @li Initially returns `nullptr` for newly constructed handlers. + @li Returns the current value of `data_` if modified by a derived class. + + @return The user-defined data pointer, or `nullptr` if not set. + */ + void* data() const noexcept + { + return data_; + } + + protected: + ~handler() = default; + + void* data_ = nullptr; + }; + + //------------------------------------------------ + + /** An intrusive FIFO queue of handlers. + + This queue stores handlers using an intrusive linked list, + avoiding additional allocations for queue nodes. Handlers + are popped in the order they were pushed (first-in, first-out). + + The destructor calls `destroy()` on any remaining handlers. + + @note This is not thread-safe. External synchronization is + required for concurrent access. + + @see handler + */ + class queue + { + intrusive_queue q_; + + public: + /** Default constructor. + + Creates an empty queue. + + @post `empty() == true` + */ + queue() = default; + + /** Move constructor. + + Takes ownership of all handlers from `other`, + leaving `other` empty. + + @param other The queue to move from. 
+ + @post `other.empty() == true` + */ + queue(queue&& other) noexcept + : q_(std::move(other.q_)) + { + } + + queue(queue const&) = delete; + queue& operator=(queue const&) = delete; + queue& operator=(queue&&) = delete; + + /** Destructor. + + Calls `destroy()` on any remaining handlers in the queue. + */ + ~queue() + { + while(auto* h = q_.pop()) + h->destroy(); + } + + /** Return true if the queue is empty. + + @return `true` if the queue contains no handlers. + */ + bool + empty() const noexcept + { + return q_.empty(); + } + + /** Add a handler to the back of the queue. + + @param h Pointer to the handler to add. + + @pre `h` is not null and not already in a queue. + */ + void + push(handler* h) noexcept + { + q_.push(h); + } + + /** Splice all handlers from another queue to the back. + + All handlers from `other` are moved to the back of this + queue. After this call, `other` is empty. + + @param other The queue to splice from. + + @post `other.empty() == true` + */ + void + push(queue& other) noexcept + { + q_.splice(other.q_); + } + + /** Remove and return the front handler. + + @return Pointer to the front handler, or `nullptr` + if the queue is empty. + */ + handler* + pop() noexcept + { + return q_.pop(); + } + }; + + //------------------------------------------------ + + execution_context(execution_context const&) = delete; + + execution_context& operator=(execution_context const&) = delete; + + /** Destructor. + + Calls `shutdown()` then `destroy()` to clean up all services. + + @par Effects + All services are shut down and deleted in reverse order + of creation. + + @par Exception Safety + No-throw guarantee. + */ + BOOST_CAPY_DECL + ~execution_context(); + + /** Default constructor. + + @par Exception Safety + Strong guarantee. + */ + BOOST_CAPY_DECL + execution_context(); + + /** Return true if a service of type T exists. + + @par Thread Safety + Thread-safe. + + @tparam T The type of service to check. + + @return `true` if the service exists. + */ + template + bool has_service() const noexcept + { + return find_service() != nullptr; + } + + /** Return a pointer to the service of type T, or nullptr. + + @par Thread Safety + Thread-safe. + + @tparam T The type of service to find. + + @return A pointer to the service, or `nullptr` if not present. + */ + template + T* find_service() const noexcept + { + std::lock_guard lock(mutex_); + return static_cast(find_impl(typeid(T))); + } + + /** Return a reference to the service of type T, creating it if needed. + + If no service of type T exists, one is created by calling + `T(execution_context&)`. If T has a nested `key_type`, the + service is also indexed under that type. + + @par Constraints + @li `T` must derive from `service`. + @li `T` must be constructible from `execution_context&`. + + @par Exception Safety + Strong guarantee. If service creation throws, the container + is unchanged. + + @par Thread Safety + Thread-safe. + + @tparam T The type of service to retrieve or create. + + @return A reference to the service. + */ + template + T& use_service() + { + static_assert(std::is_base_of::value, + "T must derive from service"); + static_assert(std::is_constructible::value, + "T must be constructible from execution_context&"); + + struct impl : factory + { + impl() + : factory( + typeid(T), + get_key::value + ? 
typeid(typename get_key::type) + : typeid(T)) + { + } + + service* create(execution_context& ctx) override + { + return new T(ctx); + } + }; + + impl f; + return static_cast(use_service_impl(f)); + } + + /** Construct and add a service. + + A new service of type T is constructed using the provided + arguments and added to the container. If T has a nested + `key_type`, the service is also indexed under that type. + + @par Constraints + @li `T` must derive from `service`. + @li `T` must be constructible from `execution_context&, Args...`. + @li If `T::key_type` exists, `T&` must be convertible to `key_type&`. + + @par Exception Safety + Strong guarantee. If service creation throws, the container + is unchanged. + + @par Thread Safety + Thread-safe. + + @throws std::invalid_argument if a service of the same type + or `key_type` already exists. + + @tparam T The type of service to create. + + @param args Arguments forwarded to the constructor of T. + + @return A reference to the created service. + */ + template + T& make_service(Args&&... args) + { + static_assert(std::is_base_of::value, + "T must derive from service"); + if constexpr(get_key::value) + { + static_assert( + std::is_convertible::type&>::value, + "T& must be convertible to key_type&"); + } + + struct impl : factory + { + std::tuple args_; + + explicit impl(Args&&... a) + : factory( + typeid(T), + get_key::value + ? typeid(typename get_key::type) + : typeid(T)) + , args_(std::forward(a)...) + { + } + + service* create(execution_context& ctx) override + { + return std::apply([&ctx](auto&&... a) { + return new T(ctx, std::forward(a)...); + }, std::move(args_)); + } + }; + + impl f(std::forward(args)...); + return static_cast(make_service_impl(f)); + } + +protected: + /** Shut down all services. + + Calls `shutdown()` on each service in reverse order of creation. + After this call, services remain allocated but are in a stopped + state. Derived classes should call this in their destructor + before any members are destroyed. This function is idempotent; + subsequent calls have no effect. + + @par Effects + Each service's `shutdown()` member function is invoked once. + + @par Postconditions + @li All services are in a stopped state. + + @par Exception Safety + No-throw guarantee. + + @par Thread Safety + Not thread-safe. Must not be called concurrently with other + operations on this execution_context. + */ + BOOST_CAPY_DECL + void shutdown() noexcept; + + /** Destroy all services. + + Deletes all services in reverse order of creation. Derived + classes should call this as the final step of destruction. + This function is idempotent; subsequent calls have no effect. + + @par Preconditions + @li `shutdown()` has been called. + + @par Effects + All services are deleted and removed from the container. + + @par Postconditions + @li The service container is empty. + + @par Exception Safety + No-throw guarantee. + + @par Thread Safety + Not thread-safe. Must not be called concurrently with other + operations on this execution_context. 
+ */ + BOOST_CAPY_DECL + void destroy() noexcept; + +private: + struct factory + { + std::type_index t0; + std::type_index t1; + + factory(std::type_index t0_, std::type_index t1_) + : t0(t0_), t1(t1_) + { + } + + virtual service* create(execution_context&) = 0; + + protected: + ~factory() = default; + }; + + service* find_impl(std::type_index ti) const noexcept; + service& use_service_impl(factory& f); + service& make_service_impl(factory& f); + + mutable std::mutex mutex_; + service* head_ = nullptr; + bool shutdown_ = false; +}; + +//------------------------------------------------ + +/** Concept for types meeting ExecutionContext requirements. + + A type X satisfies is_execution_context if it is publicly + and unambiguously derived from execution_context and + provides the following: + + @li `X::executor_type` - A nested type meeting + the executor concept requirements. + + @li `x.get_executor()` - Returns an executor + object associated with the execution context. + + @li `x.~X()` - The destructor must destroy all + unexecuted work that was submitted via an executor + object associated with the execution context. This + is a semantic requirement that cannot be verified + at compile time. + + @see executor +*/ +template +concept is_execution_context = + std::derived_from && + requires { typename X::executor_type; } && + executor && + requires(X& x) { + { x.get_executor() } -> std::same_as; + }; + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/executor.hpp b/include/boost/capy/executor.hpp index 5fdfd639..55fd0a32 100644 --- a/include/boost/capy/executor.hpp +++ b/include/boost/capy/executor.hpp @@ -11,761 +11,86 @@ #define BOOST_CAPY_EXECUTOR_HPP #include -#include -#include -#include -#include -#include -#include -#include -#include -#include + +#include +#include namespace boost { namespace capy { -#if 0 -class execution_context -{ -public: -private: - void post(work* w); -}; -#endif - -/** A lightweight handle for submitting work to an execution context. - - This class provides a value-type interface for submitting - work to be executed asynchronously. It supports two modes: - - @li **Reference mode**: Non-owning reference to an execution - context. The caller must ensure the context outlives all - executors that reference it. Created via the constructor. - - @li **Owning mode**: Shared ownership of a value-type executor. - The executor is stored internally and its lifetime is - managed automatically. Created via the `wrap()` factory. - - @par Thread Safety - Distinct objects may be accessed concurrently. Shared objects - require external synchronization. - - @par Implementing an Execution Context - - Both execution contexts (for reference mode) and value-type - executors (for owning mode) must declare - `friend struct executor::access` and provide three private - member functions: - - @li `void* allocate(std::size_t size, std::size_t align)` — - Allocate storage for a work item. May throw. - - @li `void deallocate(void* p, std::size_t size, std::size_t align)` — - Free storage previously returned by allocate. Must not throw. - - @li `void submit(executor::work* w)` — - Take ownership of the work item and arrange for execution. - The context must eventually call `w->invoke()`, then - `w->~work()`, then deallocate the storage. - - All three functions must be safe to call concurrently. 
- - @par Example (Reference Mode) - @code - class my_pool - { - friend struct executor::access; - - std::mutex mutex_; - std::queue queue_; - - public: - void run_one() - { - executor::work* w = nullptr; - { - std::lock_guard lock(mutex_); - if(!queue_.empty()) - { - w = queue_.front(); - queue_.pop(); - } - } - if(w) - { - w->invoke(); - std::size_t size = w->size; - std::size_t align = w->align; - w->~work(); - deallocate(w, size, align); - } - } - - private: - void* allocate(std::size_t size, std::size_t) - { - return std::malloc(size); - } - - void deallocate(void* p, std::size_t, std::size_t) - { - std::free(p); - } - - void submit(executor::work* w) - { - std::lock_guard lock(mutex_); - queue_.push(w); - } - }; - - // Usage: reference mode - my_pool pool; - executor ex(pool); // pool must outlive ex - @endcode - - @par Example (Owning Mode) - @code - struct my_strand - { - friend struct executor::access; - - // ... internal state ... - - private: - void* allocate(std::size_t size, std::size_t) - { - return std::malloc(size); - } - - void deallocate(void* p, std::size_t, std::size_t) - { - std::free(p); - } - - void submit(executor::work* w) - { - // ... queue and serialize work ... - } - }; - - // Usage: owning mode - executor ex = executor::from(my_strand{}); // executor owns the strand - @endcode -*/ -class executor -{ - struct ops; - - template - struct ops_for; - - template - struct holder; - - std::shared_ptr ops_; - void* obj_; - -public: - /** Abstract base for type-erased work. - - Implementations derive from this to wrap callable - objects for submission through the executor. - - @par Lifecycle - - When work is submitted via an executor: - @li Storage is allocated via the context's allocate() - @li A work-derived object is constructed in place - @li Ownership transfers to the context via submit() - @li The context calls invoke() to execute the work - @li The context destroys and deallocates the work - - @note Work objects must not be copied or moved after - construction. They are always destroyed in place. - - @note Execution contexts are responsible for tracking - the size and alignment of allocated work objects for - deallocation. A common pattern is to prepend metadata - to the allocation. - */ - struct BOOST_SYMBOL_VISIBLE work - { - virtual ~work() = default; - virtual void invoke() = 0; - }; - - class factory; - - /** Accessor for execution context private members. - - Execution contexts should declare this as a friend to - allow the executor machinery to call their private - allocate, deallocate, and submit members: - - @code - class my_context - { - friend struct executor::access; - // ... - private: - void* allocate(std::size_t, std::size_t); - void deallocate(void*, std::size_t, std::size_t); - void submit(executor::work*); - }; - @endcode - */ - struct access - { - template - static void* - allocate(T& ctx, std::size_t size, std::size_t align) - { - return ctx.allocate(size, align); - } - - template - static void - deallocate(T& ctx, void* p, std::size_t size, std::size_t align) - { - ctx.deallocate(p, size, align); - } - - template - static void - submit(T& ctx, work* w) - { - ctx.submit(w); - } - }; - - /** Construct an executor referencing an execution context. - - Creates an executor in reference mode. The executor holds - a non-owning reference to the context. 
- - The implementation type must provide: - - `void* allocate(std::size_t size, std::size_t align)` - - `void deallocate(void* p, std::size_t size, std::size_t align)` - - `void submit(executor::work* w)` - - @param ctx The execution context to reference. - The context must outlive this executor and all copies. - - @see from - */ - template< - class T, - class = typename std::enable_if< - !std::is_same< - typename std::decay::type, - executor>::value>::type> - executor(T& ctx) noexcept; - - /** Constructor - - Default-constructed executors are empty. - */ - executor() noexcept - : ops_() - , obj_(nullptr) - { - } - - /** Create an executor with shared ownership of a value-type executor. - - Creates an executor in owning mode. The provided executor - is moved into shared storage and its lifetime is managed - automatically via reference counting. - - The executor type must provide: - - `void* allocate(std::size_t size, std::size_t align)` - - `void deallocate(void* p, std::size_t size, std::size_t align)` - - `void submit(executor::work* w)` - - @param ex The executor to wrap (moved). - - @return An executor that shares ownership of the wrapped executor. - - @par Example - @code - // Wrap a value-type executor - executor ex = executor::wrap(my_strand{}); - - // Copies share ownership (reference counted) - executor exec2 = ex; // both reference the same strand - @endcode - */ - template - static executor - wrap(Exec ex); - - /** Return true if the executor references an execution context. - */ - explicit - operator bool() const noexcept - { - return ops_ != nullptr; - } - - /** Submit work for execution (fire-and-forget). - - This overload uses the allocation-aware factory - mechanism, allowing the implementation to control - memory allocation strategy. - - @param f The callable to execute. - */ - template - void - post(F&& f) const; - - /** Submit work and invoke a handler on completion. - - The work function is executed asynchronously. When it - completes, the handler is invoked with the result or - any exception that was thrown. - - The handler must be invocable with the signature: - @code - void handler( system::result ); - @endcode - where `T` is the return type of `f`. - - @param f The work function to execute. +/** Concept for executor types. - @param handler The completion handler invoked with - the result or exception. - */ - template - auto - submit(F&& f, Handler&& handler) const -> - typename std::enable_if::type>::return_type>::value>::type; + An executor provides mechanisms for scheduling work for + execution. A type meeting the executor requirements embodies + a set of rules for determining how submitted function objects + are to be executed. - /** Submit work and invoke a handler on completion. + @par Required Operations - The work function is executed asynchronously. When it - completes, the handler is invoked with success or any - exception that was thrown. + @li `context()` - Returns a reference to the associated + execution context. - The handler must be invocable with the signature: - @code - void handler( system::result ); - @endcode + @li `on_work_started()` - Informs the executor that work is + beginning. Must be paired with `on_work_finished()`. - @param f The work function to execute. + @li `on_work_finished()` - Informs the executor that work has + completed. Precondition: a preceding call to + `on_work_started()` on an equal executor. - @param handler The completion handler invoked with - the result or exception. 
- */ - template - auto - submit(F&& f, Handler&& handler) const -> - typename std::enable_if::type - >::return_type>::value>::type; + @li `dispatch(h)` - Execute a coroutine, potentially immediately + if the executor determines it is safe to do so. -#ifdef BOOST_CAPY_HAS_CORO + @li `post(h)` - Queue a coroutine for later execution. Shall not + block forward progress of the caller. - /** Submit work and return an awaitable result. + @li `defer(h)` - Queue a coroutine for later execution, with + a hint that the caller prefers deferral. Semantically + identical to `post`, but conveys that the coroutine is a + continuation of the current call context. - The work function is executed asynchronously. The - returned async_op can be awaited in a coroutine - to obtain the result. + @par No-Throw Guarantee - @param f The work function to execute. + The following operations shall not exit via an exception: + constructors, comparison operators, copy/move operations, + swap, `context()`, `on_work_started()`, and `on_work_finished()`. - @return An awaitable that produces the result of the work. - */ - template - auto - submit(F&& f) const -> - async_op>> - requires (!std::is_void_v>>); - - /** Submit work and return an awaitable result. - - The work function is executed asynchronously. The returned - async_op can be awaited in a coroutine to wait - for completion. - - @param f The work function to execute. - - @return An awaitable that completes when the work finishes. - */ - template - auto - submit(F&& f) const -> - async_op - requires std::is_void_v>>; - -#endif -}; - -//----------------------------------------------------------------------------- - -/** Static vtable for type-erased executor operations. -*/ -struct executor::ops -{ - void* (*allocate)(void* obj, std::size_t size, std::size_t align); - void (*deallocate)(void* obj, void* p, std::size_t size, std::size_t align); - void (*submit)(void* obj, work* w); -}; - -/** Type-specific operation implementations. - - For each concrete type T, this provides static functions - that cast the void* back to T* and forward via access. -*/ -template -struct executor::ops_for -{ - static void* - allocate(void* obj, std::size_t size, std::size_t align) - { - return access::allocate(*static_cast(obj), size, align); - } - - static void - deallocate(void* obj, void* p, std::size_t size, std::size_t align) - { - access::deallocate(*static_cast(obj), p, size, align); - } - - static void - submit(void* obj, work* w) - { - access::submit(*static_cast(obj), w); - } - - static constexpr ops table = { - &allocate, - &deallocate, - &submit - }; -}; - -template -constexpr executor::ops executor::ops_for::table; - -//----------------------------------------------------------------------------- - -/** Holder for value-type executors in owning mode. - - Stores the executor by value and provides the vtable - implementation that forwards to the held executor. 
-*/ -template -struct executor::holder -{ - Exec ex; - - explicit - holder(Exec e) - : ex(std::move(e)) - { - } - - static void* - allocate(void* obj, std::size_t size, std::size_t align) - { - return access::allocate( - static_cast(obj)->ex, size, align); - } - - static void - deallocate(void* obj, void* p, std::size_t size, std::size_t align) - { - access::deallocate( - static_cast(obj)->ex, p, size, align); - } - - static void - submit(void* obj, work* w) - { - access::submit( - static_cast(obj)->ex, w); - } - - static constexpr ops table = { - &allocate, - &deallocate, - &submit - }; -}; - -template -constexpr executor::ops executor::holder::table; - -//----------------------------------------------------------------------------- - -namespace detail { - -// Null deleter for shared_ptr pointing to static storage -struct null_deleter -{ - void operator()(const void*) const noexcept {} -}; - -} // detail - -template -executor:: -executor(T& ctx) noexcept - : ops_( - &ops_for::type>::table, - detail::null_deleter()) - , obj_(const_cast(static_cast(std::addressof(ctx)))) -{ -} - -template -executor -executor:: -wrap(Exec ex0) -{ - typedef typename std::decay::type exec_type; - typedef holder holder_type; - - std::shared_ptr h = - std::make_shared(std::move(ex0)); - - executor ex; - // Use aliasing constructor: share ownership with h, - // but point to the static vtable - ex.ops_ = std::shared_ptr(h, &holder_type::table); - ex.obj_ = h.get(); - return ex; -} + @par Thread Safety -//----------------------------------------------------------------------------- + The executor copy constructor, comparison operators, and other + member functions shall not introduce data races as a result of + concurrent calls from different threads. -/** RAII factory for constructing and submitting work. + @par Executor Validity - This class manages the multi-phase process of: - 1. Allocating storage from the executor implementation - 2. Constructing work in-place via placement-new - 3. Submitting the work for execution + Let `ctx` be the execution context returned by `context()`. + An executor becomes invalid when the first call to + `ctx.shutdown()` returns. The effect of calling + `on_work_started`, `on_work_finished`, `dispatch`, `post`, + or `defer` on an invalid executor is undefined. - If an exception occurs before commit(), the destructor - will clean up any allocated resources. + @note The copy constructor, comparison operators, and `context()` + remain valid until `ctx` is destroyed. - @par Exception Safety - Strong guarantee. If any operation throws, all resources - are properly released. + @tparam E The type to check for executor conformance. */ -class executor::factory -{ - ops const* ops_; - void* obj_; - void* storage_; - std::size_t size_; - std::size_t align_; - bool committed_; - -public: - /** Construct a factory bound to an executor. - - @param ex The executor to submit work to. - */ - explicit - factory(executor const& ex) noexcept - : ops_(ex.ops_.get()) - , obj_(ex.obj_) - , storage_(nullptr) - , size_(0) - , align_(0) - , committed_(false) - { - } - - /** Destructor. Releases resources if not committed. - */ - ~factory() - { - if(storage_ && !committed_) - ops_->deallocate(obj_, storage_, size_, align_); - } - - factory(factory const&) = delete; - factory& operator=(factory const&) = delete; - - /** Allocate storage for work of given size and alignment. - - @param size The size in bytes required. - @param align The alignment required. - @return Pointer to uninitialized storage. 
- */ - void* - allocate(std::size_t size, std::size_t align) - { - storage_ = ops_->allocate(obj_, size, align); - size_ = size; - align_ = align; - return storage_; - } - - /** Submit constructed work for execution. - - After calling commit(), the factory releases ownership - and the destructor becomes a no-op. - - @param w Pointer to the constructed work object - (must reside in the allocated storage). - */ - void - commit(work* w) - { - committed_ = true; - ops_->submit(obj_, w); - } -}; - -//----------------------------------------------------------------------------- - -template -void -executor:: -post(F&& f) const -{ - struct callable : work - { - typename std::decay::type f_; - - explicit - callable(F&& f) - : f_(std::forward(f)) - { - } - - void - invoke() override - { - f_(); - } - }; - - factory fac(*this); - void* p = fac.allocate(sizeof(callable), alignof(callable)); - callable* w = ::new(p) callable(std::forward(f)); - fac.commit(w); -} - -//----------------------------------------------------------------------------- - -template -auto -executor:: -submit(F&& f, Handler&& handler) const -> - typename std::enable_if::type - >::return_type>::value>::type -{ - using T = typename detail::call_traits< - typename std::decay::type>::return_type; - using result_type = system::result; - - struct callable - { - typename std::decay::type f; - typename std::decay::type handler; - - void operator()() - { - try - { - handler(result_type(f())); - } - catch(...) - { - handler(result_type(std::current_exception())); - } - } +template +concept executor = + std::copy_constructible && + std::equality_comparable && + requires(E& e, E const& ce, std::coroutine_handle<> h) { + // Execution context access + { ce.context() } -> std::same_as; + + // Work tracking (must not throw) + { ce.on_work_started() } noexcept; + { ce.on_work_finished() } noexcept; + + // Work submission + { ce.dispatch(h) } -> std::convertible_to>; + { ce.post(h) }; + { ce.defer(h) }; }; - post(callable{std::forward(f), std::forward(handler)}); -} - -template -auto -executor:: -submit(F&& f, Handler&& handler) const -> - typename std::enable_if::type - >::return_type>::value>::type -{ - using result_type = system::result; - - struct callable - { - typename std::decay::type f; - typename std::decay::type handler; - - void operator()() - { - try - { - f(); - handler(result_type()); - } - catch(...) 
- { - handler(result_type(std::current_exception())); - } - } - }; - - post(callable{std::forward(f), std::forward(handler)}); -} - -#ifdef BOOST_CAPY_HAS_CORO - -template -auto -executor:: -submit(F&& f) const -> - async_op>> - requires (!std::is_void_v>>) -{ - using T = std::invoke_result_t>; - - return make_async_op( - [ex = *this, f = std::forward(f)](auto on_done) mutable - { - ex.post( - [f = std::move(f), - on_done = std::move(on_done)]() mutable - { - on_done(f()); - }); - }); -} - -template -auto -executor:: -submit(F&& f) const -> - async_op - requires std::is_void_v>> -{ - return make_async_op( - [ex = *this, f = std::forward(f)](auto on_done) mutable - { - ex.post( - [f = std::move(f), - on_done = std::move(on_done)]() mutable - { - f(); - on_done(); - }); - }); -} - -#endif - } // capy } // boost diff --git a/include/boost/capy/executor_work_guard.hpp b/include/boost/capy/executor_work_guard.hpp new file mode 100644 index 00000000..7958ad5e --- /dev/null +++ b/include/boost/capy/executor_work_guard.hpp @@ -0,0 +1,253 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_EXECUTOR_WORK_GUARD_HPP +#define BOOST_CAPY_EXECUTOR_WORK_GUARD_HPP + +#include +#include +#include + +#include + +namespace boost { +namespace capy { + +/** RAII guard that keeps an executor's context from completing. + + This class holds "work" on an executor, preventing the associated + execution context's `run()` function from returning due to lack of + work. It calls `on_work_started()` on construction and + `on_work_finished()` on destruction, ensuring proper work tracking. + + The guard is useful when you need to keep an execution context + running while waiting for external events or when work will be + posted later. + + @par RAII Semantics + + @li Construction calls `ex.on_work_started()`. + @li Destruction calls `ex.on_work_finished()` if `owns_work()`. + @li Copy construction creates a new work reference (calls + `on_work_started()` again). + @li Move construction transfers ownership without additional calls. + + @par Thread Safety + + Distinct objects may be accessed concurrently. Access to a single + object requires external synchronization. + + @par Example + @code + io_context ctx; + + // Keep context running while we set things up + auto guard = make_work_guard(ctx); + + std::thread t([&ctx]{ ctx.run(); }); + + // ... post work to ctx ... + + // Allow context to complete when work is done + guard.reset(); + + t.join(); + @endcode + + @tparam Executor A type satisfying the executor concept. + + @see make_work_guard, executor +*/ +template +class executor_work_guard +{ + Executor ex_; + bool owns_; + +public: + /** The underlying executor type. */ + using executor_type = Executor; + + /** Construct a work guard. + + Calls `ex.on_work_started()` to inform the executor that + work is outstanding. + + @par Exception Safety + No-throw guarantee. + + @par Postconditions + @li `owns_work() == true` + @li `get_executor() == ex` + + @param ex The executor to hold work on. Moved into the guard. + */ + explicit + executor_work_guard(Executor ex) noexcept + : ex_(std::move(ex)) + , owns_(true) + { + ex_.on_work_started(); + } + + /** Copy constructor. + + Creates a new work guard holding work on the same executor. 
+ Calls `on_work_started()` on the executor. + + @par Exception Safety + No-throw guarantee. + + @par Postconditions + @li `owns_work() == other.owns_work()` + @li `get_executor() == other.get_executor()` + + @param other The work guard to copy from. + */ + executor_work_guard(executor_work_guard const& other) noexcept + : ex_(other.ex_) + , owns_(other.owns_) + { + if(owns_) + ex_.on_work_started(); + } + + /** Move constructor. + + Transfers work ownership from `other` to `*this`. Does not + call `on_work_started()` or `on_work_finished()`. + + @par Exception Safety + No-throw guarantee. + + @par Postconditions + @li `owns_work()` equals the prior value of `other.owns_work()` + @li `other.owns_work() == false` + + @param other The work guard to move from. + */ + executor_work_guard(executor_work_guard&& other) noexcept + : ex_(std::move(other.ex_)) + , owns_(other.owns_) + { + other.owns_ = false; + } + + /** Destructor. + + If `owns_work()` is `true`, calls `on_work_finished()` on + the executor. + + @par Exception Safety + No-throw guarantee. + */ + ~executor_work_guard() + { + if(owns_) + ex_.on_work_finished(); + } + + executor_work_guard& operator=(executor_work_guard const&) = delete; + + /** Return the underlying executor. + + @par Exception Safety + No-throw guarantee. + + @return A copy of the stored executor. + */ + executor_type + get_executor() const noexcept + { + return ex_; + } + + /** Return whether the guard owns work. + + @par Exception Safety + No-throw guarantee. + + @return `true` if this guard will call `on_work_finished()` + on destruction, `false` otherwise. + */ + bool + owns_work() const noexcept + { + return owns_; + } + + /** Release ownership of the work. + + If `owns_work()` is `true`, calls `on_work_finished()` on + the executor and sets ownership to `false`. Otherwise, has + no effect. + + @par Exception Safety + No-throw guarantee. + + @par Postconditions + @li `owns_work() == false` + */ + void + reset() noexcept + { + if(owns_) + { + ex_.on_work_finished(); + owns_ = false; + } + } +}; + +//------------------------------------------------ + +/** Create a work guard from an executor. + + @par Exception Safety + No-throw guarantee. + + @param ex The executor to create the guard for. + + @return An `executor_work_guard` holding work on `ex`. + + @see executor_work_guard +*/ +template +executor_work_guard +make_work_guard(Executor ex) +{ + return executor_work_guard(std::move(ex)); +} + +/** Create a work guard from an execution context. + + Equivalent to `make_work_guard(ctx.get_executor())`. + + @par Exception Safety + No-throw guarantee. + + @param ctx The execution context to create the guard for. + + @return An `executor_work_guard` holding work on the context's + executor. + + @see executor_work_guard +*/ +template +executor_work_guard +make_work_guard(ExecutionContext& ctx) +{ + return executor_work_guard( + ctx.get_executor()); +} + +} // capy +} // boost + +#endif diff --git a/include/boost/capy/frame_allocator.hpp b/include/boost/capy/frame_allocator.hpp new file mode 100644 index 00000000..181eae11 --- /dev/null +++ b/include/boost/capy/frame_allocator.hpp @@ -0,0 +1,421 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_FRAME_ALLOCATOR_HPP +#define BOOST_CAPY_FRAME_ALLOCATOR_HPP + +#include + +#include +#include +#include +#include + +namespace boost { +namespace capy { + +//---------------------------------------------------------- +// Public API +//---------------------------------------------------------- + +/** A concept for types that can allocate and deallocate coroutine frames. + + Frame allocators must be cheaply copyable handles to an underlying + memory resource (e.g., a pointer to a pool). The framework copies + the allocator into the first coroutine frame for lifetime safety. + + @par Requirements + + Given: + @li `a` a reference to type `A` + @li `p` a `void*` + @li `n` a `std::size_t` + + The following expressions must be valid: + @li `a.allocate(n)` - Returns `void*` + @li `a.deallocate(p, n)` - Returns void + + @tparam A The type to check for frame allocator conformance. +*/ +template +concept frame_allocator = + std::copy_constructible && + requires(A& a, void* p, std::size_t n) { + { a.allocate(n) } -> std::same_as; + { a.deallocate(p, n) }; + }; + +/** A frame allocator that passes through to global new/delete. + + This allocator provides no pooling or recycling—each allocation + goes directly to `::operator new` and each deallocation goes to + `::operator delete`. It serves as a baseline for comparison and + as a fallback when pooling is not desired. +*/ +struct default_frame_allocator +{ + void* allocate(std::size_t n) + { + return ::operator new(n); + } + + void deallocate(void* p, std::size_t) + { + ::operator delete(p); + } +}; + +static_assert(frame_allocator); + +//---------------------------------------------------------- +// Implementation details +//---------------------------------------------------------- + +namespace detail { + +/** Abstract base class for internal frame allocator wrappers. + + This class provides a polymorphic interface used internally + by the frame allocation machinery. User-defined allocators + do not inherit from this class. +*/ +class frame_allocator_base +{ +public: + virtual ~frame_allocator_base() = default; + + /** Allocate memory for a coroutine frame. + + @param n The number of bytes to allocate. + + @return A pointer to the allocated memory. + */ + virtual void* allocate(std::size_t n) = 0; + + /** Deallocate memory for a child coroutine frame. + + @param p Pointer to the memory to deallocate. + @param n The user-requested size (not total allocation). + */ + virtual void deallocate(void* p, std::size_t n) = 0; + + /** Deallocate the first coroutine frame (where this wrapper is embedded). + + This method handles the special case where the wrapper itself + is embedded at the end of the block being deallocated. + + @param block Pointer to the block to deallocate. + @param user_size The user-requested size (not total allocation). + */ + virtual void deallocate_embedded(void* block, std::size_t user_size) = 0; +}; + +// Forward declaration +template +class frame_allocator_wrapper; + +/** Wrapper that embeds a frame_allocator_wrapper in the first allocation. + + This wrapper lives on the stack (in async_run_awaitable) and is used only + for the FIRST coroutine frame allocation. It embeds a copy of + frame_allocator_wrapper at the end of the allocated block, then + updates TLS to point to that embedded wrapper for subsequent + allocations. 
+ + @tparam Allocator The underlying allocator type satisfying frame_allocator. +*/ +template +class embedding_frame_allocator : public frame_allocator_base +{ + Allocator alloc_; + + static constexpr std::size_t alignment = alignof(void*); + + static_assert( + alignof(frame_allocator_wrapper) <= alignment, + "alignment must be at least as strict as wrapper alignment"); + + static std::size_t + aligned_offset(std::size_t n) noexcept + { + return (n + alignment - 1) & ~(alignment - 1); + } + +public: + explicit embedding_frame_allocator(Allocator a) + : alloc_(std::move(a)) + { + } + + void* + allocate(std::size_t n) override; + + void + deallocate(void*, std::size_t) override + { + // Never called - stack wrapper not used for deallocation + } + + void + deallocate_embedded(void*, std::size_t) override + { + // Never called + } +}; + +/** Wrapper embedded in the first coroutine frame. + + This wrapper is constructed at the end of the first coroutine + frame by embedding_frame_allocator. It handles all subsequent + allocations (storing a pointer to itself) and all deallocations. + + @tparam Allocator The underlying allocator type satisfying frame_allocator. +*/ +template +class frame_allocator_wrapper : public frame_allocator_base +{ + Allocator alloc_; + + static constexpr std::size_t alignment = alignof(void*); + + static std::size_t + aligned_offset(std::size_t n) noexcept + { + return (n + alignment - 1) & ~(alignment - 1); + } + +public: + explicit frame_allocator_wrapper(Allocator a) + : alloc_(std::move(a)) + { + } + + void* + allocate(std::size_t n) override + { + // Layout: [frame | ptr] + std::size_t ptr_offset = aligned_offset(n); + std::size_t total = ptr_offset + sizeof(frame_allocator_base*); + + void* raw = alloc_.allocate(total); + + // Store untagged pointer to self at fixed offset + auto* ptr_loc = reinterpret_cast( + static_cast(raw) + ptr_offset); + *ptr_loc = this; + + return raw; + } + + void + deallocate(void* block, std::size_t user_size) override + { + // Child frame deallocation: layout is [frame | ptr] + std::size_t ptr_offset = aligned_offset(user_size); + std::size_t total = ptr_offset + sizeof(frame_allocator_base*); + alloc_.deallocate(block, total); + } + + void + deallocate_embedded(void* block, std::size_t user_size) override + { + // First frame deallocation: layout is [frame | ptr | wrapper] + std::size_t ptr_offset = aligned_offset(user_size); + std::size_t wrapper_offset = ptr_offset + sizeof(frame_allocator_base*); + std::size_t total = wrapper_offset + sizeof(frame_allocator_wrapper); + + Allocator alloc_copy = alloc_; // Copy before destroying self + this->~frame_allocator_wrapper(); + alloc_copy.deallocate(block, total); + } +}; + +} // namespace detail + +/** Mixin base for promise types to support custom frame allocation. + + Derive your promise_type from this class to enable custom coroutine + frame allocation via a thread-local allocator pointer. + + The allocation strategy: + @li If a thread-local allocator is set, use it for allocation + @li Otherwise, fall back to global `::operator new`/`::operator delete` + + A pointer is stored at the end of each allocation to enable correct + deallocation regardless of which allocator was active at allocation time. 
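    A promise type opts in simply by inheriting from this base, so the
    compiler's frame allocation routes through the class-level
    `operator new`/`operator delete` defined below. A minimal sketch
    (illustrative only; `my_task` is a hypothetical fire-and-forget
    coroutine type, not the library's task):

    @code
    struct my_task
    {
        struct promise_type : frame_allocating_base
        {
            my_task get_return_object() { return {}; }
            std::suspend_never initial_suspend() noexcept { return {}; }
            std::suspend_never final_suspend() noexcept { return {}; }
            void return_void() {}
            void unhandled_exception() {}
        };
    };
    @endcode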
+ + @par Memory Layout + + For the first coroutine frame (allocated via embedding_frame_allocator): + @code + [coroutine frame | tagged_ptr | frame_allocator_wrapper] + @endcode + + For subsequent frames (allocated via frame_allocator_wrapper): + @code + [coroutine frame | ptr] + @endcode + + The tag bit (low bit) distinguishes the two cases during deallocation. + + @see frame_allocator +*/ +struct frame_allocating_base +{ +private: + static constexpr std::size_t alignment = alignof(void*); + + static std::size_t + aligned_offset(std::size_t n) noexcept + { + return (n + alignment - 1) & ~(alignment - 1); + } + + static detail::frame_allocator_base*& + current_allocator() noexcept + { + static thread_local detail::frame_allocator_base* alloc = nullptr; + return alloc; + } + +public: + /** Set the thread-local frame allocator. + + The allocator will be used for subsequent coroutine frame + allocations on this thread until changed or cleared. + + @param alloc The allocator to use. Must outlive all coroutines + allocated with it. + */ + static void + set_frame_allocator(detail::frame_allocator_base& alloc) noexcept + { + current_allocator() = &alloc; + } + + /** Clear the thread-local frame allocator. + + Subsequent allocations will use global `::operator new`. + */ + static void + clear_frame_allocator() noexcept + { + current_allocator() = nullptr; + } + + /** Get the current thread-local frame allocator. + + @return Pointer to current allocator, or nullptr if none set. + */ + static detail::frame_allocator_base* + get_frame_allocator() noexcept + { + return current_allocator(); + } + + static void* + operator new(std::size_t size) + { + auto* alloc = current_allocator(); + if(!alloc) + { + // No allocator: allocate extra space for null pointer marker + std::size_t ptr_offset = aligned_offset(size); + std::size_t total = ptr_offset + sizeof(detail::frame_allocator_base*); + void* raw = ::operator new(total); + + // Store nullptr to indicate global new/delete + auto* ptr_loc = reinterpret_cast( + static_cast(raw) + ptr_offset); + *ptr_loc = nullptr; + + return raw; + } + return alloc->allocate(size); + } + + /** Deallocate a coroutine frame. + + Reads the pointer stored at the end of the frame to find + the allocator. The tag bit (low bit) indicates whether + this is the first frame (with embedded wrapper) or a + child frame (with pointer to external wrapper). + + A null pointer indicates the frame was allocated with + global new/delete (no custom allocator was active). 
+ */ + static void + operator delete(void* ptr, std::size_t size) + { + // Pointer is always at aligned_offset(size) + std::size_t ptr_offset = aligned_offset(size); + auto* ptr_loc = reinterpret_cast( + static_cast(ptr) + ptr_offset); + auto raw_ptr = reinterpret_cast(*ptr_loc); + + // Null pointer means global new/delete + if(raw_ptr == 0) + { + std::size_t total = ptr_offset + sizeof(detail::frame_allocator_base*); + ::operator delete(ptr, total); + return; + } + + // Tag bit distinguishes first frame (embedded) from child frames + bool is_embedded = raw_ptr & 1; + auto* wrapper = reinterpret_cast( + raw_ptr & ~std::uintptr_t(1)); + + if(is_embedded) + wrapper->deallocate_embedded(ptr, size); + else + wrapper->deallocate(ptr, size); + } +}; + +//---------------------------------------------------------- +// embedding_frame_allocator implementation +// (must come after frame_allocating_base is defined) +//---------------------------------------------------------- + +namespace detail { + +template +void* +embedding_frame_allocator::allocate(std::size_t n) +{ + // Layout: [frame | ptr | wrapper] + std::size_t ptr_offset = aligned_offset(n); + std::size_t wrapper_offset = ptr_offset + sizeof(frame_allocator_base*); + std::size_t total = wrapper_offset + sizeof(frame_allocator_wrapper); + + void* raw = alloc_.allocate(total); + + // Construct embedded wrapper after the pointer + auto* wrapper_loc = static_cast(raw) + wrapper_offset; + auto* embedded = new (wrapper_loc) frame_allocator_wrapper(alloc_); + + // Store tagged pointer at fixed offset (bit 0 set = embedded) + auto* ptr_loc = reinterpret_cast( + static_cast(raw) + ptr_offset); + *ptr_loc = reinterpret_cast( + reinterpret_cast(embedded) | 1); + + // Update TLS to embedded wrapper for subsequent allocations + frame_allocating_base::set_frame_allocator(*embedded); + + return raw; +} + +} // namespace detail + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/intrusive_list.hpp b/include/boost/capy/intrusive_list.hpp new file mode 100644 index 00000000..b35d43d0 --- /dev/null +++ b/include/boost/capy/intrusive_list.hpp @@ -0,0 +1,196 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_INTRUSIVE_LIST_HPP +#define BOOST_CAPY_INTRUSIVE_LIST_HPP + +#include + +namespace boost { +namespace capy { + +/** An intrusive doubly linked list. + + This container provides O(1) push and pop operations for + elements that derive from @ref node. Elements are not + copied or moved; they are linked directly into the list. + + @par Usage + @code + struct my_item : intrusive_list::node + { + // user data + }; + + using item_list = intrusive_list; + + my_item item; + item_list q; + q.push_back(&item); + my_item* p = q.pop_front(); // p == &item + @endcode + + @tparam T The element type. Must derive from `intrusive_list::node`. +*/ +template +class intrusive_list +{ +public: + /** Base class for list elements. + + Derive from this class to make a type usable with + @ref intrusive_list. The `next_` and `prev_` pointers + are private and accessible only to the list. + */ + class node + { + friend class intrusive_list; + + private: + T* next_; + T* prev_; + }; + +private: + T* head_ = nullptr; + T* tail_ = nullptr; + +public: + /** Default constructor. 
+ + Creates an empty list. + + @post `empty() == true` + */ + intrusive_list() = default; + + /** Move constructor. + + Takes ownership of all elements from `other`, + leaving `other` empty. + + @param other The list to move from. + + @post `other.empty() == true` + */ + intrusive_list(intrusive_list&& other) noexcept + : head_(other.head_) + , tail_(other.tail_) + { + other.head_ = nullptr; + other.tail_ = nullptr; + } + + intrusive_list(intrusive_list const&) = delete; + intrusive_list& operator=(intrusive_list const&) = delete; + intrusive_list& operator=(intrusive_list&&) = delete; + + /** Return true if the list is empty. + + @return `true` if the list contains no elements. + */ + bool + empty() const noexcept + { + return head_ == nullptr; + } + + /** Add an element to the back of the list. + + @param w Pointer to the element to add. + + @pre `w` is not null and not already in a list. + */ + void + push_back(T* w) noexcept + { + w->next_ = nullptr; + w->prev_ = tail_; + if(tail_) + tail_->next_ = w; + else + head_ = w; + tail_ = w; + } + + /** Splice all elements from another list to the back. + + All elements from `other` are moved to the back of this + list. After this call, `other` is empty. + + @param other The list to splice from. + + @post `other.empty() == true` + */ + void + splice_back(intrusive_list& other) noexcept + { + if(other.empty()) + return; + if(tail_) + { + tail_->next_ = other.head_; + other.head_->prev_ = tail_; + tail_ = other.tail_; + } + else + { + head_ = other.head_; + tail_ = other.tail_; + } + other.head_ = nullptr; + other.tail_ = nullptr; + } + + /** Remove and return the front element. + + @return Pointer to the front element, or `nullptr` + if the list is empty. + */ + T* + pop_front() noexcept + { + if(!head_) + return nullptr; + T* w = head_; + head_ = head_->next_; + if(head_) + head_->prev_ = nullptr; + else + tail_ = nullptr; + return w; + } + + /** Remove a specific element from the list. + + Unlinks the given element from its current position + in the list. The element must be a member of this list. + + @param w Pointer to the element to remove. + + @pre `w` is not null and is currently in this list. + */ + void + remove(T* w) noexcept + { + if(w->prev_) + w->prev_->next_ = w->next_; + else + head_ = w->next_; + if(w->next_) + w->next_->prev_ = w->prev_; + else + tail_ = w->prev_; + } +}; + +} // capy +} // boost + +#endif diff --git a/include/boost/capy/intrusive_queue.hpp b/include/boost/capy/intrusive_queue.hpp new file mode 100644 index 00000000..c7f71b82 --- /dev/null +++ b/include/boost/capy/intrusive_queue.hpp @@ -0,0 +1,170 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_INTRUSIVE_QUEUE_HPP +#define BOOST_CAPY_INTRUSIVE_QUEUE_HPP + +#include + +namespace boost { +namespace capy { + +/** An intrusive singly linked FIFO queue. + + This container provides O(1) push and pop operations for + elements that derive from @ref node. Elements are not + copied or moved; they are linked directly into the queue. + + Unlike @ref intrusive_list, this uses only a single `next_` + pointer per node, saving memory at the cost of not supporting + O(1) removal of arbitrary elements. 
+ + @par Usage + @code + struct my_item : intrusive_queue::node + { + // user data + }; + + using item_queue = intrusive_queue; + + my_item item; + item_queue q; + q.push(&item); + my_item* p = q.pop(); // p == &item + @endcode + + @tparam T The element type. Must derive from `intrusive_queue::node`. + + @see intrusive_list +*/ +template +class intrusive_queue +{ +public: + /** Base class for queue elements. + + Derive from this class to make a type usable with + @ref intrusive_queue. The `next_` pointer is private + and accessible only to the queue. + */ + class node + { + friend class intrusive_queue; + + private: + T* next_; + }; + +private: + T* head_ = nullptr; + T* tail_ = nullptr; + +public: + /** Default constructor. + + Creates an empty queue. + + @post `empty() == true` + */ + intrusive_queue() = default; + + /** Move constructor. + + Takes ownership of all elements from `other`, + leaving `other` empty. + + @param other The queue to move from. + + @post `other.empty() == true` + */ + intrusive_queue(intrusive_queue&& other) noexcept + : head_(other.head_) + , tail_(other.tail_) + { + other.head_ = nullptr; + other.tail_ = nullptr; + } + + intrusive_queue(intrusive_queue const&) = delete; + intrusive_queue& operator=(intrusive_queue const&) = delete; + intrusive_queue& operator=(intrusive_queue&&) = delete; + + /** Return true if the queue is empty. + + @return `true` if the queue contains no elements. + */ + bool + empty() const noexcept + { + return head_ == nullptr; + } + + /** Add an element to the back of the queue. + + @param w Pointer to the element to add. + + @pre `w` is not null and not already in a queue. + */ + void + push(T* w) noexcept + { + w->next_ = nullptr; + if(tail_) + tail_->next_ = w; + else + head_ = w; + tail_ = w; + } + + /** Splice all elements from another queue to the back. + + All elements from `other` are moved to the back of this + queue. After this call, `other` is empty. + + @param other The queue to splice from. + + @post `other.empty() == true` + */ + void + splice(intrusive_queue& other) noexcept + { + if(other.empty()) + return; + if(tail_) + tail_->next_ = other.head_; + else + head_ = other.head_; + tail_ = other.tail_; + other.head_ = nullptr; + other.tail_ = nullptr; + } + + /** Remove and return the front element. + + @return Pointer to the front element, or `nullptr` + if the queue is empty. + */ + T* + pop() noexcept + { + if(!head_) + return nullptr; + T* w = head_; + head_ = head_->next_; + if(!head_) + tail_ = nullptr; + return w; + } +}; + +} // capy +} // boost + +#endif diff --git a/include/boost/capy/make_affine.hpp b/include/boost/capy/make_affine.hpp new file mode 100644 index 00000000..5545c23a --- /dev/null +++ b/include/boost/capy/make_affine.hpp @@ -0,0 +1,24 @@ +// +// make_affine.hpp +// +// Universal trampoline technique for providing scheduler affinity +// to legacy awaitables that don't implement the affine awaitable protocol. +// +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/corosio +// + +#ifndef BOOST_CAPY_MAKE_AFFINE_HPP +#define BOOST_CAPY_MAKE_AFFINE_HPP + +// This header now just re-exports make_affine from affine.hpp +// where the implementation lives to avoid duplicate definitions. 
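// A rough usage sketch of the trampoline (hedged: the exact call
// signature is defined in affine.hpp; `legacy_awaitable` and the
// single-argument form shown here are assumptions for illustration):
//
//     task<void> bridge()
//     {
//         // Wrap an awaitable that lacks the affine protocol so the
//         // resumed coroutine still returns to its own executor.
//         co_await make_affine(legacy_awaitable{});
//     }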
+ +#include + +#endif // BOOST_CAPY_MAKE_AFFINE_HPP diff --git a/include/boost/capy/path.hpp b/include/boost/capy/path.hpp new file mode 100644 index 00000000..e81bbe96 --- /dev/null +++ b/include/boost/capy/path.hpp @@ -0,0 +1,1389 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. +// https://www.boost.org/LICENSE_1_0.txt +// + +/******************************************************************************* + + IMPLEMENTATION NOTES + + This header declares `path` and `path_view` classes that follow the + "UTF-8 Everywhere" manifesto (https://utf8everywhere.org/). The design + solves the interface friction present in std::filesystem::path. + +================================================================================ +DESIGN PHILOSOPHY +================================================================================ + + 1. UTF-8 Everywhere + - All strings are UTF-8 encoded, using `char` and `std::string` + - No wchar_t, char8_t, char16_t, or char32_t in the public interface + - Conversion to native wide strings (Windows) happens at API boundaries + + 2. Universal Path Format + - Internal storage always uses forward slashes as separators + - "C:\foo\bar" is stored as "C:/foo/bar" + - This enables cross-platform serialization to files, JSON, databases + - Windows APIs accept forward slashes for most operations anyway + + 3. Simple Invariant + - ALL path and path_view objects hold syntactically valid paths + - Validation happens at construction time + - Once constructed, the path is guaranteed valid + - Empty string is a valid path (consistent with std::filesystem::path) + + 4. Two Error Handling Strategies + - Constructors throw system_error on invalid input + - Free functions return system::result for error-code style + +================================================================================ +VALIDITY RULES +================================================================================ + + A path is valid if and only if: + + 1. UTF-8 Well-Formed + - No invalid UTF-8 sequences + - No overlong encodings + - No surrogate code points (U+D800 to U+DFFF) + + 2. No Embedded NUL + - The string must not contain '\0' except as terminator + + 3. No Platform-Illegal Characters in Filenames + - Reject characters illegal on ANY platform (union of restrictions) + - Windows forbids: < > : " / \ | ? * + - Forward slash is the separator, not illegal + - Colon is allowed only in drive letter position (e.g., "C:") + - These characters in the path component (filename) are rejected: + * (asterisk) + ? (question mark) + < (less than) + > (greater than) + | (pipe) + " (double quote) + - Control characters (0x00-0x1F) are rejected + + 4. No Trailing Dots or Spaces in Components (Windows restriction) + - "foo." 
and "foo " are invalid as filename components + - This prevents paths that silently fail on Windows + + Validation function signature (internal): + + // Returns error_code, empty on success + system::error_code validate_path(std::string_view s) noexcept; + +================================================================================ +INTERNAL STORAGE +================================================================================ + + class path: + - Stores: std::string s_ + - The string uses forward slashes exclusively + - Null-terminated (std::string guarantees this) + + class path_view: + - Stores: char const* data_, std::size_t size_ + - Points into valid UTF-8 path data + - NOT necessarily null-terminated + +================================================================================ +CONSTRUCTION AND PARSING +================================================================================ + + path_view Construction: + ----------------------- + 1. path_view() noexcept + - Default constructs empty path_view (data_=nullptr, size_=0) + + 2. path_view(std::string_view s) + - Validate s + - If invalid, throw system_error with appropriate error_code + - If valid, store pointer and size + + 3. path_view(char const* s) + - Equivalent to path_view(std::string_view(s)) + + 4. path_view(unchecked_t, char const* data, std::size_t size) [private] + - Used by path class to create views without re-validation + - Caller guarantees validity + + path Construction: + ------------------ + 1. path() noexcept + - Default constructs empty path + + 2. path(std::string_view s) + - Validate s + - If invalid, throw system_error + - If valid, copy into s_ + + 3. path(char const* s) + - Equivalent to path(std::string_view(s)) + + 4. path(std::string&& s) + - Validate s + - If invalid, throw system_error (string not moved) + - If valid, move into s_ + + 5. path(path_view pv) + - Copy pv's data into s_ + - No validation needed (path_view is already valid) + + 6. path(unchecked_t, std::string&& s) [private] + - Move without validation + - Used internally when validity is already established + + Free Function Factories: + ------------------------ + system::result try_parse_path_view(std::string_view s) noexcept + system::result try_parse_path_view(char const* s) noexcept + - Validate s + - Return path_view on success + - Return error_code on failure + + system::result try_parse_path(std::string_view s) noexcept + system::result try_parse_path(char const* s) noexcept + system::result try_parse_path(std::string&& s) noexcept + - Validate s + - Return path on success (moving string in rvalue overload) + - Return error_code on failure + +================================================================================ +ERROR CODES +================================================================================ + + Define a custom error category for path errors: + + enum class path_error + { + invalid_utf8 = 1, // Malformed UTF-8 sequence + embedded_null, // NUL character in path + illegal_character, // Character not allowed in filenames + invalid_drive_spec, // Malformed drive specification + trailing_dot_or_space, // Component ends with dot or space + }; + + Implement error_category for path_error to integrate with system::error_code. 
+ +================================================================================ +NATIVE CONVERSION +================================================================================ + + On POSIX: + --------- + native_string() returns a copy of the internal string (already native). + native_size() returns s_.size(). + to_native(span) copies s_ into the buffer. + + On Windows: + ----------- + native_string() returns a copy with '/' replaced by '\'. + native_wstring() converts UTF-8 to UTF-16 and '/' to '\'. + + native_size() returns s_.size() (same length, just different chars). + native_wsize() returns the number of wchar_t needed for UTF-16. + - This requires scanning the UTF-8 to count code points + - Non-BMP code points require 2 wchar_t (surrogate pair) + + to_native(span) copies with separator replacement. + to_native(span) converts UTF-8 to UTF-16 with separator replacement. + + UTF-8 to UTF-16 Conversion Notes: + - Use MultiByteToWideChar on Windows or manual conversion + - Each UTF-8 code point maps to 1 or 2 UTF-16 code units + - ASCII (including '/') maps 1:1 + + Cost Analysis: + - native_string() on POSIX: O(n) copy, no transformation + - native_string() on Windows: O(n) copy with in-place transform + - native_wstring() on Windows: O(n) allocation + UTF-8 to UTF-16 conversion + - These costs are acceptable because filesystem APIs are I/O bound + +================================================================================ +PATH DECOMPOSITION +================================================================================ + + All decomposition functions return path_view pointing into the original + path's storage. They do NOT allocate. + + Components of a path (using "C:/foo/bar/baz.txt" as example): + + root_name() -> "C:" (drive letter on Windows, empty on POSIX) + root_directory() -> "/" (the separator after root_name) + root_path() -> "C:/" (root_name + root_directory) + relative_path() -> "foo/bar/baz.txt" + parent_path() -> "C:/foo/bar" + filename() -> "baz.txt" + stem() -> "baz" + extension() -> ".txt" (includes the dot) + + For POSIX path "/foo/bar/baz.txt": + root_name() -> "" (empty) + root_directory() -> "/" + root_path() -> "/" + relative_path() -> "foo/bar/baz.txt" + parent_path() -> "/foo/bar" + filename() -> "baz.txt" + stem() -> "baz" + extension() -> ".txt" + + Edge cases: + - filename() of "/" is empty + - filename() of "/foo/" is empty + - filename() of "/foo/." is "." + - filename() of "/foo/.." is ".." 
+ - extension() of ".gitignore" is "" (filename is all stem) + - extension() of "archive.tar.gz" is ".gz" + - parent_path() of "/" is "/" + - parent_path() of "foo" is "" + + Implementation approach: + - Parse from right to left for filename/extension + - Parse from left to right for root_name/root_directory + - Return path_view using unchecked constructor (substrings of valid + paths are valid) + +================================================================================ +PATH MODIFICATION +================================================================================ + + append(path_view p) / operator/=(path_view p): + ---------------------------------------------- + - If p is absolute, replace *this with p + - If p has root_name different from *this, replace *this with p + - Otherwise, append separator (if needed) and p + - The result is always valid (both inputs are valid) + + concat(std::string_view s) / operator+=(std::string_view s): + ------------------------------------------------------------ + - Append s directly without separator + - MUST re-validate the result (s is arbitrary string) + - Throw system_error if result is invalid + + remove_filename(): + ------------------ + - Remove everything after the last separator + - "/foo/bar" -> "/foo/" + - "/foo/bar/" -> "/foo/bar/" (already no filename) + - "bar" -> "" + + replace_filename(path_view replacement): + ---------------------------------------- + - Equivalent to: remove_filename(); append(replacement); + - Throw if result invalid (shouldn't happen if replacement is valid) + + replace_extension(path_view replacement): + ----------------------------------------- + - Remove current extension (if any) + - If replacement is not empty and doesn't start with '.', add '.' + - Append replacement + - Validate and throw if invalid + +================================================================================ +PATH GENERATION +================================================================================ + + lexically_normal(): + ------------------- + - Remove redundant separators ("foo//bar" -> "foo/bar") + - Remove "." components ("foo/./bar" -> "foo/bar") + - Remove ".." and preceding component ("foo/bar/../baz" -> "foo/baz") + - Preserve leading ".." ("../foo" stays "../foo") + - Preserve root ("/../foo" -> "/foo", not "../foo") + - Return new path (does not modify *this) + + lexically_relative(path_view base): + ----------------------------------- + - Return a relative path from base to *this + - Both paths should be normalized first (internally) + - If no relative path exists, return empty path + - Example: "/a/b/c".lexically_relative("/a/d") -> "../b/c" + + lexically_proximate(path_view base): + ------------------------------------ + - Same as lexically_relative, but return *this if no relative path exists + - Never returns empty path + +================================================================================ +COMPONENT ITERATION (path::iterator) +================================================================================ + + The iterator yields path_view for each component. 
+ + For "C:/foo/bar": + *it++ -> "C:" + *it++ -> "/" + *it++ -> "foo" + *it++ -> "bar" + it == end() + + For "/foo/bar": + *it++ -> "/" + *it++ -> "foo" + *it++ -> "bar" + it == end() + + For "foo/bar": + *it++ -> "foo" + *it++ -> "bar" + it == end() + + Implementation: + - Store pointer to path, current position, current component length + - operator* returns path_view(unchecked_t{}, p_->data() + pos_, len_) + - operator++ scans forward to find next component + - operator-- scans backward to find previous component + - begin() initializes to first component + - end() has pos_ == p_->size() (past-the-end position) + + Bidirectional iterator requirements must be satisfied. + +================================================================================ +SEGMENT ITERATION (path::segment_range) +================================================================================ + + Similar to component iteration but yields std::string_view instead of + path_view. This is lighter weight for code that just needs string data. + + The segments are the same as components, just different return type. + + Implementation is identical to path::iterator but operator* returns + std::string_view instead of path_view. + +================================================================================ +COMPARISON AND HASHING +================================================================================ + + Comparison: + ----------- + - Lexicographic comparison of the underlying strings + - Case-sensitive (even on Windows - normalization is separate concern) + - compare() returns negative/zero/positive int + - operator== and operator<=> delegate to string comparison + + Hashing: + -------- + - hash_value() uses std::hash on the path data + - Must produce same hash for path and path_view with same content + - std::hash specializations delegate to hash_value() + + Implementation: + std::size_t hash_value(path const& p) noexcept { + return std::hash{}(p.string_view()); + } + std::size_t hash_value(path_view p) noexcept { + return std::hash{}(p.string_view()); + } + +================================================================================ +STREAM OPERATORS +================================================================================ + + operator<<(ostream&, path const&): + ---------------------------------- + - Output the path string directly + - Use quoted format if path contains spaces? 
(design choice) + - Simplest: os << p.string(); + + operator>>(istream&, path&): + ---------------------------- + - Read a string from the stream + - Validate and assign + - Set failbit if validation fails + +================================================================================ +NON-MEMBER OPERATORS +================================================================================ + + path operator/(path const& lhs, path_view rhs): + path result(lhs); + result /= rhs; + return result; + + path operator/(path&& lhs, path_view rhs): + lhs /= rhs; + return std::move(lhs); + +================================================================================ +THREAD SAFETY +================================================================================ + + - path and path_view have value semantics + - No shared mutable state + - Safe to use different instances in different threads + - Concurrent reads of same instance are safe + - Concurrent read/write of same instance requires external synchronization + +================================================================================ +EXCEPTION SAFETY +================================================================================ + + - Constructors: Strong guarantee (throw or succeed, no side effects) + - Modifiers: Strong guarantee where possible + - concat() may throw after modifying if validation fails - consider + validating first, then modifying (strong guarantee) + - Decomposition/query functions: noexcept (never throw) + +================================================================================ +DIFFERENCES FROM std::filesystem::path +================================================================================ + + 1. No encoding ambiguity + - Always UTF-8, no platform-dependent interpretation + - No u8string(), u16string(), u32string() - just string() + + 2. No implicit conversions + - Explicit constructors prevent silent conversions + - No implicit conversion to native string types + + 3. Clear separator handling + - Always forward slash internally + - Native conversion is explicit + + 4. Validation at construction + - Invalid paths cannot exist + - No need to check validity later + + 5. path_view for non-owning references + - Similar relationship as string/string_view + - Same invariant guarantee + + 6. Error handling choice + - Constructor throws (like std::filesystem::path) + - Free function returns result (for error code preference) + +*******************************************************************************/ + +#ifndef NET_PATH_HPP +#define NET_PATH_HPP + +#include + +#include +#include +#include +#include +#include +#include + +namespace net { + +namespace system = boost::system; + +// Forward declarations +class path; +class path_view; + +//------------------------------------------------------------------------------ + +/** A non-owning reference to a valid path string. + + Invariant: The referenced string is a syntactically valid path. + The path uses forward slashes as separators (universal format). + The string is UTF-8 encoded. +*/ +class path_view +{ + char const* data_ = nullptr; + std::size_t size_ = 0; + +public: + using value_type = char; + using const_iterator = char const*; + using iterator = const_iterator; + using size_type = std::size_t; + + /** Default constructor (empty path). + */ + path_view() noexcept = default; + + /** Construct from string_view, validates and throws on invalid. 
+ + @throws system_error on invalid path + */ + explicit path_view(std::string_view s); + + /** Construct from null-terminated string, validates and throws on invalid. + + @throws system_error on invalid path + */ + explicit path_view(char const* s); + + //-------------------------------------------- + // + // Observers + // + //-------------------------------------------- + + /** Return a pointer to the path data. + */ + char const* + data() const noexcept + { + return data_; + } + + /** Return the size of the path in bytes. + */ + std::size_t + size() const noexcept + { + return size_; + } + + /** Return true if the path is empty. + */ + bool + empty() const noexcept + { + return size_ == 0; + } + + /** Return an iterator to the beginning. + */ + const_iterator + begin() const noexcept + { + return data_; + } + + /** Return an iterator to the end. + */ + const_iterator + end() const noexcept + { + return data_ + size_; + } + + //-------------------------------------------- + // + // String access + // + //-------------------------------------------- + + /** Return the path as a string_view. + */ + std::string_view + string_view() const noexcept + { + return { data_, size_ }; + } + + /** Return the path as a string. + */ + std::string + string() const + { + return { data_, size_ }; + } + + //-------------------------------------------- + // + // Path decomposition + // + //-------------------------------------------- + + /** Return the root name (e.g., "C:" on Windows). + */ + path_view root_name() const noexcept; + + /** Return the root directory (e.g., "/"). + */ + path_view root_directory() const noexcept; + + /** Return the root path (root_name + root_directory). + */ + path_view root_path() const noexcept; + + /** Return the path relative to the root. + */ + path_view relative_path() const noexcept; + + /** Return the parent path. + */ + path_view parent_path() const noexcept; + + /** Return the filename component. + */ + path_view filename() const noexcept; + + /** Return the stem (filename without extension). + */ + path_view stem() const noexcept; + + /** Return the extension (including the dot). + */ + path_view extension() const noexcept; + + //-------------------------------------------- + // + // Query + // + //-------------------------------------------- + + /** Return true if the path is absolute. + */ + bool is_absolute() const noexcept; + + /** Return true if the path is relative. + */ + bool is_relative() const noexcept; + + /** Return true if the path has a root name. + */ + bool has_root_name() const noexcept; + + /** Return true if the path has a root directory. + */ + bool has_root_directory() const noexcept; + + /** Return true if the path has a root path. + */ + bool has_root_path() const noexcept; + + /** Return true if the path has a relative path. + */ + bool has_relative_path() const noexcept; + + /** Return true if the path has a parent path. + */ + bool has_parent_path() const noexcept; + + /** Return true if the path has a filename. + */ + bool has_filename() const noexcept; + + /** Return true if the path has a stem. + */ + bool has_stem() const noexcept; + + /** Return true if the path has an extension. + */ + bool has_extension() const noexcept; + + //-------------------------------------------- + // + // Comparison + // + //-------------------------------------------- + + /** Compare this path to another. 
+ + @return Negative if this < other, zero if equal, positive if this > other + */ + int compare(path_view other) const noexcept; + + /** Return true if two paths are equal. + */ + friend bool + operator==(path_view lhs, path_view rhs) noexcept; + + /** Three-way comparison of two paths. + */ + friend std::strong_ordering + operator<=>(path_view lhs, path_view rhs) noexcept; + +private: + friend class path; + + // Private unchecked constructor for use by path + struct unchecked_t {}; + + path_view( + unchecked_t, + char const* data, + std::size_t size) noexcept + : data_(data) + , size_(size) + { + } +}; + +/** Parse a string as a path_view without throwing. + + @return The path_view on success, or an error_code on failure +*/ +system::result +try_parse_path_view(std::string_view s) noexcept; + +/** Parse a null-terminated string as a path_view without throwing. + + @return The path_view on success, or an error_code on failure +*/ +system::result +try_parse_path_view(char const* s) noexcept; + +//------------------------------------------------------------------------------ + +/** An owning, mutable path string. + + Invariant: The string is a syntactically valid path. + The path uses forward slashes as separators (universal format). + The string is UTF-8 encoded. +*/ +class path +{ + std::string s_; + +public: + using value_type = char; + using string_type = std::string; + using size_type = std::size_t; + + class iterator; + using const_iterator = iterator; + + //-------------------------------------------- + // + // Construction + // + //-------------------------------------------- + + /** Default constructor (empty path). + */ + path() noexcept = default; + + /** Copy constructor. + */ + path(path const&) = default; + + /** Move constructor. + */ + path(path&&) noexcept = default; + + /** Copy assignment. + */ + path& operator=(path const&) = default; + + /** Move assignment. + */ + path& operator=(path&&) noexcept = default; + + /** Construct from string_view, validates and throws on invalid. + + @throws system_error on invalid path + */ + explicit path(std::string_view s); + + /** Construct from null-terminated string, validates and throws on invalid. + + @throws system_error on invalid path + */ + explicit path(char const* s); + + /** Construct from string (moves if valid), validates and throws on invalid. + + @throws system_error on invalid path + */ + explicit path(std::string&& s); + + /** Construct from path_view. + */ + path(path_view pv); + + //-------------------------------------------- + // + // Conversion + // + //-------------------------------------------- + + /** Convert to path_view. + */ + operator path_view() const noexcept; + + /** Return a path_view of this path. + */ + path_view + view() const noexcept + { + return path_view( + path_view::unchecked_t{}, + s_.data(), + s_.size()); + } + + //-------------------------------------------- + // + // String access (UTF-8, universal separators) + // + //-------------------------------------------- + + /** Return a null-terminated string. + */ + char const* + c_str() const noexcept + { + return s_.c_str(); + } + + /** Return a reference to the underlying string. + */ + std::string const& + string() const noexcept + { + return s_; + } + + /** Return a string_view of the path. 
+ */ + std::string_view + string_view() const noexcept + { + return s_; + } + + //-------------------------------------------- + // + // Native format conversion + // + //-------------------------------------------- + + /** Return the path in native format. + + On POSIX, this returns a copy of the internal string. + On Windows, this converts forward slashes to backslashes. + */ + std::string native_string() const; + +#ifdef _WIN32 + /** Return the path as a wide string in native format. + + Converts UTF-8 to UTF-16 and forward slashes to backslashes. + */ + std::wstring native_wstring() const; +#endif + + /** Return the size needed for native_string(). + */ + std::size_t + native_size() const noexcept + { + return s_.size(); + } + + /** Convert to native format into a caller-provided buffer. + + @param out The output buffer + */ + void to_native(std::span out) const; + +#ifdef _WIN32 + /** Return the size needed for native_wstring(). + */ + std::size_t native_wsize() const noexcept; + + /** Convert to native wide format into a caller-provided buffer. + + @param out The output buffer + */ + void to_native(std::span out) const; +#endif + + //-------------------------------------------- + // + // Modifiers + // + //-------------------------------------------- + + /** Clear the path. + */ + void + clear() noexcept + { + s_.clear(); + } + + /** Swap with another path. + */ + void + swap(path& other) noexcept + { + s_.swap(other.s_); + } + + /** Append a path component with separator. + + @throws system_error if the result is invalid + */ + path& append(path_view p); + + /** Concatenate without separator (re-validates). + + @throws system_error if the result is invalid + */ + path& concat(std::string_view s); + + /** Append a path component with separator. + + @throws system_error if the result is invalid + */ + path& + operator/=(path_view p) + { + return append(p); + } + + /** Concatenate without separator (re-validates). + + @throws system_error if the result is invalid + */ + path& + operator+=(std::string_view s) + { + return concat(s); + } + + /** Remove the filename component. + */ + path& remove_filename(); + + /** Replace the filename component. + + @throws system_error if the result is invalid + */ + path& replace_filename(path_view replacement); + + /** Replace the extension. + + @throws system_error if the result is invalid + */ + path& replace_extension(path_view replacement = {}); + + //-------------------------------------------- + // + // Decomposition (return views into this path) + // + //-------------------------------------------- + + /** Return the root name (e.g., "C:" on Windows). + */ + path_view root_name() const noexcept; + + /** Return the root directory (e.g., "/"). + */ + path_view root_directory() const noexcept; + + /** Return the root path (root_name + root_directory). + */ + path_view root_path() const noexcept; + + /** Return the path relative to the root. + */ + path_view relative_path() const noexcept; + + /** Return the parent path. + */ + path_view parent_path() const noexcept; + + /** Return the filename component. + */ + path_view filename() const noexcept; + + /** Return the stem (filename without extension). + */ + path_view stem() const noexcept; + + /** Return the extension (including the dot). + */ + path_view extension() const noexcept; + + //-------------------------------------------- + // + // Generation (return new paths) + // + //-------------------------------------------- + + /** Return the path in normal form. 
+ */ + path lexically_normal() const; + + /** Return the path relative to a base. + */ + path lexically_relative(path_view base) const; + + /** Return the path relative to a base, or *this if not possible. + */ + path lexically_proximate(path_view base) const; + + //-------------------------------------------- + // + // Query + // + //-------------------------------------------- + + /** Return true if the path is empty. + */ + bool + empty() const noexcept + { + return s_.empty(); + } + + /** Return true if the path is absolute. + */ + bool is_absolute() const noexcept; + + /** Return true if the path is relative. + */ + bool is_relative() const noexcept; + + /** Return true if the path has a root name. + */ + bool has_root_name() const noexcept; + + /** Return true if the path has a root directory. + */ + bool has_root_directory() const noexcept; + + /** Return true if the path has a root path. + */ + bool has_root_path() const noexcept; + + /** Return true if the path has a relative path. + */ + bool has_relative_path() const noexcept; + + /** Return true if the path has a parent path. + */ + bool has_parent_path() const noexcept; + + /** Return true if the path has a filename. + */ + bool has_filename() const noexcept; + + /** Return true if the path has a stem. + */ + bool has_stem() const noexcept; + + /** Return true if the path has an extension. + */ + bool has_extension() const noexcept; + + //-------------------------------------------- + // + // Component iteration (yields path_view) + // + //-------------------------------------------- + + /** Return an iterator to the first component. + */ + const_iterator begin() const noexcept; + + /** Return an iterator past the last component. + */ + const_iterator end() const noexcept; + + //-------------------------------------------- + // + // Segment iteration (yields string_view) + // + //-------------------------------------------- + + class segment_range; + + /** Return a range of segments as string_view. + + This is lighter weight than component iteration + since segments are simple string_views rather + than path_views. + */ + segment_range segments() const noexcept; + + //-------------------------------------------- + // + // Comparison + // + //-------------------------------------------- + + /** Compare this path to another. + + @return Negative if this < other, zero if equal, positive if this > other + */ + int compare(path_view other) const noexcept; + + /** Return true if two paths are equal. + */ + friend bool + operator==(path const& lhs, path const& rhs) noexcept + { + return lhs.s_ == rhs.s_; + } + + /** Three-way comparison of two paths. + */ + friend std::strong_ordering + operator<=>(path const& lhs, path const& rhs) noexcept + { + return lhs.s_ <=> rhs.s_; + } + + /** Return true if a path equals a path_view. + */ + friend bool + operator==(path const& lhs, path_view rhs) noexcept + { + return lhs.string_view() == rhs.string_view(); + } + + /** Return true if a path_view equals a path. + */ + friend bool + operator==(path_view lhs, path const& rhs) noexcept + { + return lhs.string_view() == rhs.string_view(); + } + + /** Three-way comparison of a path and a path_view. + */ + friend std::strong_ordering + operator<=>(path const& lhs, path_view rhs) noexcept + { + return lhs.string_view() <=> rhs.string_view(); + } + + /** Three-way comparison of a path_view and a path. 
+ */ + friend std::strong_ordering + operator<=>(path_view lhs, path const& rhs) noexcept + { + return lhs.string_view() <=> rhs.string_view(); + } + + //-------------------------------------------- + // + // Stream + // + //-------------------------------------------- + + /** Output the path to a stream. + */ + friend std::ostream& + operator<<(std::ostream& os, path const& p); + + /** Input a path from a stream. + */ + friend std::istream& + operator>>(std::istream& is, path& p); + +private: + friend class iterator; + + // Private unchecked constructor + struct unchecked_t {}; + + path(unchecked_t, std::string&& s) noexcept + : s_(std::move(s)) + { + } +}; + +/** Parse a string as a path without throwing. + + @return The path on success, or an error_code on failure +*/ +system::result +try_parse_path(std::string_view s) noexcept; + +/** Parse a null-terminated string as a path without throwing. + + @return The path on success, or an error_code on failure +*/ +system::result +try_parse_path(char const* s) noexcept; + +/** Parse and move a string as a path without throwing. + + @return The path on success, or an error_code on failure + @note The string is moved only on success +*/ +system::result +try_parse_path(std::string&& s) noexcept; + +//------------------------------------------------------------------------------ + +/** Iterator over path components (each component is a path_view). +*/ +class path::iterator +{ + path const* p_ = nullptr; + std::size_t pos_ = 0; + std::size_t len_ = 0; + +public: + using value_type = path_view; + using reference = path_view; + using pointer = void; + using difference_type = std::ptrdiff_t; + using iterator_category = std::bidirectional_iterator_tag; + + /** Default constructor. + */ + iterator() noexcept = default; + + /** Return the current component. + */ + path_view operator*() const noexcept; + + /** Pre-increment. + */ + iterator& operator++() noexcept; + + /** Post-increment. + */ + iterator operator++(int) noexcept; + + /** Pre-decrement. + */ + iterator& operator--() noexcept; + + /** Post-decrement. + */ + iterator operator--(int) noexcept; + + /** Return true if two iterators are equal. + */ + friend bool + operator==(iterator const& lhs, iterator const& rhs) noexcept + { + return lhs.p_ == rhs.p_ && lhs.pos_ == rhs.pos_; + } +}; + +//------------------------------------------------------------------------------ + +/** Range over path segments as string_view. +*/ +class path::segment_range +{ + path const* p_ = nullptr; + +public: + class iterator; + using const_iterator = iterator; + + /** Return an iterator to the first segment. + */ + iterator begin() const noexcept; + + /** Return an iterator past the last segment. + */ + iterator end() const noexcept; + +private: + friend class path; + + explicit + segment_range(path const* p) noexcept + : p_(p) + { + } +}; + +/** Iterator over path segments (each segment is a string_view). +*/ +class path::segment_range::iterator +{ + path const* p_ = nullptr; + std::size_t pos_ = 0; + std::size_t len_ = 0; + +public: + using value_type = std::string_view; + using reference = std::string_view; + using pointer = void; + using difference_type = std::ptrdiff_t; + using iterator_category = std::bidirectional_iterator_tag; + + /** Default constructor. + */ + iterator() noexcept = default; + + /** Return the current segment. + */ + std::string_view operator*() const noexcept; + + /** Pre-increment. + */ + iterator& operator++() noexcept; + + /** Post-increment. 
+ */ + iterator operator++(int) noexcept; + + /** Pre-decrement. + */ + iterator& operator--() noexcept; + + /** Post-decrement. + */ + iterator operator--(int) noexcept; + + /** Return true if two iterators are equal. + */ + friend bool + operator==(iterator const& lhs, iterator const& rhs) noexcept + { + return lhs.p_ == rhs.p_ && lhs.pos_ == rhs.pos_; + } +}; + +//------------------------------------------------------------------------------ +// +// Non-member operations +// +//------------------------------------------------------------------------------ + +/** Concatenate two paths with a separator. +*/ +path operator/(path const& lhs, path_view rhs); + +/** Concatenate two paths with a separator. +*/ +path operator/(path&& lhs, path_view rhs); + +/** Swap two paths. +*/ +inline void +swap(path& lhs, path& rhs) noexcept +{ + lhs.swap(rhs); +} + +/** Return a hash value for a path. +*/ +std::size_t +hash_value(path const& p) noexcept; + +/** Return a hash value for a path_view. +*/ +std::size_t +hash_value(path_view p) noexcept; + +} // namespace net + +//------------------------------------------------------------------------------ + +template<> +struct std::hash +{ + std::size_t + operator()(net::path const& p) const noexcept + { + return net::hash_value(p); + } +}; + +template<> +struct std::hash +{ + std::size_t + operator()(net::path_view p) const noexcept + { + return net::hash_value(p); + } +}; + +#endif diff --git a/include/boost/capy/run_on.hpp b/include/boost/capy/run_on.hpp new file mode 100644 index 00000000..ee846f21 --- /dev/null +++ b/include/boost/capy/run_on.hpp @@ -0,0 +1,139 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_RUN_ON_HPP +#define BOOST_CAPY_RUN_ON_HPP + +#include +#include +#include + +#include + +namespace boost { +namespace capy { +namespace detail { + +/** Awaitable that binds a task to a specific executor. + + Stores the executor by value. When co_awaited, the co_await + expression's lifetime extension keeps the executor alive for + the duration of the operation. 
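+
+    Typical use is through run_on() rather than constructing this type
+    directly. A usage sketch (illustrative; the dispatcher `ex2` and the
+    awaited `inner()` task are assumed names):
+
+    @code
+    task<void> parent(any_dispatcher ex2)
+    {
+        // inner() runs with ex2 as its dispatcher; when it finishes,
+        // completion is dispatched back to parent's own dispatcher.
+        co_await run_on(ex2, inner());
+    }
+    @endcode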
+ + @tparam T The task's return type + @tparam E The executor type +*/ +template +struct [[nodiscard]] run_on_awaitable +{ + D d_; + std::coroutine_handle::promise_type> h_; + + run_on_awaitable( + D d, + std::coroutine_handle::promise_type> h) + : d_(std::move(d)) + , h_(h) + { + } + + bool await_ready() const noexcept + { + return false; + } + + auto await_resume() + { + if(h_.promise().ep_) + std::rethrow_exception(h_.promise().ep_); + if constexpr (std::is_void_v) + return; + else + return std::move(*h_.promise().result_); + } + + // Affine awaitable: receives caller's dispatcher for completion dispatch + template + coro await_suspend(coro continuation, Caller const& caller_ex) + { + // 'this' is kept alive by co_await until completion + // d_ is valid for the entire operation + h_.promise().ex_ = d_; + h_.promise().caller_ex_ = caller_ex; + h_.promise().continuation_ = continuation; + h_.promise().needs_dispatch_ = true; + return h_; + } + + // Stoppable awaitable: receives caller's dispatcher and stop_token + template + coro await_suspend(coro continuation, Caller const& caller_ex, std::stop_token token) + { + h_.promise().ex_ = d_; + h_.promise().caller_ex_ = caller_ex; + h_.promise().continuation_ = continuation; + h_.promise().stop_token_ = token; + h_.promise().needs_dispatch_ = true; + return h_; + } + + ~run_on_awaitable() + { + if(h_ && !h_.done()) + h_.destroy(); + } + + // Non-copyable + run_on_awaitable(run_on_awaitable const&) = delete; + run_on_awaitable& operator=(run_on_awaitable const&) = delete; + + // Movable + run_on_awaitable(run_on_awaitable&& other) noexcept + : d_(std::move(other.d_)) + , h_(std::exchange(other.h_, nullptr)) + { + } + + run_on_awaitable& operator=(run_on_awaitable&& other) noexcept + { + if(this != &other) + { + if(h_ && !h_.done()) + h_.destroy(); + d_ = std::move(other.d_); + h_ = std::exchange(other.h_, nullptr); + } + return *this; + } +}; + +} // namespace detail + +/** Binds a task to execute on a specific executor. + + The executor is stored by value in the returned awaitable. + When co_awaited, the inner task receives this executor through + direct promise configuration. + + @param ex The executor on which the task should run (copied by value). + @param t The task to bind to the executor. + + @return An awaitable that runs t on the specified executor. +*/ +template +[[nodiscard]] auto run_on(D d, task t) +{ + return detail::run_on_awaitable{ + std::move(d), t.release()}; +} + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/task.hpp b/include/boost/capy/task.hpp index fc68a789..f96971e9 100644 --- a/include/boost/capy/task.hpp +++ b/include/boost/capy/task.hpp @@ -1,789 +1,269 @@ -// Copyright Vinnie Falco -// SPDX-License-Identifier: BSL-1.0 - -/** - @file task.hpp - - Lazy coroutine task type with executor affinity. - - Provides task, a lazy coroutine that produces a value of type T, - and spawn() for running tasks with completion handlers. Tasks support - executor affinity via on() to control which executor resumes the - coroutine after each co_await. -*/ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/corosio +// #ifndef BOOST_CAPY_TASK_HPP #define BOOST_CAPY_TASK_HPP -#include - -#ifdef BOOST_CAPY_HAS_CORO - +#include #include -#include -#include - -#include +#include +#include -#include #include -#include +#include #include #include +#include namespace boost { namespace capy { namespace detail { -/** Adapter that wraps executor and satisfies the dispatcher concept. - - This struct provides operator() by delegating to executor::post(), - enabling use with the affine awaitable protocol. It is stored as - a data member in the promise to ensure stable lifetime. -*/ -struct executor_dispatcher +// Helper base for result storage and return_void/return_value +template +struct task_return_base { - executor ex_; + std::optional result_; - executor_dispatcher() = default; - - explicit - executor_dispatcher(executor ex) noexcept - : ex_(std::move(ex)) - { - } - - template - void - operator()(F&& f) const + void return_value(T value) { - if (ex_) - ex_.post(std::forward(f)); - else - std::forward(f)(); + result_ = std::move(value); } +}; - explicit - operator bool() const noexcept +template<> +struct task_return_base +{ + void return_void() { - return static_cast(ex_); } }; -} // detail +} // namespace detail -/** A lazy coroutine task that produces a value of type T. +/** A coroutine task type implementing the affine awaitable protocol. - This class template represents an owning handle to a suspended - coroutine that will eventually produce a value of type @ref T. - The coroutine is lazy: it does not begin execution until it is - awaited or manually resumed via its handle. + This task type represents an asynchronous operation that can be awaited. + It implements the affine awaitable protocol where `await_suspend` receives + the caller's executor, enabling proper completion dispatch across executor + boundaries. - @par Thread Safety - Distinct objects may be accessed concurrently. Shared objects - require external synchronization. + @tparam T The return type of the task. Defaults to void. - @par Example - @code - task compute_value() - { - co_return 42; - } - - task example() - { - int result = co_await compute_value(); - } - @endcode + Key features: + @li Lazy execution - the coroutine does not start until awaited + @li Symmetric transfer - uses coroutine handle returns for efficient + resumption + @li Executor inheritance - inherits caller's executor unless explicitly + bound - @tparam T The type of value produced by the coroutine. + The task uses `[[clang::coro_await_elidable]]` (when available) to enable + heap allocation elision optimization (HALO) for nested coroutine calls. - @see async_op, launch + @see any_dispatcher */ -template -class task - : public affine_task, detail::executor_dispatcher> +template +struct [[nodiscard]] BOOST_CAPY_CORO_AWAIT_ELIDABLE + task { -public: - /** The coroutine promise type. - - This nested type satisfies the coroutine promise requirements - and manages the coroutine's result storage and completion - notification. - */ struct promise_type - : affine_promise + : frame_allocating_base + , detail::task_return_base { - /// Storage for the result value or exception (empty exception_ptr = incomplete) - system::result result_{std::exception_ptr{}}; - - /// Dispatcher for await_transform (always present for consistent types) - detail::executor_dispatcher await_dispatcher_{}; - - /** Get the executor for affinity. 
- - @return The executor used for resumption affinity. - */ - executor - get_executor() const noexcept - { - return await_dispatcher_.ex_; - } - - /** Set the executor for affinity. - - @param ex The executor to resume on after co_await. - */ - void - set_executor(executor ex) noexcept - { - await_dispatcher_ = detail::executor_dispatcher{std::move(ex)}; - // Also set on base class for final_suspend behavior - if (await_dispatcher_) - this->affine_promise::set_dispatcher(await_dispatcher_); - else - this->dispatcher_.reset(); - } - - /** Set the dispatcher for affinity (inheritance). - - Called by affine_task::await_suspend when a parent task - awaits this task with a dispatcher. Only sets the dispatcher - if not already set, so explicit affinity via on() takes - precedence over inherited affinity. - - @param d The dispatcher to use for resumption. - */ - void - set_dispatcher(detail::executor_dispatcher d) - { - // Only inherit if not explicitly set (explicit affinity takes precedence) - if (!await_dispatcher_) - { - await_dispatcher_ = d; - this->affine_promise::set_dispatcher(std::move(d)); - } - } - - /** Transform awaitables for executor affinity. - - Wraps co_await expressions to ensure the coroutine resumes - on the configured executor. Uses affine_awaiter for - affine-aware awaitables (zero overhead) and make_affine - trampoline for legacy awaitables. - - @param a The awaitable to transform. - @return An affinity-wrapped awaitable. - */ - template - auto - await_transform(Awaitable&& a) - { - // Use if constexpr to get consistent return type per branch - if constexpr (affine_awaitable) - { - // Affine-aware: use affine_awaiter (zero overhead) - return affine_awaiter{std::forward(a), &await_dispatcher_}; - } - else - { - // Legacy: use make_affine trampoline - return make_affine(std::forward(a), await_dispatcher_); - } - } - - /** Returns the task object for this coroutine. - - @return A task owning the coroutine handle. - */ - task - get_return_object() + any_dispatcher ex_; + any_dispatcher caller_ex_; + coro continuation_; + std::exception_ptr ep_; + std::stop_token stop_token_; + bool needs_dispatch_ = false; + + task get_return_object() { return task{std::coroutine_handle::from_promise(*this)}; } - /** Suspend the coroutine at the start. - - The coroutine is lazy and does not run until awaited. - - @return An awaitable that always suspends. - */ - std::suspend_always - initial_suspend() noexcept + std::suspend_always initial_suspend() noexcept { return {}; } - /** Store the return value. - - @param v The value to store as the coroutine result. - */ - void - return_value(T v) - { - result_ = std::move(v); - } - - /** Store an unhandled exception. - - Captures the current exception for later rethrowing. - */ - void - unhandled_exception() - { -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif - result_ = std::current_exception(); -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic pop -#endif - } - - /** Retrieve the result for await_resume. - - @return The value produced by the coroutine. - - @throws Any exception that was thrown inside the coroutine. - */ - T - result() - { - if (result_.has_error()) - std::rethrow_exception(result_.error()); - return std::move(*result_); - } - }; - -private: - std::coroutine_handle h_; - -public: - /** Construct a task from a coroutine handle. - - @param h The coroutine handle to take ownership of. 
- */ - explicit - task(std::coroutine_handle h) - : h_(h) - { - } - - /** Destructor. - - Destroys the owned coroutine if present. - */ - ~task() - { - if (h_) - h_.destroy(); - } - - /** Move constructor. - - @param o The task to move from. After the move, @p o will - be empty. - */ - task(task&& o) noexcept - : h_(std::exchange(o.h_, {})) - { - } - - /// Move assignment is deleted. - task& - operator=(task&&) = delete; - - /** Access the underlying coroutine handle. - - @return The coroutine handle, without transferring ownership. - */ - [[nodiscard]] - std::coroutine_handle - handle() const noexcept - { - return h_; - } - - /** Release ownership of the coroutine handle. - - After calling this function, the task no longer owns the - coroutine and the caller becomes responsible for destroying it. - - @return The coroutine handle. - */ - [[nodiscard]] - std::coroutine_handle - release() noexcept - { - return std::exchange(h_, {}); - } - - /** Bind this task to an executor for affinity. - - Sets the executor so that when this task's internal - co_await expressions complete, the coroutine resumes - on the specified executor. Child tasks without explicit - affinity will inherit this executor. - - @param e The executor to resume on. - - @return A reference to this task for chaining. - - @par Example - @code - task example(executor ex) + auto final_suspend() noexcept { - // parse_request resumes on ex after internal co_awaits - auto data = co_await parse_request().on(ex); + struct awaiter + { + promise_type* p_; + + bool await_ready() const noexcept + { + return false; + } + + coro await_suspend(coro) const noexcept + { + if(p_->continuation_) + { + // Same dispatcher: true symmetric transfer + if(!p_->needs_dispatch_) + return p_->continuation_; + return p_->caller_ex_(p_->continuation_); + } + return std::noop_coroutine(); + } + + void await_resume() const noexcept + { + } + }; + return awaiter{this}; } - @endcode - */ - task& - on(executor ex) & - { - h_.promise().set_executor(std::move(ex)); - return *this; - } - - /// @copydoc on(executor) - task&& - on(executor ex) && - { - h_.promise().set_executor(std::move(ex)); - return std::move(*this); - } -}; - -//----------------------------------------------------------------------------- - -/** A lazy coroutine task that produces no value. - - This specialization of task is used for coroutines that perform - work but do not return a value. It uses `co_return;` with no - argument to complete. - - @par Thread Safety - Distinct objects may be accessed concurrently. Shared objects - require external synchronization. - - @par Example - @code - task log_message(std::string msg) - { - std::cout << msg << std::endl; - co_return; - } - - task example() - { - co_await log_message("Hello, World!"); - } - @endcode - - @see task, async_op, launch -*/ -template<> -class task - : public affine_task, detail::executor_dispatcher> -{ -public: - /** The coroutine promise type for void tasks. - - This nested type satisfies the coroutine promise requirements - and manages exception storage and completion notification. - */ - struct promise_type - : affine_promise - { - /// Storage for exception (nullptr = success) - std::exception_ptr error_; - /// Dispatcher for await_transform (always present for consistent types) - detail::executor_dispatcher await_dispatcher_{}; + // return_void() or return_value() inherited from task_return_base - /** Get the executor for affinity. - - @return The executor used for resumption affinity. 
- */ - executor - get_executor() const noexcept + void unhandled_exception() { - return await_dispatcher_.ex_; + ep_ = std::current_exception(); } - /** Set the executor for affinity. - - @param ex The executor to resume on after co_await. - */ - void - set_executor(executor ex) noexcept + template + struct transform_awaiter { - await_dispatcher_ = detail::executor_dispatcher{std::move(ex)}; - // Also set on base class for final_suspend behavior - if (await_dispatcher_) - this->affine_promise::set_dispatcher(await_dispatcher_); - else - this->dispatcher_.reset(); - } + std::decay_t a_; + promise_type* p_; - /** Set the dispatcher for affinity (inheritance). - - Called by affine_task::await_suspend when a parent task - awaits this task with a dispatcher. Only sets the dispatcher - if not already set, so explicit affinity via on() takes - precedence over inherited affinity. - - @param d The dispatcher to use for resumption. - */ - void - set_dispatcher(detail::executor_dispatcher d) - { - // Only inherit if not explicitly set (explicit affinity takes precedence) - if (!await_dispatcher_) + bool await_ready() { - await_dispatcher_ = d; - this->affine_promise::set_dispatcher(std::move(d)); + return a_.await_ready(); } - } - /** Transform awaitables for executor affinity. + auto await_resume() + { + return a_.await_resume(); + } - Wraps co_await expressions to ensure the coroutine resumes - on the configured executor. Uses affine_awaiter for - affine-aware awaitables (zero overhead) and make_affine - trampoline for legacy awaitables. + template + auto await_suspend(std::coroutine_handle h) + { + using A = std::decay_t; + if constexpr (stoppable_awaitable) + return a_.await_suspend(h, p_->ex_, p_->stop_token_); + else + return a_.await_suspend(h, p_->ex_); + } + }; - @param a The awaitable to transform. - @return An affinity-wrapped awaitable. - */ - template - auto - await_transform(Awaitable&& a) + template + auto await_transform(Awaitable&& a) { - // Use if constexpr to get consistent return type per branch - if constexpr (affine_awaitable) + using A = std::decay_t; + if constexpr (affine_awaitable) { - // Affine-aware: use affine_awaiter (zero overhead) - return affine_awaiter{std::forward(a), &await_dispatcher_}; + // Zero-overhead path for affine awaitables + return transform_awaiter{ + std::forward(a), this}; } else { - // Legacy: use make_affine trampoline - return make_affine(std::forward(a), await_dispatcher_); + // Trampoline fallback for legacy awaitables + return make_affine(std::forward(a), ex_); } } - - /** Returns the task object for this coroutine. - - @return A task owning the coroutine handle. - */ - task - get_return_object() - { - return task{std::coroutine_handle::from_promise(*this)}; - } - - /** Suspend the coroutine at the start. - - The coroutine is lazy and does not run until awaited. - - @return An awaitable that always suspends. - */ - std::suspend_always - initial_suspend() noexcept - { - return {}; - } - - /** Signal coroutine completion. - - Called when the coroutine executes `co_return;`. - */ - void - return_void() noexcept - { - error_ = nullptr; - } - - /** Store an unhandled exception. - - Captures the current exception for later rethrowing. - */ - void - unhandled_exception() noexcept - { - error_ = std::current_exception(); - } - - /** Retrieve the result for await_resume. - - @throws Any exception that was thrown inside the coroutine. 
- */ - void - result() - { - if (error_) - std::rethrow_exception(error_); - } }; -private: std::coroutine_handle h_; -public: - /** Construct a task from a coroutine handle. - - @param h The coroutine handle to take ownership of. - */ - explicit - task(std::coroutine_handle h) - : h_(h) - { - } - - /** Destructor. - - Destroys the owned coroutine if present. - */ ~task() { - if (h_) + if(h_) h_.destroy(); } - /** Move constructor. - - @param o The task to move from. After the move, @p o will - be empty. - */ - task(task&& o) noexcept - : h_(std::exchange(o.h_, {})) + bool await_ready() const noexcept { + return false; } - /// Move assignment is deleted. - task& - operator=(task&&) = delete; - - /** Access the underlying coroutine handle. + auto await_resume() + { + if(h_.promise().ep_) + std::rethrow_exception(h_.promise().ep_); + if constexpr (! std::is_void_v) + return std::move(*h_.promise().result_); + else + return; + } - @return The coroutine handle, without transferring ownership. - */ - [[nodiscard]] - std::coroutine_handle - handle() const noexcept + // Affine awaitable: receive caller's dispatcher for completion dispatch + template + coro await_suspend(coro continuation, D const& caller_ex) { + h_.promise().caller_ex_ = caller_ex; + h_.promise().continuation_ = continuation; + h_.promise().ex_ = caller_ex; + h_.promise().needs_dispatch_ = false; return h_; } - /** Release ownership of the coroutine handle. - - After calling this function, the task no longer owns the - coroutine and the caller becomes responsible for destroying it. - - @return The coroutine handle. - */ - [[nodiscard]] - std::coroutine_handle - release() noexcept + // Stoppable awaitable: receive caller's dispatcher and stop_token + template + coro await_suspend(coro continuation, D const& caller_ex, std::stop_token token) { - return std::exchange(h_, {}); + h_.promise().caller_ex_ = caller_ex; + h_.promise().continuation_ = continuation; + h_.promise().ex_ = caller_ex; + h_.promise().stop_token_ = token; + h_.promise().needs_dispatch_ = false; + return h_; } - /** Bind this task to an executor for affinity. - - Sets the executor so that when this task's internal - co_await expressions complete, the coroutine resumes - on the specified executor. Child tasks without explicit - affinity will inherit this executor. - - @param e The executor to resume on. + /** Release ownership of the coroutine handle. - @return A reference to this task for chaining. + After calling this, the task no longer owns the handle and will + not destroy it. The caller is responsible for the handle's lifetime. - @par Example - @code - task example(executor ex) - { - // do_work resumes on ex after internal co_awaits - co_await do_work().on(ex); - } - @endcode + @return The coroutine handle, or nullptr if already released. */ - task& - on(executor ex) & + auto release() noexcept -> + std::coroutine_handle { - h_.promise().set_executor(std::move(ex)); - return *this; + return std::exchange(h_, nullptr); } - /// @copydoc on(executor) - task&& - on(executor ex) && + // Non-copyable + task(task const&) = delete; + task& operator=(task const&) = delete; + + // Movable + task(task&& other) noexcept + : h_(std::exchange(other.h_, nullptr)) { - h_.promise().set_executor(std::move(ex)); - return std::move(*this); } -}; -//----------------------------------------------------------------------------- - -namespace detail { - -/** Fire-and-forget coroutine for spawn(). 
- - This coroutine runs the spawned task and delivers the result - to the completion handler. It never suspends at final_suspend, - so the frame is destroyed immediately upon completion. -*/ -template -struct spawner -{ - struct promise_type + task& operator=(task&& other) noexcept { - spawner - get_return_object() noexcept - { - return {}; - } - - std::suspend_never - initial_suspend() noexcept + if(this != &other) { - return {}; + if(h_) + h_.destroy(); + h_ = std::exchange(other.h_, nullptr); } - - std::suspend_never - final_suspend() noexcept - { - return {}; - } - - void - return_void() noexcept - { - } - - void - unhandled_exception() - { - // Handler is called with exception in spawn's try/catch - std::terminate(); - } - }; -}; - -} // detail - -/** Spawn a task on an executor with a completion handler. - - This function starts a task running on the specified executor. - When the task completes (with a value or exception), the handler - is invoked with the result. - - The handler receives `system::result` which - holds either the task's return value or any exception that was - thrown during execution. - - The coroutine frame is allocated using the executor's allocator. - - @param ex The executor to run the task on. - @param t The task to spawn. Ownership is transferred. - @param handler The completion handler to invoke with the result. - - @par Handler Signature - @code - void handler(system::result result); - @endcode - - @par Example - @code - task compute() - { - co_return 42; + return *this; } - void start_work(executor ex) +private: + explicit task(std::coroutine_handle h) + : h_(h) { - spawn(ex, compute(), [](auto result) { - if (result.has_value()) - std::cout << "Result: " << *result << std::endl; - else - std::cerr << "Error occurred\n"; - }); } - @endcode - - @see task, executor, system::result -*/ -template -void -spawn(executor ex, task t, Handler&& handler) -{ - using result_type = system::result; - t.on(ex); - auto do_spawn = []( - task t, - std::decay_t h) -> detail::spawner - { -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif - try - { - h(result_type(co_await t)); - } - catch (...) - { - h(result_type(std::current_exception())); - } -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic pop -#endif - }; - do_spawn(std::move(t), std::forward(handler)); -} - -/** Spawn a void task on an executor with a completion handler. - - @copydetails spawn(executor,task,Handler&&) -*/ -template -void -spawn(executor ex, task t, Handler&& handler) -{ - using result_type = system::result; - t.on(ex); - auto do_spawn = []( - task t, - std::decay_t h) -> detail::spawner - { -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic push -#pragma GCC diagnostic ignored "-Wmaybe-uninitialized" -#endif - try - { - co_await t; - h(result_type()); - } - catch (...) 
- { - h(result_type(std::current_exception())); - } -#if defined(__GNUC__) && __GNUC__ >= 12 && !defined(__clang__) -#pragma GCC diagnostic pop -#endif - }; - do_spawn(std::move(t), std::forward(handler)); -} - -} // capy -} // boost +}; -#endif +} // namespace capy +} // namespace boost #endif diff --git a/include/boost/capy/thread_local_ptr.hpp b/include/boost/capy/thread_local_ptr.hpp new file mode 100644 index 00000000..20796559 --- /dev/null +++ b/include/boost/capy/thread_local_ptr.hpp @@ -0,0 +1,200 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#ifndef BOOST_CAPY_THREAD_LOCAL_PTR_HPP +#define BOOST_CAPY_THREAD_LOCAL_PTR_HPP + +#include + +#include + +namespace boost { +namespace capy { + +/** A thread-local pointer. + + This class provides thread-local storage for a pointer to T. + Each thread has its own independent pointer value, initially + nullptr. The user is responsible for managing the lifetime + of the pointed-to objects. + + The storage is static per type T. All instances of + `thread_local_ptr` share the same underlying slot. + + The implementation uses the most efficient available mechanism: + 1. Compiler keyword (__declspec(thread) or __thread) - enforces POD + 2. C++11 thread_local (fallback) + + @tparam T The pointed-to type. + + @par Declaration + + Typically declared at namespace or class scope. The object + is stateless, so local variables work but are redundant. + + @code + // Recommended: namespace scope + namespace { + thread_local_ptr current_session; + } + + // Also works: static class member + class server { + static thread_local_ptr current_request_; + }; + + // Works but unusual: local variable (still accesses static storage) + void foo() { + thread_local_ptr ctx; // same slot on every call + ctx = new context(); + } + @endcode + + @note The user is responsible for deleting pointed-to objects + before threads exit to avoid memory leaks. +*/ +template +class thread_local_ptr; + +//------------------------------------------------------------------------------ + +#if defined(BOOST_CAPY_TLS_KEYWORD) + +// Use compiler-specific keyword (__declspec(thread) or __thread) +// Most efficient: static linkage, no dynamic init, enforces POD + +template +class thread_local_ptr +{ + static BOOST_CAPY_TLS_KEYWORD T* ptr_; + +public: + thread_local_ptr() = default; + ~thread_local_ptr() = default; + + thread_local_ptr(thread_local_ptr const&) = delete; + thread_local_ptr& operator=(thread_local_ptr const&) = delete; + + /** Return the pointer for this thread. + + @return The stored pointer, or nullptr if not set. + */ + T* + get() const noexcept + { + return ptr_; + } + + /** Set the pointer for this thread. + + @param p The pointer to store. The user manages its lifetime. + */ + void + set(T* p) noexcept + { + ptr_ = p; + } + + /** Dereference the stored pointer. + + @pre get() != nullptr + */ + T& + operator*() const noexcept + { + return *ptr_; + } + + /** Member access through the stored pointer. + + @pre get() != nullptr + */ + T* + operator->() const noexcept + requires std::is_class_v + { + return ptr_; + } + + /** Assign a pointer value. + + @param p The pointer to store. + @return The stored pointer. 
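+
+        Only the calling thread's slot is modified; pointers stored by
+        other threads are unaffected.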
+ */ + T* + operator=(T* p) noexcept + { + ptr_ = p; + return p; + } +}; + +template +BOOST_CAPY_TLS_KEYWORD T* thread_local_ptr::ptr_ = nullptr; + +//------------------------------------------------------------------------------ + +#else + +// Use C++11 thread_local keyword (fallback) + +template +class thread_local_ptr +{ + static thread_local T* ptr_; + +public: + thread_local_ptr() = default; + ~thread_local_ptr() = default; + + thread_local_ptr(thread_local_ptr const&) = delete; + thread_local_ptr& operator=(thread_local_ptr const&) = delete; + + T* + get() const noexcept + { + return ptr_; + } + + void + set(T* p) noexcept + { + ptr_ = p; + } + + T& + operator*() const noexcept + { + return *ptr_; + } + + T* + operator->() const noexcept + requires std::is_class_v + { + return ptr_; + } + + T* + operator=(T* p) noexcept + { + ptr_ = p; + return p; + } +}; + +template +thread_local T* thread_local_ptr::ptr_ = nullptr; + +#endif + +} // namespace capy +} // namespace boost + +#endif diff --git a/include/boost/capy/thread_pool.hpp b/include/boost/capy/thread_pool.hpp index 91b27532..1e322457 100644 --- a/include/boost/capy/thread_pool.hpp +++ b/include/boost/capy/thread_pool.hpp @@ -11,7 +11,6 @@ #define BOOST_CAPY_THREAD_POOL_HPP #include -#include #include namespace boost { @@ -55,17 +54,6 @@ class BOOST_CAPY_DECL thread_pool thread_pool(thread_pool const&) = delete; thread_pool& operator=(thread_pool const&) = delete; - - /** Return an executor that references this pool. - - The returned executor is a lightweight handle that - can be copied freely. The caller must ensure this - thread_pool outlives all executors that reference it. - - @return An executor bound to this thread pool. - */ - executor - get_executor() noexcept; }; } // capy diff --git a/src/execution_context.cpp b/src/execution_context.cpp new file mode 100644 index 00000000..c95e8706 --- /dev/null +++ b/src/execution_context.cpp @@ -0,0 +1,144 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +#include +#include + +namespace boost { +namespace capy { + +execution_context:: +execution_context() = default; + +execution_context:: +~execution_context() +{ + shutdown(); + destroy(); +} + +void +execution_context:: +shutdown() noexcept +{ + if(shutdown_) + return; + shutdown_ = true; + + service* p = head_; + while(p) + { + p->shutdown(); + p = p->next_; + } +} + +void +execution_context:: +destroy() noexcept +{ + service* p = head_; + head_ = nullptr; + while(p) + { + service* next = p->next_; + delete p; + p = next; + } +} + +execution_context::service* +execution_context:: +find_impl(std::type_index ti) const noexcept +{ + auto p = head_; + while(p) + { + if(p->t0_ == ti || p->t1_ == ti) + break; + p = p->next_; + } + return p; +} + +execution_context::service& +execution_context:: +use_service_impl(factory& f) +{ + std::unique_lock lock(mutex_); + + if(auto* p = find_impl(f.t0)) + return *p; + + lock.unlock(); + + // Create the service outside lock, enabling nested calls + service* sp = f.create(*this); + sp->t0_ = f.t0; + sp->t1_ = f.t1; + + lock.lock(); + + if(auto* p = find_impl(f.t0)) + { + delete sp; + return *p; + } + + sp->next_ = head_; + head_ = sp; + + return *sp; +} + +execution_context::service& +execution_context:: +make_service_impl(factory& f) +{ + { + std::lock_guard lock(mutex_); + if(find_impl(f.t0)) + detail::throw_invalid_argument(); + if(f.t0 != f.t1 && find_impl(f.t1)) + detail::throw_invalid_argument(); + } + + // Unlocked to allow nested service creation from constructor + service* p = f.create(*this); + + std::lock_guard lock(mutex_); + if(find_impl(f.t0)) + { + delete p; + detail::throw_invalid_argument(); + } + + p->t0_ = f.t0; + if(f.t0 != f.t1) + { + if(find_impl(f.t1)) + { + delete p; + detail::throw_invalid_argument(); + } + p->t1_ = f.t1; + } + else + { + p->t1_ = f.t0; + } + + p->next_ = head_; + head_ = p; + + return *p; +} + +} // namespace capy +} // namespace boost diff --git a/src/executor.cpp b/src/executor.cpp deleted file mode 100644 index b98068b6..00000000 --- a/src/executor.cpp +++ /dev/null @@ -1,9 +0,0 @@ -// -// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) -// -// Distributed under the Boost Software License, Version 1.0. 
(See accompanying -// file LICENSE or copy at http://www.boost.org/LICENSE_1_0.txt) -// - -#include - diff --git a/src/thread_pool.cpp b/src/thread_pool.cpp index be8de747..1cb0edeb 100644 --- a/src/thread_pool.cpp +++ b/src/thread_pool.cpp @@ -7,8 +7,10 @@ // Official repository: https://github.com/boostorg/capy // +#include "src/work_allocator.hpp" + #include -#include "work_allocator.hpp" +#include #include #include #include @@ -64,8 +66,8 @@ class thread_pool::impl { header* h = head_; head_ = head_->next; - auto* w = static_cast(from_header(h)); - w->~work(); + auto* w = static_cast(from_header(h)); + w->destroy(); arena_.deallocate(h, h->size, h->align); } } @@ -107,17 +109,17 @@ class thread_pool::impl } void - submit(executor::work* w) + submit(execution_context::handler* h) { - header* h = to_header(w); + header* hdr = to_header(h); { std::lock_guard lock(mutex_); - h->next = nullptr; + hdr->next = nullptr; if(tail_) - tail_->next = h; + tail_->next = hdr; else - head_ = h; - tail_ = h; + head_ = hdr; + tail_ = hdr; } cv_.notify_one(); } @@ -144,9 +146,8 @@ class thread_pool::impl tail_ = nullptr; } - auto* w = static_cast(from_header(h)); - w->invoke(); - w->~work(); + auto* w = static_cast(from_header(h)); + (*w)(); { std::lock_guard lock(mutex_); @@ -170,12 +171,5 @@ thread_pool(std::size_t num_threads) { } -executor -thread_pool:: -get_executor() noexcept -{ - return executor(*impl_); -} - } // capy } // boost diff --git a/test/unit/affine.cpp b/test/unit/affine.cpp index f5502df1..23ad5d9f 100644 --- a/test/unit/affine.cpp +++ b/test/unit/affine.cpp @@ -9,28 +9,3 @@ // Test that header file is self-contained. #include - -#ifdef BOOST_CAPY_HAS_CORO - -#include "test_suite.hpp" - -namespace boost { -namespace capy { - -struct affine_test -{ - void - run() - { - } -}; - -TEST_SUITE( - affine_test, - "boost.capy.affine"); - -} // capy -} // boost - -#endif - diff --git a/test/unit/async_op.cpp b/test/unit/async_op.cpp index eb477149..d9b4dd85 100644 --- a/test/unit/async_op.cpp +++ b/test/unit/async_op.cpp @@ -9,463 +9,3 @@ // Test that header file is self-contained. 
#include - -#ifdef BOOST_CAPY_HAS_CORO - -#include -#include - -#include "test_suite.hpp" - -#include -#include - -namespace boost { -namespace capy { - -template -T run_task(task& t) -{ - while (!t.handle().done()) - t.handle().resume(); - return t.await_resume(); -} - -template<> -void run_task(task& t) -{ - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); -} - -struct async_test_exception : std::runtime_error -{ - explicit async_test_exception(const char* msg) - : std::runtime_error(msg) - { - } -}; - -struct result_with_error -{ - int value; - system::error_code ec; - - result_with_error() = default; - - result_with_error(int v, system::error_code e = {}) - : value(v) - , ec(e) - { - } -}; - -struct async_op_test -{ - static async_op - async_int_value() - { - return make_async_op( - [](auto cb) { - cb(42); - }); - } - - static async_op - async_string_value() - { - return make_async_op( - [](auto cb) { - cb("hello async"); - }); - } - - static task - task_awaiting_int() - { - int v = co_await async_int_value(); - co_return v; - } - - static task - task_awaiting_string() - { - std::string s = co_await async_string_value(); - co_return s; - } - - void - testBasicValue() - { - // async_op returning int - { - auto t = task_awaiting_int(); - BOOST_TEST_EQ(run_task(t), 42); - } - - // async_op returning string - { - auto t = task_awaiting_string(); - BOOST_TEST_EQ(run_task(t), "hello async"); - } - } - - static async_op - async_returns_success() - { - return make_async_op( - [](auto cb) { - cb(100, system::error_code{}); - }); - } - - static async_op - async_returns_error() - { - return make_async_op( - [](auto cb) { - cb(0, system::errc::make_error_code( - system::errc::invalid_argument)); - }); - } - - static task - task_awaits_success() - { - auto r = co_await async_returns_success(); - co_return r; - } - - static task - task_awaits_error() - { - auto r = co_await async_returns_error(); - co_return r; - } - - static task - task_checks_error_and_returns() - { - auto r = co_await async_returns_error(); - if (r.ec) - co_return -1; - co_return r.value; - } - - void - testErrorHandling() - { - // async_op with success - { - auto t = task_awaits_success(); - auto r = run_task(t); - BOOST_TEST_EQ(r.value, 100); - BOOST_TEST(!r.ec); - } - - // async_op with error - { - auto t = task_awaits_error(); - auto r = run_task(t); - BOOST_TEST_EQ(r.value, 0); - BOOST_TEST(r.ec); - BOOST_TEST_EQ(r.ec, system::errc::invalid_argument); - } - - // task checks error and returns appropriate value - { - auto t = task_checks_error_and_returns(); - BOOST_TEST_EQ(run_task(t), -1); - } - } - - static async_op - async_value_1() - { - return make_async_op( - [](auto cb) { cb(10); }); - } - - static async_op - async_value_2() - { - return make_async_op( - [](auto cb) { cb(20); }); - } - - static async_op - async_value_3() - { - return make_async_op( - [](auto cb) { cb(30); }); - } - - static task - task_awaits_multiple() - { - int v1 = co_await async_value_1(); - int v2 = co_await async_value_2(); - int v3 = co_await async_value_3(); - co_return v1 + v2 + v3; - } - - void - testMultipleAwaits() - { - auto t = task_awaits_multiple(); - BOOST_TEST_EQ(run_task(t), 60); - } - - void - testAwaitReady() - { - auto ar = async_int_value(); - BOOST_TEST(!ar.await_ready()); - } - - void - testMoveOperations() - { - // async_op is move constructible - { - auto ar1 = async_int_value(); - auto ar2 = std::move(ar1); - (void)ar2; - } - - // async_op is move assignable - { - auto ar1 = async_int_value(); - auto ar2 
= async_string_value(); - (void)ar1; - (void)ar2; - } - } - - static async_op - async_with_captured_state(int multiplier) - { - return make_async_op( - [multiplier](auto cb) { - cb(10 * multiplier); - }); - } - - static task - task_awaits_with_state() - { - int v1 = co_await async_with_captured_state(2); - int v2 = co_await async_with_captured_state(3); - co_return v1 + v2; - } - - void - testCapturedState() - { - auto t = task_awaits_with_state(); - BOOST_TEST_EQ(run_task(t), 50); - } - - struct complex_result - { - int id; - std::string name; - double value; - - complex_result() = default; - complex_result(int i, std::string n, double v) - : id(i) - , name(std::move(n)) - , value(v) - { - } - }; - - static async_op - async_complex() - { - return make_async_op( - [](auto cb) { - cb(1, "test", 3.14); - }); - } - - static task - task_awaits_complex() - { - auto r = co_await async_complex(); - co_return r; - } - - void - testComplexResult() - { - auto t = task_awaits_complex(); - auto r = run_task(t); - BOOST_TEST_EQ(r.id, 1); - BOOST_TEST_EQ(r.name, "test"); - BOOST_TEST_EQ(r.value, 3.14); - } - - static task - inner_task_with_async() - { - int v = co_await async_int_value(); - co_return v * 2; - } - - static task - outer_task_with_both() - { - int v1 = co_await async_value_1(); - int v2 = co_await inner_task_with_async(); - co_return v1 + v2; - } - - void - testTaskChaining() - { - auto t = outer_task_with_both(); - BOOST_TEST_EQ(run_task(t), 94); - } - - // async_op tests - - static async_op - async_void_basic() - { - return make_async_op( - [](auto on_done) { - on_done(); - }); - } - - static task - task_awaits_void_async() - { - co_await async_void_basic(); - co_return; - } - - void - testVoidAsyncBasic() - { - auto t = task_awaits_void_async(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - } - - static async_op - async_void_step() - { - return make_async_op( - [](auto on_done) { - on_done(); - }); - } - - static task - task_awaits_void_then_value() - { - co_await async_void_step(); - int v = co_await async_int_value(); - co_await async_void_step(); - co_return v; - } - - void - testVoidAsyncWithValue() - { - auto t = task_awaits_void_then_value(); - BOOST_TEST_EQ(run_task(t), 42); - } - - static task - task_awaits_multiple_void() - { - co_await async_void_step(); - co_await async_void_step(); - co_await async_void_step(); - co_return; - } - - void - testVoidAsyncChain() - { - auto t = task_awaits_multiple_void(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - } - - void - testVoidAsyncAwaitReady() - { - auto ar = async_void_basic(); - BOOST_TEST(!ar.await_ready()); - } - - void - testVoidAsyncMove() - { - auto ar1 = async_void_basic(); - auto ar2 = std::move(ar1); - (void)ar2; - } - - static async_op - async_void_deferred() - { - return make_async_op( - [](auto on_done) { - // Simulate deferred completion - on_done(); - }); - } - - static task - task_with_deferred_void() - { - co_await async_void_deferred(); - co_return 999; - } - - void - testVoidAsyncDeferred() - { - auto t = task_with_deferred_void(); - BOOST_TEST_EQ(run_task(t), 999); - } - - void - run() - { - testBasicValue(); - testErrorHandling(); - testMultipleAwaits(); - testAwaitReady(); - testMoveOperations(); - testCapturedState(); - testComplexResult(); - testTaskChaining(); - - // async_op tests - testVoidAsyncBasic(); - testVoidAsyncWithValue(); - testVoidAsyncChain(); - testVoidAsyncAwaitReady(); - testVoidAsyncMove(); - testVoidAsyncDeferred(); - } -}; - 
-TEST_SUITE( - async_op_test, - "boost.capy.async_op"); - -} // capy -} // boost - -#endif diff --git a/test/unit/execution_context.cpp b/test/unit/execution_context.cpp new file mode 100644 index 00000000..63b0ff12 --- /dev/null +++ b/test/unit/execution_context.cpp @@ -0,0 +1,387 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. +#include + +#include "test_suite.hpp" + +#include +#include +#include + +namespace boost { +namespace capy { + +namespace { + +// Test execution context that exposes protected constructor +class test_context : public execution_context +{ +public: + test_context() = default; +}; + +// Simple service for basic tests +struct simple_service : execution_context::service +{ + int value = 0; + + explicit simple_service(execution_context&) + { + } + + simple_service(execution_context&, int v) + : value(v) + { + } + + void shutdown() override {} +}; + +// Service that tracks shutdown +struct tracking_service : execution_context::service +{ + bool& shutdown_called; + + explicit tracking_service(execution_context&, bool& flag) + : shutdown_called(flag) + { + } + + void shutdown() override + { + shutdown_called = true; + } +}; + +// Base service type for key_type tests +struct base_service : execution_context::service +{ + virtual int get_value() const = 0; +}; + +// Derived service with key_type +struct derived_service : base_service +{ + using key_type = base_service; + + int value; + + explicit derived_service(execution_context&) + : value(42) + { + } + + derived_service(execution_context&, int v) + : value(v) + { + } + + int get_value() const override { return value; } + void shutdown() override {} +}; + +// Another derived service with same key_type +struct other_derived_service : base_service +{ + using key_type = base_service; + + explicit other_derived_service(execution_context&) + { + } + + int get_value() const override { return 99; } + void shutdown() override {} +}; + +// Service with multiple constructor args +struct multi_arg_service : execution_context::service +{ + int a; + std::string b; + double c; + + multi_arg_service(execution_context&, int a_, std::string b_, double c_) + : a(a_), b(std::move(b_)), c(c_) + { + } + + void shutdown() override {} +}; + +// Service that creates another service in constructor +struct nested_service : execution_context::service +{ + explicit nested_service(execution_context& ctx) + { + // This should work - nested service creation + ctx.use_service(); + } + + void shutdown() override {} +}; + +// Concrete handler for testing data() behavior +struct test_handler : execution_context::handler +{ + void operator()() override {} + void destroy() override {} + + // Expose data_ for testing + void set_data(void* p) { data_ = p; } +}; + +} // namespace + +struct execution_context_test +{ + void + testConstruct() + { + // Basic construction and destruction + { + test_context ctx; + } + } + + void + testHasService() + { + test_context ctx; + + // Initially no services + BOOST_TEST(!ctx.has_service()); + + // After adding, has_service returns true + ctx.use_service(); + BOOST_TEST(ctx.has_service()); + } + + void + testFindService() + { + test_context ctx; + + // Initially returns nullptr + BOOST_TEST_EQ(ctx.find_service(), nullptr); + + 
// After adding, returns pointer + ctx.use_service(); + BOOST_TEST_NE(ctx.find_service(), nullptr); + } + + void + testUseService() + { + test_context ctx; + + // Creates service if not present + auto& svc1 = ctx.use_service(); + BOOST_TEST(ctx.has_service()); + + // Returns same instance on subsequent calls + auto& svc2 = ctx.use_service(); + BOOST_TEST_EQ(&svc1, &svc2); + } + + void + testMakeService() + { + test_context ctx; + + // Creates service with value + auto& svc = ctx.make_service(42); + BOOST_TEST_EQ(svc.value, 42); + + // Throws if service already exists + BOOST_TEST_THROWS( + ctx.make_service(100), + std::invalid_argument); + + // Original value unchanged + BOOST_TEST_EQ(ctx.find_service()->value, 42); + } + + void + testMakeServiceMultipleArgs() + { + test_context ctx; + + auto& svc = ctx.make_service( + 123, std::string("hello"), 3.14); + + BOOST_TEST_EQ(svc.a, 123); + BOOST_TEST_EQ(svc.b, "hello"); + BOOST_TEST_EQ(svc.c, 3.14); + } + + void + testKeyType() + { + test_context ctx; + + // Create derived service + auto& svc = ctx.make_service(100); + BOOST_TEST_EQ(svc.value, 100); + + // Can find via derived type + BOOST_TEST(ctx.has_service()); + BOOST_TEST_NE(ctx.find_service(), nullptr); + + // Can find via base type (key_type) + BOOST_TEST(ctx.has_service()); + auto* base = ctx.find_service(); + BOOST_TEST_NE(base, nullptr); + BOOST_TEST_EQ(base->get_value(), 100); + + // Cannot add another service with same key_type + BOOST_TEST_THROWS( + ctx.make_service(), + std::invalid_argument); + } + + void + testKeyTypeUseService() + { + test_context ctx; + + // use_service creates via derived type + auto& svc = ctx.use_service(); + BOOST_TEST_EQ(svc.get_value(), 42); + + // Can lookup via base type + BOOST_TEST(ctx.has_service()); + } + + void + testShutdown() + { + bool shutdown_called = false; + + { + test_context ctx; + ctx.make_service(shutdown_called); + BOOST_TEST(!shutdown_called); + } + + // Shutdown called when context destroyed + BOOST_TEST(shutdown_called); + } + + void + testMultipleServices() + { + test_context ctx; + + ctx.make_service(1); + ctx.make_service(2, "test", 2.0); + + BOOST_TEST(ctx.has_service()); + BOOST_TEST(ctx.has_service()); + + BOOST_TEST_EQ(ctx.find_service()->value, 1); + BOOST_TEST_EQ(ctx.find_service()->a, 2); + } + + void + testNestedServiceCreation() + { + test_context ctx; + + // nested_service creates simple_service in its constructor + ctx.use_service(); + + // Both services should exist + BOOST_TEST(ctx.has_service()); + BOOST_TEST(ctx.has_service()); + } + + void + testConcurrentAccess() + { + test_context ctx; + std::atomic success_count{0}; + constexpr int num_threads = 8; + + std::vector threads; + threads.reserve(num_threads); + + for(int i = 0; i < num_threads; ++i) + { + threads.emplace_back([&ctx, &success_count]{ + // All threads try to use_service simultaneously + auto& svc = ctx.use_service(); + (void)svc; + ++success_count; + }); + } + + for(auto& t : threads) + t.join(); + + // All threads should succeed + BOOST_TEST_EQ(success_count.load(), num_threads); + + // Only one service instance should exist + BOOST_TEST(ctx.has_service()); + } + + void + testHandlerDataInitiallyNull() + { + test_handler h; + BOOST_TEST_EQ(h.data(), nullptr); + } + + void + testHandlerDataReflectsChanges() + { + test_handler h; + int dummy = 42; + + h.set_data(&dummy); + BOOST_TEST_EQ(h.data(), &dummy); + + // Change to different value + double other = 3.14; + h.set_data(&other); + BOOST_TEST_EQ(h.data(), &other); + + // Change back to nullptr + 
h.set_data(nullptr); + BOOST_TEST_EQ(h.data(), nullptr); + } + + void + run() + { + testConstruct(); + testHasService(); + testFindService(); + testUseService(); + testMakeService(); + testMakeServiceMultipleArgs(); + testKeyType(); + testKeyTypeUseService(); + testShutdown(); + testMultipleServices(); + testNestedServiceCreation(); + testConcurrentAccess(); + testHandlerDataInitiallyNull(); + testHandlerDataReflectsChanges(); + } +}; + +TEST_SUITE( + execution_context_test, + "boost.capy.execution_context"); + +} // capy +} // boost diff --git a/test/unit/executor.cpp b/test/unit/executor.cpp index 6dd6f18a..ce086833 100644 --- a/test/unit/executor.cpp +++ b/test/unit/executor.cpp @@ -9,734 +9,273 @@ // Test that header file is self-contained. #include +#include -#include +#include #include "test_suite.hpp" -#include -#include -#include -#include -#include - namespace boost { namespace capy { -//----------------------------------------------------------------------------- - -/** Simple synchronous executor for testing. - - Executes work immediately in the calling thread. - Uses malloc/free for allocation with a header to track size. -*/ -struct sync_executor +// Test handler implementation +struct test_handler : execution_context::handler { - friend struct executor::access; - - std::atomic alloc_count{0}; - std::atomic submit_count{0}; - -private: - struct header - { - std::size_t size; - }; + int& invoked; + int& destroyed; - void* - allocate(std::size_t size, std::size_t /*align*/) + test_handler(int& i, int& d) + : invoked(i) + , destroyed(d) { - ++alloc_count; - std::size_t total = sizeof(header) + size; - void* p = std::malloc(total); - auto* h = new(p) header{total}; - return h + 1; } - void - deallocate(void* p, std::size_t /*size*/, std::size_t /*align*/) + void operator()() override { - auto* h = static_cast(p) - 1; - std::free(h); + ++invoked; } - void - submit(executor::work* w) + void destroy() override { - ++submit_count; - w->invoke(); - w->~work(); - deallocate(w, 0, 0); + ++destroyed; } }; -/** Entry for queued work. -*/ -struct queued_entry +// Minimal execution context for testing +struct test_context { - executor::work* w; - void* storage; + int id = 0; }; -/** Value-type executor for testing owning mode. - - This executor can be moved and copied, suitable for - use with executor::wrap(). 
-*/ -struct value_executor +// Test executor that satisfies the concept +struct test_executor { - friend struct executor::access; - - // Shared state to track calls across copies - struct state - { - std::atomic alloc_count{0}; - std::atomic submit_count{0}; - }; - - std::shared_ptr state_; + test_context* ctx_ = nullptr; - value_executor() - : state_(std::make_shared()) - { - } - - // Copyable and movable - value_executor(value_executor const&) = default; - value_executor(value_executor&&) = default; - value_executor& operator=(value_executor const&) = default; - value_executor& operator=(value_executor&&) = default; - - int alloc_count() const { return state_->alloc_count.load(); } - int submit_count() const { return state_->submit_count.load(); } - -private: - struct header - { - std::size_t size; - }; + test_executor() = default; - void* - allocate(std::size_t size, std::size_t /*align*/) + explicit + test_executor(test_context& ctx) noexcept + : ctx_(&ctx) { - ++state_->alloc_count; - std::size_t total = sizeof(header) + size; - void* p = std::malloc(total); - header* h = new(p) header{total}; - return h + 1; } - void - deallocate(void* p, std::size_t /*size*/, std::size_t /*align*/) + // Equality comparison (required by Networking TS) + bool + operator==(test_executor const& other) const noexcept { - header* h = static_cast(p) - 1; - std::free(h); + return ctx_ == other.ctx_; } - void - submit(executor::work* w) + // Execution context access + test_context& + context() const noexcept { - ++state_->submit_count; - w->invoke(); - w->~work(); - deallocate(w, 0, 0); + return *ctx_; } -}; -//----------------------------------------------------------------------------- - -struct execution_test -{ + // Work tracking void - testDefaultConstruct() + on_work_started() const noexcept { - executor exec; - BOOST_TEST(!exec); } void - testConstructFromImpl() + on_work_finished() const noexcept { - sync_executor ctx; - executor exec(ctx); - BOOST_TEST(static_cast(exec)); } - void - testCopyConstruct() + // Work submission + std::coroutine_handle<> + dispatch(std::coroutine_handle<> h) const { - sync_executor ctx; - executor exec1(ctx); - executor exec2(exec1); - BOOST_TEST(static_cast(exec1)); - BOOST_TEST(static_cast(exec2)); + return h; } void - testMoveConstruct() + post(std::coroutine_handle<>) const { - sync_executor ctx; - executor exec1(ctx); - executor exec2(std::move(exec1)); - BOOST_TEST(static_cast(exec2)); } void - testCopyAssign() + defer(std::coroutine_handle<>) const { - sync_executor ctx; - executor exec1(ctx); - executor exec2; - exec2 = exec1; - BOOST_TEST(static_cast(exec1)); - BOOST_TEST(static_cast(exec2)); } +}; - void - testMoveAssign() - { - sync_executor ctx; - executor exec1(ctx); - executor exec2; - exec2 = std::move(exec1); - BOOST_TEST(static_cast(exec2)); - } - - void - testPostLambda() - { - bool called = false; - sync_executor ctx; - executor exec(ctx); - exec.post([&called]{ called = true; }); - BOOST_TEST(called); - } - - void - testPostMultiple() - { - int count = 0; - sync_executor ctx; - executor exec(ctx); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - BOOST_TEST_EQ(count, 3); - } - - void - testPostWithCapture() - { - int result = 0; - int a = 10, b = 20; - sync_executor ctx; - executor exec(ctx); - exec.post([&result, a, b]{ result = a + b; }); - BOOST_TEST_EQ(result, 30); - } - - void - testPostWithMoveOnlyCapture() - { - struct callable - { - int& result; - std::unique_ptr ptr; - - void 
operator()() - { - result = *ptr; - } - }; - - int result = 0; - sync_executor ctx; - executor exec(ctx); - std::unique_ptr ptr(new int(42)); - exec.post(callable{result, std::move(ptr)}); - BOOST_TEST_EQ(result, 42); - } +// Verify executor concept +static_assert(executor); +struct executor_test +{ void - testQueuedExecution() + run() { - // Queued executor using shared state via pointers - struct shared_queue_executor + // handler - invoke operator() { - friend struct executor::access; - - std::vector* queue; - - private: - void* allocate(std::size_t size, std::size_t) - { - return std::malloc(size); - } - - void deallocate(void* p, std::size_t, std::size_t) - { - std::free(p); - } - - void submit(executor::work* w) - { - queue->push_back({w, w}); - } - }; - - int count = 0; - std::vector queue; - shared_queue_executor ctx{&queue}; - executor exec(ctx); - - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); + int invoked = 0; + int destroyed = 0; + test_handler h(invoked, destroyed); - BOOST_TEST_EQ(count, 0); - BOOST_TEST_EQ(queue.size(), 2u); + h(); - // Run one - if(!queue.empty()) - { - auto e = queue.front(); - queue.erase(queue.begin()); - e.w->invoke(); - e.w->~work(); - std::free(e.storage); + BOOST_TEST(invoked == 1); + BOOST_TEST(destroyed == 0); } - BOOST_TEST_EQ(count, 1); - // Run remaining - while(!queue.empty()) + // handler - invoke destroy() { - auto e = queue.front(); - queue.erase(queue.begin()); - e.w->invoke(); - e.w->~work(); - std::free(e.storage); - } - BOOST_TEST_EQ(count, 2); - } + int invoked = 0; + int destroyed = 0; + test_handler h(invoked, destroyed); - void - testSharedReference() - { - int count = 0; - sync_executor ctx; - executor exec1(ctx); - executor exec2 = exec1; - - exec1.post([&count]{ ++count; }); - exec2.post([&count]{ ++count; }); + h.destroy(); - BOOST_TEST_EQ(count, 2); - // Both executors reference the same context - BOOST_TEST_EQ(ctx.submit_count.load(), 2); - } - - void - testSubmitNonVoid() - { - int result = 0; - bool handler_called = false; - - sync_executor ctx; - executor exec(ctx); - exec.submit( - []{ return 42; }, - [&](system::result r) - { - handler_called = true; - if(r.has_value()) - result = r.value(); - }); - - BOOST_TEST(handler_called); - BOOST_TEST_EQ(result, 42); - } - - void - testSubmitVoid() - { - bool work_called = false; - bool handler_called = false; - - sync_executor ctx; - executor exec(ctx); - exec.submit( - [&work_called]{ work_called = true; }, - [&handler_called](system::result) - { - handler_called = true; - }); - - BOOST_TEST(work_called); - BOOST_TEST(handler_called); - } - - void - testSubmitException() - { - bool handler_called = false; - bool got_exception = false; - - sync_executor ctx; - executor exec(ctx); - exec.submit( - []() -> int { throw std::runtime_error("test"); }, - [&](system::result r) - { - handler_called = true; - if(r.has_error()) - got_exception = true; - }); - - BOOST_TEST(handler_called); - BOOST_TEST(got_exception); - } - - void - testFactoryBasic() - { - sync_executor ctx; - executor exec(ctx); - - struct my_work : executor::work - { - bool& flag; - explicit my_work(bool& f) : flag(f) {} - void invoke() override { flag = true; } - }; + BOOST_TEST(invoked == 0); + BOOST_TEST(destroyed == 1); + } - bool invoked = false; + // queue - default construction { - executor::factory fac(exec); - void* p = fac.allocate(sizeof(my_work), alignof(my_work)); - auto* w = ::new(p) my_work(invoked); - fac.commit(w); + execution_context::queue q; + BOOST_TEST(q.empty()); + 
BOOST_TEST(q.pop() == nullptr); } - BOOST_TEST(invoked); - } - void - testFactoryRollback() - { - // Use a tracking structure - struct tracking_executor + // queue - push and pop { - friend struct executor::access; - - int alloc_count = 0; - int dealloc_count = 0; - - private: - struct header { std::size_t size; }; - - void* allocate(std::size_t size, std::size_t) - { - ++alloc_count; - std::size_t total = sizeof(header) + size; - void* p = std::malloc(total); - header* h = new(p) header{total}; - return h + 1; - } - - void deallocate(void* p, std::size_t, std::size_t) - { - ++dealloc_count; - header* h = static_cast(p) - 1; - std::free(h); - } + int invoked = 0; + int destroyed = 0; + test_handler h(invoked, destroyed); - void submit(executor::work* w) - { - w->invoke(); - w->~work(); - deallocate(w, 0, 0); - } - }; + execution_context::queue q; + q.push(&h); + BOOST_TEST(!q.empty()); - tracking_executor ctx; - executor exec(ctx); + execution_context::handler* p = q.pop(); + BOOST_TEST(p == &h); + BOOST_TEST(q.empty()); + } + // queue - FIFO order { - executor::factory fac(exec); - fac.allocate(64, 8); - // No commit - destructor should deallocate + int invoked1 = 0, destroyed1 = 0; + int invoked2 = 0, destroyed2 = 0; + int invoked3 = 0, destroyed3 = 0; + test_handler h1(invoked1, destroyed1); + test_handler h2(invoked2, destroyed2); + test_handler h3(invoked3, destroyed3); + + execution_context::queue q; + q.push(&h1); + q.push(&h2); + q.push(&h3); + + BOOST_TEST(q.pop() == &h1); + BOOST_TEST(q.pop() == &h2); + BOOST_TEST(q.pop() == &h3); + BOOST_TEST(q.empty()); } - BOOST_TEST_EQ(ctx.alloc_count, 1); - BOOST_TEST_EQ(ctx.dealloc_count, 1); - } - - //------------------------------------------------------------------------- - // Owning mode tests (executor::wrap) - //------------------------------------------------------------------------- - - void - testWrapBasic() - { - value_executor ve; - executor exec = executor::wrap(ve); - BOOST_TEST(static_cast(exec)); - } - - void - testWrapTemporary() - { - executor exec = executor::wrap(value_executor{}); - BOOST_TEST(static_cast(exec)); - } - - void - testWrapCopyConstruct() - { - value_executor ve; - executor exec1 = executor::wrap(ve); - executor exec2(exec1); - BOOST_TEST(static_cast(exec1)); - BOOST_TEST(static_cast(exec2)); - } - - void - testWrapMoveConstruct() - { - value_executor ve; - executor exec1 = executor::wrap(ve); - executor exec2(std::move(exec1)); - BOOST_TEST(static_cast(exec2)); - } - - void - testWrapCopyAssign() - { - value_executor ve; - executor exec1 = executor::wrap(ve); - executor exec2; - exec2 = exec1; - BOOST_TEST(static_cast(exec1)); - BOOST_TEST(static_cast(exec2)); - } - - void - testWrapMoveAssign() - { - value_executor ve; - executor exec1 = executor::wrap(ve); - executor exec2; - exec2 = std::move(exec1); - BOOST_TEST(static_cast(exec2)); - } - - void - testWrapPostLambda() - { - bool called = false; - value_executor ve; - executor exec = executor::wrap(ve); - exec.post([&called]{ called = true; }); - BOOST_TEST(called); - BOOST_TEST_EQ(ve.submit_count(), 1); - } + // queue - move constructor + { + int invoked = 0; + int destroyed = 0; + test_handler h(invoked, destroyed); - void - testWrapPostMultiple() - { - int count = 0; - value_executor ve; - executor exec = executor::wrap(ve); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - BOOST_TEST_EQ(count, 3); - BOOST_TEST_EQ(ve.submit_count(), 3); - } + execution_context::queue q1; + q1.push(&h); - void - 
testWrapSharedOwnership() - { - // Verify that copies share the same underlying executor - int count = 0; - value_executor ve; - executor exec1 = executor::wrap(ve); - executor exec2 = exec1; - - exec1.post([&count]{ ++count; }); - exec2.post([&count]{ ++count; }); - - BOOST_TEST_EQ(count, 2); - // Both executors should use the same underlying value_executor - BOOST_TEST_EQ(ve.submit_count(), 2); - } + execution_context::queue q2(std::move(q1)); + BOOST_TEST(q1.empty()); + BOOST_TEST(!q2.empty()); + BOOST_TEST(q2.pop() == &h); + } - void - testWrapLifetime() - { - // Verify executor remains valid after original goes out of scope - executor exec; + // queue - splice { - value_executor ve; - exec = executor::wrap(ve); + int i1 = 0, d1 = 0; + int i2 = 0, d2 = 0; + int i3 = 0, d3 = 0; + int i4 = 0, d4 = 0; + test_handler h1(i1, d1); + test_handler h2(i2, d2); + test_handler h3(i3, d3); + test_handler h4(i4, d4); + + execution_context::queue q1; + execution_context::queue q2; + q1.push(&h1); + q1.push(&h2); + q2.push(&h3); + q2.push(&h4); + + q1.push(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(q1.pop() == &h1); + BOOST_TEST(q1.pop() == &h2); + BOOST_TEST(q1.pop() == &h3); + BOOST_TEST(q1.pop() == &h4); + BOOST_TEST(q1.empty()); } - // ve is destroyed but exec should still work - // (the holder keeps a copy of the value_executor) - BOOST_TEST(static_cast(exec)); - bool called = false; - exec.post([&called]{ called = true; }); - BOOST_TEST(called); - } + // queue - destructor calls destroy + { + int invoked = 0; + int destroyed = 0; + test_handler h(invoked, destroyed); - void - testWrapSubmit() - { - int result = 0; - bool handler_called = false; - - value_executor ve; - executor exec = executor::wrap(ve); - exec.submit( - []{ return 42; }, - [&](system::result r) { - handler_called = true; - if(r.has_value()) - result = r.value(); - }); - - BOOST_TEST(handler_called); - BOOST_TEST_EQ(result, 42); - } - - void - run() - { - testDefaultConstruct(); - testConstructFromImpl(); - testCopyConstruct(); - testMoveConstruct(); - testCopyAssign(); - testMoveAssign(); - testPostLambda(); - testPostMultiple(); - testPostWithCapture(); - testPostWithMoveOnlyCapture(); - testQueuedExecution(); - testSharedReference(); - testSubmitNonVoid(); - testSubmitVoid(); - testSubmitException(); - testFactoryBasic(); - testFactoryRollback(); - - // Owning mode tests - testWrapBasic(); - testWrapTemporary(); - testWrapCopyConstruct(); - testWrapMoveConstruct(); - testWrapCopyAssign(); - testWrapMoveAssign(); - testWrapPostLambda(); - testWrapPostMultiple(); - testWrapSharedOwnership(); - testWrapLifetime(); - testWrapSubmit(); - } -}; - -TEST_SUITE( - execution_test, - "boost.capy.execution"); - -//----------------------------------------------------------------------------- - -#ifdef BOOST_CAPY_HAS_CORO - -template -T run_task_exec(task& t) -{ - while(!t.handle().done()) - t.handle().resume(); - return t.await_resume(); -} - -inline void run_task_exec(task& t) -{ - while(!t.handle().done()) - t.handle().resume(); - t.await_resume(); -} + execution_context::queue q; + q.push(&h); + // destructor should call destroy() + } -struct execution_coro_test -{ - void - testSubmitAwaitableNonVoid() - { - sync_executor ctx; + BOOST_TEST(invoked == 0); + BOOST_TEST(destroyed == 1); + } - auto run = [&ctx]() -> task + // executor - equality comparison { - executor exec(ctx); - int result = co_await exec.submit([]{ return 42; }); - co_return result; - }; - - auto t = run(); - BOOST_TEST_EQ(run_task_exec(t), 42); - } - - void - 
testSubmitAwaitableVoid() - { - bool executed = false; - sync_executor ctx; + test_context ctx1; + test_context ctx2; + test_executor e1(ctx1); + test_executor e2(ctx1); + test_executor e3(ctx2); + + BOOST_TEST(e1 == e2); + BOOST_TEST(!(e1 != e2)); + BOOST_TEST(e1 != e3); + BOOST_TEST(!(e1 == e3)); + } - auto run = [&ctx, &executed]() -> task + // executor - context() returns same reference for equal executors { - executor exec(ctx); - co_await exec.submit([&executed]{ executed = true; }); - co_return; - }; - - auto t = run(); - run_task_exec(t); - BOOST_TEST(executed); - } + test_context ctx; + test_executor e1(ctx); + test_executor e2(ctx); - void - testSubmitAwaitableMultiple() - { - sync_executor ctx; + BOOST_TEST(e1 == e2); + BOOST_TEST(&e1.context() == &e2.context()); + BOOST_TEST(&e1.context() == &ctx); + } - auto run = [&ctx]() -> task + // executor - copy preserves context { - executor exec(ctx); - int a = co_await exec.submit([]{ return 10; }); - int b = co_await exec.submit([]{ return 20; }); - int c = co_await exec.submit([]{ return 30; }); - co_return a + b + c; - }; - - auto t = run(); - BOOST_TEST_EQ(run_task_exec(t), 60); - } + test_context ctx; + test_executor e1(ctx); + test_executor e2(e1); - void - run() - { -#if 0 - testSubmitAwaitableNonVoid(); - testSubmitAwaitableVoid(); - testSubmitAwaitableMultiple(); -#endif + BOOST_TEST(e1 == e2); + BOOST_TEST(&e1.context() == &e2.context()); + } } }; TEST_SUITE( - execution_coro_test, - "boost.capy.execution.coro"); - -#endif + executor_test, + "boost.capy.executor"); } // capy } // boost diff --git a/test/unit/executor_work_guard.cpp b/test/unit/executor_work_guard.cpp new file mode 100644 index 00000000..898f9057 --- /dev/null +++ b/test/unit/executor_work_guard.cpp @@ -0,0 +1,222 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. 
+#include + +#include + +#include "test_suite.hpp" + +namespace boost { +namespace capy { + +// Minimal execution context for testing +struct guard_test_context +{ + int work_count = 0; +}; + +// Test executor that tracks work calls +struct guard_test_executor +{ + guard_test_context* ctx_ = nullptr; + + guard_test_executor() = default; + + explicit + guard_test_executor(guard_test_context& ctx) noexcept + : ctx_(&ctx) + { + } + + bool + operator==(guard_test_executor const& other) const noexcept + { + return ctx_ == other.ctx_; + } + + guard_test_context& + context() const noexcept + { + return *ctx_; + } + + void + on_work_started() const noexcept + { + ++ctx_->work_count; + } + + void + on_work_finished() const noexcept + { + --ctx_->work_count; + } + + std::coroutine_handle<> + dispatch(std::coroutine_handle<> h) const + { + return h; + } + + void + post(std::coroutine_handle<>) const + { + } + + void + defer(std::coroutine_handle<>) const + { + } +}; + +// Verify executor concept +static_assert(executor); + +struct executor_work_guard_test +{ + void + run() + { + // Construction calls on_work_started() + { + guard_test_context ctx; + BOOST_TEST(ctx.work_count == 0); + + { + guard_test_executor ex(ctx); + executor_work_guard guard(ex); + BOOST_TEST(ctx.work_count == 1); + BOOST_TEST(guard.owns_work()); + } + + BOOST_TEST(ctx.work_count == 0); + } + + // Destruction calls on_work_finished() + { + guard_test_context ctx; + + { + guard_test_executor ex(ctx); + executor_work_guard guard(ex); + BOOST_TEST(ctx.work_count == 1); + } + + BOOST_TEST(ctx.work_count == 0); + } + + // Copy constructor increments work count + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard1(ex); + BOOST_TEST(ctx.work_count == 1); + + { + executor_work_guard guard2(guard1); + BOOST_TEST(ctx.work_count == 2); + BOOST_TEST(guard1.owns_work()); + BOOST_TEST(guard2.owns_work()); + BOOST_TEST(guard1.get_executor() == guard2.get_executor()); + } + + BOOST_TEST(ctx.work_count == 1); + } + + // Move constructor transfers ownership (no extra calls) + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard1(ex); + BOOST_TEST(ctx.work_count == 1); + + executor_work_guard guard2(std::move(guard1)); + BOOST_TEST(ctx.work_count == 1); // No change + BOOST_TEST(!guard1.owns_work()); + BOOST_TEST(guard2.owns_work()); + } + + // reset() releases work early + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard(ex); + BOOST_TEST(ctx.work_count == 1); + BOOST_TEST(guard.owns_work()); + + guard.reset(); + BOOST_TEST(ctx.work_count == 0); + BOOST_TEST(!guard.owns_work()); + + // Second reset has no effect + guard.reset(); + BOOST_TEST(ctx.work_count == 0); + BOOST_TEST(!guard.owns_work()); + } + + // owns_work() returns correct state + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard(ex); + BOOST_TEST(guard.owns_work() == true); + + guard.reset(); + BOOST_TEST(guard.owns_work() == false); + } + + // get_executor() returns correct executor + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard(ex); + BOOST_TEST(guard.get_executor() == ex); + BOOST_TEST(&guard.get_executor().context() == &ctx); + } + + // make_work_guard(Executor) factory function + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + auto guard = make_work_guard(ex); + BOOST_TEST(ctx.work_count == 1); + BOOST_TEST(guard.owns_work()); + BOOST_TEST(guard.get_executor() == ex); + } 
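+
+        // Typical usage, shown here only as an illustrative sketch: a work
+        // guard keeps a context's run loop from returning while work is
+        // still pending elsewhere. The names `ctx.get_executor()` and
+        // `ctx.run()` below are assumptions for illustration, not part of
+        // this test's API.
+        //
+        //     auto guard = make_work_guard(ctx.get_executor());
+        //     std::thread t([&]{ ctx.run(); }); // run() will not return early
+        //     // ... submit work to ctx from other threads ...
+        //     guard.reset();                    // allow run() to return
+        //     t.join();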
+ + // Copy from guard that doesn't own work + { + guard_test_context ctx; + guard_test_executor ex(ctx); + + executor_work_guard guard1(ex); + guard1.reset(); + BOOST_TEST(ctx.work_count == 0); + + executor_work_guard guard2(guard1); + BOOST_TEST(ctx.work_count == 0); // No increment + BOOST_TEST(!guard2.owns_work()); + } + } +}; + +TEST_SUITE( + executor_work_guard_test, + "boost.capy.executor_work_guard"); + +} // capy +} // boost diff --git a/test/unit/intrusive_list.cpp b/test/unit/intrusive_list.cpp new file mode 100644 index 00000000..5e91b592 --- /dev/null +++ b/test/unit/intrusive_list.cpp @@ -0,0 +1,308 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. +#include + +#include + +#include "test_suite.hpp" + +namespace boost { +namespace capy { + +struct item : intrusive_list::node +{ + int value; + + explicit item(int v) : value(v) {} +}; + +struct intrusive_list_test +{ + void + run() + { + // default construction - empty list + { + intrusive_list q; + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop_front() == nullptr); + } + + // push and pop single element + { + intrusive_list q; + item w(42); + q.push_back(&w); + BOOST_TEST(!q.empty()); + item* p = q.pop_front(); + BOOST_TEST(p == &w); + BOOST_TEST(p->value == 42); + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop_front() == nullptr); + } + + // push multiple, pop in FIFO order + { + intrusive_list q; + item w1(1); + item w2(2); + item w3(3); + + q.push_back(&w1); + q.push_back(&w2); + q.push_back(&w3); + + BOOST_TEST(!q.empty()); + + item* p1 = q.pop_front(); + BOOST_TEST(p1 == &w1); + BOOST_TEST(p1->value == 1); + + item* p2 = q.pop_front(); + BOOST_TEST(p2 == &w2); + BOOST_TEST(p2->value == 2); + + item* p3 = q.pop_front(); + BOOST_TEST(p3 == &w3); + BOOST_TEST(p3->value == 3); + + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop_front() == nullptr); + } + + // interleaved push and pop + { + intrusive_list q; + item w1(10); + item w2(20); + item w3(30); + + q.push_back(&w1); + q.push_back(&w2); + + item* p1 = q.pop_front(); + BOOST_TEST(p1 == &w1); + + q.push_back(&w3); + + item* p2 = q.pop_front(); + BOOST_TEST(p2 == &w2); + + item* p3 = q.pop_front(); + BOOST_TEST(p3 == &w3); + + BOOST_TEST(q.empty()); + } + + // reuse element after pop + { + intrusive_list q; + item w(100); + + q.push_back(&w); + item* p = q.pop_front(); + BOOST_TEST(p == &w); + + // push same element again + q.push_back(&w); + p = q.pop_front(); + BOOST_TEST(p == &w); + BOOST_TEST(q.empty()); + } + + // move constructor + { + intrusive_list q1; + item w1(1); + item w2(2); + q1.push_back(&w1); + q1.push_back(&w2); + + intrusive_list q2(std::move(q1)); + BOOST_TEST(q1.empty()); + BOOST_TEST(!q2.empty()); + + item* p1 = q2.pop_front(); + BOOST_TEST(p1 == &w1); + item* p2 = q2.pop_front(); + BOOST_TEST(p2 == &w2); + BOOST_TEST(q2.empty()); + } + + // move constructor from empty + { + intrusive_list q1; + intrusive_list q2(std::move(q1)); + BOOST_TEST(q1.empty()); + BOOST_TEST(q2.empty()); + } + + // splice non-empty into non-empty + { + intrusive_list q1; + intrusive_list q2; + item w1(1); + item w2(2); + item w3(3); + item w4(4); + + q1.push_back(&w1); + q1.push_back(&w2); + q2.push_back(&w3); + q2.push_back(&w4); + + q1.splice_back(q2); + + BOOST_TEST(q2.empty()); + 
BOOST_TEST(!q1.empty()); + + BOOST_TEST(q1.pop_front() == &w1); + BOOST_TEST(q1.pop_front() == &w2); + BOOST_TEST(q1.pop_front() == &w3); + BOOST_TEST(q1.pop_front() == &w4); + BOOST_TEST(q1.empty()); + } + + // splice non-empty into empty + { + intrusive_list q1; + intrusive_list q2; + item w1(1); + item w2(2); + + q2.push_back(&w1); + q2.push_back(&w2); + + q1.splice_back(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(!q1.empty()); + + BOOST_TEST(q1.pop_front() == &w1); + BOOST_TEST(q1.pop_front() == &w2); + BOOST_TEST(q1.empty()); + } + + // splice empty into non-empty + { + intrusive_list q1; + intrusive_list q2; + item w1(1); + + q1.push_back(&w1); + + q1.splice_back(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(!q1.empty()); + BOOST_TEST(q1.pop_front() == &w1); + BOOST_TEST(q1.empty()); + } + + // splice empty into empty + { + intrusive_list q1; + intrusive_list q2; + + q1.splice_back(q2); + + BOOST_TEST(q1.empty()); + BOOST_TEST(q2.empty()); + } + + // remove only element + { + intrusive_list q; + item w(1); + q.push_back(&w); + q.remove(&w); + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop_front() == nullptr); + } + + // remove from head + { + intrusive_list q; + item w1(1); + item w2(2); + item w3(3); + q.push_back(&w1); + q.push_back(&w2); + q.push_back(&w3); + + q.remove(&w1); + + BOOST_TEST(!q.empty()); + BOOST_TEST(q.pop_front() == &w2); + BOOST_TEST(q.pop_front() == &w3); + BOOST_TEST(q.empty()); + } + + // remove from tail + { + intrusive_list q; + item w1(1); + item w2(2); + item w3(3); + q.push_back(&w1); + q.push_back(&w2); + q.push_back(&w3); + + q.remove(&w3); + + BOOST_TEST(!q.empty()); + BOOST_TEST(q.pop_front() == &w1); + BOOST_TEST(q.pop_front() == &w2); + BOOST_TEST(q.empty()); + } + + // remove from middle + { + intrusive_list q; + item w1(1); + item w2(2); + item w3(3); + q.push_back(&w1); + q.push_back(&w2); + q.push_back(&w3); + + q.remove(&w2); + + BOOST_TEST(!q.empty()); + BOOST_TEST(q.pop_front() == &w1); + BOOST_TEST(q.pop_front() == &w3); + BOOST_TEST(q.empty()); + } + + // reuse element after remove + { + intrusive_list q; + item w(100); + + q.push_back(&w); + q.remove(&w); + BOOST_TEST(q.empty()); + + // push same element again + q.push_back(&w); + item* p = q.pop_front(); + BOOST_TEST(p == &w); + BOOST_TEST(q.empty()); + } + } +}; + +TEST_SUITE( + intrusive_list_test, + "boost.capy.intrusive_list"); + +} // capy +} // boost diff --git a/test/unit/intrusive_queue.cpp b/test/unit/intrusive_queue.cpp new file mode 100644 index 00000000..8fc89c51 --- /dev/null +++ b/test/unit/intrusive_queue.cpp @@ -0,0 +1,228 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. 
+#include + +#include + +#include "test_suite.hpp" + +namespace boost { +namespace capy { + +struct queue_item : intrusive_queue::node +{ + int value; + + explicit queue_item(int v) : value(v) {} +}; + +struct intrusive_queue_test +{ + void + run() + { + // default construction - empty queue + { + intrusive_queue q; + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop() == nullptr); + } + + // push and pop single element + { + intrusive_queue q; + queue_item w(42); + q.push(&w); + BOOST_TEST(!q.empty()); + queue_item* p = q.pop(); + BOOST_TEST(p == &w); + BOOST_TEST(p->value == 42); + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop() == nullptr); + } + + // push multiple, pop in FIFO order + { + intrusive_queue q; + queue_item w1(1); + queue_item w2(2); + queue_item w3(3); + + q.push(&w1); + q.push(&w2); + q.push(&w3); + + BOOST_TEST(!q.empty()); + + queue_item* p1 = q.pop(); + BOOST_TEST(p1 == &w1); + BOOST_TEST(p1->value == 1); + + queue_item* p2 = q.pop(); + BOOST_TEST(p2 == &w2); + BOOST_TEST(p2->value == 2); + + queue_item* p3 = q.pop(); + BOOST_TEST(p3 == &w3); + BOOST_TEST(p3->value == 3); + + BOOST_TEST(q.empty()); + BOOST_TEST(q.pop() == nullptr); + } + + // interleaved push and pop + { + intrusive_queue q; + queue_item w1(10); + queue_item w2(20); + queue_item w3(30); + + q.push(&w1); + q.push(&w2); + + queue_item* p1 = q.pop(); + BOOST_TEST(p1 == &w1); + + q.push(&w3); + + queue_item* p2 = q.pop(); + BOOST_TEST(p2 == &w2); + + queue_item* p3 = q.pop(); + BOOST_TEST(p3 == &w3); + + BOOST_TEST(q.empty()); + } + + // reuse element after pop + { + intrusive_queue q; + queue_item w(100); + + q.push(&w); + queue_item* p = q.pop(); + BOOST_TEST(p == &w); + + // push same element again + q.push(&w); + p = q.pop(); + BOOST_TEST(p == &w); + BOOST_TEST(q.empty()); + } + + // move constructor + { + intrusive_queue q1; + queue_item w1(1); + queue_item w2(2); + q1.push(&w1); + q1.push(&w2); + + intrusive_queue q2(std::move(q1)); + BOOST_TEST(q1.empty()); + BOOST_TEST(!q2.empty()); + + queue_item* p1 = q2.pop(); + BOOST_TEST(p1 == &w1); + queue_item* p2 = q2.pop(); + BOOST_TEST(p2 == &w2); + BOOST_TEST(q2.empty()); + } + + // move constructor from empty + { + intrusive_queue q1; + intrusive_queue q2(std::move(q1)); + BOOST_TEST(q1.empty()); + BOOST_TEST(q2.empty()); + } + + // splice non-empty into non-empty + { + intrusive_queue q1; + intrusive_queue q2; + queue_item w1(1); + queue_item w2(2); + queue_item w3(3); + queue_item w4(4); + + q1.push(&w1); + q1.push(&w2); + q2.push(&w3); + q2.push(&w4); + + q1.splice(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(!q1.empty()); + + BOOST_TEST(q1.pop() == &w1); + BOOST_TEST(q1.pop() == &w2); + BOOST_TEST(q1.pop() == &w3); + BOOST_TEST(q1.pop() == &w4); + BOOST_TEST(q1.empty()); + } + + // splice non-empty into empty + { + intrusive_queue q1; + intrusive_queue q2; + queue_item w1(1); + queue_item w2(2); + + q2.push(&w1); + q2.push(&w2); + + q1.splice(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(!q1.empty()); + + BOOST_TEST(q1.pop() == &w1); + BOOST_TEST(q1.pop() == &w2); + BOOST_TEST(q1.empty()); + } + + // splice empty into non-empty + { + intrusive_queue q1; + intrusive_queue q2; + queue_item w1(1); + + q1.push(&w1); + + q1.splice(q2); + + BOOST_TEST(q2.empty()); + BOOST_TEST(!q1.empty()); + BOOST_TEST(q1.pop() == &w1); + BOOST_TEST(q1.empty()); + } + + // splice empty into empty + { + intrusive_queue q1; + intrusive_queue q2; + + q1.splice(q2); + + BOOST_TEST(q1.empty()); + BOOST_TEST(q2.empty()); + } + } +}; + +TEST_SUITE( + intrusive_queue_test, + 
"boost.capy.intrusive_queue"); + +} // capy +} // boost diff --git a/test/unit/task.cpp b/test/unit/task.cpp index e2e19576..ff540e8b 100644 --- a/test/unit/task.cpp +++ b/test/unit/task.cpp @@ -10,70 +10,111 @@ // Test that header file is self-contained. #include -#ifdef BOOST_CAPY_HAS_CORO - #include -#include +#include #include "test_suite.hpp" #include -#include #include #include +#include #include namespace boost { namespace capy { -/** Simple synchronous executor for testing. +static_assert(affine_awaitable, any_dispatcher>); +static_assert(affine_awaitable, any_dispatcher>); +static_assert(stoppable_awaitable, any_dispatcher>); +static_assert(stoppable_awaitable, any_dispatcher>); + +/** Simple synchronous dispatcher for testing. + + Satisfies the dispatcher concept: callable with (coro) returning coro. + Executes inline (returns the handle for symmetric transfer). + Uses a pointer to external counter to allow copying. */ -struct sync_executor +struct test_dispatcher { - friend struct executor::access; - - std::atomic alloc_count{0}; - std::atomic submit_count{0}; + int* dispatch_count_; -private: - struct header + explicit test_dispatcher(int& count) + : dispatch_count_(&count) { - std::size_t size; - }; + } - void* - allocate(std::size_t size, std::size_t /*align*/) + coro operator()(coro h) const { - ++alloc_count; - std::size_t total = sizeof(header) + size; - void* p = std::malloc(total); - auto* h = new(p) header{total}; - return h + 1; + ++(*dispatch_count_); + return h; // Inline execution for sync tests } +}; - void - deallocate(void* p, std::size_t /*size*/, std::size_t /*align*/) +static_assert(dispatcher); + +/** Tracking dispatcher that logs dispatch calls with an ID. + Uses pointers to external storage to allow copying. +*/ +struct tracking_dispatcher +{ + int id; + int* dispatch_count_; + std::vector* dispatch_log; + + tracking_dispatcher(int id_, int& count, std::vector* log = nullptr) + : id(id_) + , dispatch_count_(&count) + , dispatch_log(log) { - auto* h = static_cast(p) - 1; - std::free(h); } - void - submit(executor::work* w) + coro operator()(coro h) const { - ++submit_count; - w->invoke(); - w->~work(); - deallocate(w, 0, 0); + ++(*dispatch_count_); + if (dispatch_log) + dispatch_log->push_back(id); + return h; // Inline execution } }; +static_assert(dispatcher); + +/** Run a task to completion by manually stepping through it. + + Takes ownership of the task via release() and runs until done. +*/ template -T run_task(task& t) +T run_task(task t) { - while (!t.handle().done()) - t.handle().resume(); - return t.await_resume(); + auto h = t.release(); // Take ownership + while (!h.done()) + h.resume(); + auto& p = h.promise(); + // Check for exception first (result may be empty if exception occurred) + if (p.ep_) + { + auto ep = p.ep_; + h.destroy(); + std::rethrow_exception(ep); + } + if constexpr (!std::is_void_v) + { + auto result = std::move(*p.result_); + h.destroy(); + return result; + } + else + { + h.destroy(); + } +} + +/** Run a void task to completion. 
+*/ +inline void run_void_task(task t) +{ + run_task(std::move(t)); } struct test_exception : std::runtime_error @@ -109,14 +150,12 @@ struct task_test { // task returning int { - auto t = returns_int(); - BOOST_TEST_EQ(run_task(t), 42); + BOOST_TEST_EQ(run_task(returns_int()), 42); } // task returning string { - auto t = returns_string(); - BOOST_TEST_EQ(run_task(t), "hello"); + BOOST_TEST_EQ(run_task(returns_string()), "hello"); } } @@ -139,18 +178,12 @@ struct task_test { // task that throws custom exception { - auto t = throws_exception(); - while (!t.handle().done()) - t.handle().resume(); - BOOST_TEST_THROWS(t.await_resume(), test_exception); + BOOST_TEST_THROWS(run_task(throws_exception()), test_exception); } // task that throws std::runtime_error { - auto t = throws_std_exception(); - while (!t.handle().done()) - t.handle().resume(); - BOOST_TEST_THROWS(t.await_resume(), std::runtime_error); + BOOST_TEST_THROWS(run_task(throws_std_exception()), std::runtime_error); } } @@ -216,28 +249,22 @@ struct task_test { // outer task awaits inner task with value { - auto t = outer_task_awaits_inner(); - BOOST_TEST_EQ(run_task(t), 101); + BOOST_TEST_EQ(run_task(outer_task_awaits_inner()), 101); } // outer task awaits inner task that throws { - auto t = outer_task_awaits_throwing_inner(); - while (!t.handle().done()) - t.handle().resume(); - BOOST_TEST_THROWS(t.await_resume(), test_exception); + BOOST_TEST_THROWS(run_task(outer_task_awaits_throwing_inner()), test_exception); } // outer task catches exception from inner task { - auto t = outer_task_catches_inner_exception(); - BOOST_TEST_EQ(run_task(t), 999); + BOOST_TEST_EQ(run_task(outer_task_catches_inner_exception()), 999); } // chained tasks (3 levels) { - auto t = chained_tasks(); - BOOST_TEST_EQ(run_task(t), 25); + BOOST_TEST_EQ(run_task(chained_tasks()), 25); } } @@ -247,14 +274,20 @@ struct task_test // move constructor { auto t1 = returns_int(); - auto h = t1.handle(); - BOOST_TEST(h); + auto h1 = t1.release(); + BOOST_TEST(h1); + // Re-wrap for move test task t2(std::move(t1)); - BOOST_TEST(!t1.handle()); - BOOST_TEST(t2.handle() == h); - - BOOST_TEST_EQ(run_task(t2), 42); + // t1 is now moved-from, t2 should be empty since t1 was released + // This test verifies move semantics + BOOST_TEST(!t2.release()); // t2 is empty + + // Run the released handle + while (!h1.done()) + h1.resume(); + BOOST_TEST_EQ(*h1.promise().result_, 42); + h1.destroy(); } // release() @@ -262,7 +295,7 @@ struct task_test auto t = returns_int(); auto h = t.release(); BOOST_TEST(h); - BOOST_TEST(!t.handle()); + BOOST_TEST(!t.release()); // Already released while (!h.done()) h.resume(); @@ -310,16 +343,40 @@ struct task_test void testTaskAwaitsAsyncResult() { - // task awaits single async_op + // task awaits single async_op - needs async_run for dispatcher { - auto t = task_awaits_async_op(); - BOOST_TEST_EQ(run_task(t), 124); + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + int result = 0; + bool completed = false; + + async_run(d)(task_awaits_async_op(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); + + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 124); } // task awaits multiple async_ops { - auto t = task_awaits_multiple_async_ops(); - BOOST_TEST_EQ(run_task(t), 579); + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + int result = 0; + bool completed = false; + + async_run(d)(task_awaits_multiple_async_ops(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) 
{}); + + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 579); } } @@ -348,19 +405,13 @@ struct task_test void testVoidTaskBasic() { - auto t = void_task_basic(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); // should not throw + run_void_task(void_task_basic()); // should not throw } void testVoidTaskException() { - auto t = void_task_throws(); - while (!t.handle().done()) - t.handle().resume(); - BOOST_TEST_THROWS(t.await_resume(), test_exception); + BOOST_TEST_THROWS(run_void_task(void_task_throws()), test_exception); } static task @@ -383,18 +434,12 @@ struct task_test { // void task awaits value-returning task { - auto t = void_task_awaits_value(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); + run_void_task(void_task_awaits_value()); } // void task awaits another void task { - auto t = void_task_awaits_void(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); + run_void_task(void_task_awaits_void()); } } @@ -416,22 +461,24 @@ struct task_test void testVoidTaskChain() { - auto t = void_task_chain(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); + run_void_task(void_task_chain()); } void testVoidTaskMove() { auto t1 = void_task_basic(); - auto h = t1.handle(); + auto h = t1.release(); BOOST_TEST(h); task t2(std::move(t1)); - BOOST_TEST(!t1.handle()); - BOOST_TEST(t2.handle() == h); + // t1 was already released, t2 should be empty + BOOST_TEST(!t2.release()); + + // Clean up the handle + while (!h.done()) + h.resume(); + h.destroy(); } static task @@ -445,30 +492,27 @@ struct task_test void testVoidTaskAwaitsAsyncResult() { - auto t = void_task_awaits_async_op(); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); + // Needs async_run since void_task_awaits_async_op awaits an async_op + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + + async_run(d)(void_task_awaits_async_op(), + [&]() { completed = true; }, + [](std::exception_ptr) {}); + + BOOST_TEST(completed); } - // executor affinity tests + // Dispatcher tests using async_run - void - testExecutorDefault() + static async_op + async_op_immediate(int value) { - // task executor defaults to empty (no affinity) - { - auto t = returns_int(); - auto& p = t.handle().promise(); - BOOST_TEST(!p.get_executor()); - } - - // task executor defaults to empty (no affinity) - { - auto t = void_task_basic(); - auto& p = t.handle().promise(); - BOOST_TEST(!p.get_executor()); - } + return make_async_op( + [value](auto cb) { + cb(value); + }); } static task @@ -479,18 +523,25 @@ struct task_test } void - testExecutorUsedByAwait() + testDispatcherUsedByAwait() { - // Verify that executor is used when awaiting - sync_executor ctx; - executor ex(ctx); + // Verify that dispatcher is used when awaiting via async_run + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; - auto t = task_with_async_for_affinity_test(); - t.on(ex); + async_run(d)(task_with_async_for_affinity_test(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST_EQ(run_task(t), 124); - // Work should have been posted through the executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 124); + // Work should have been dispatched + BOOST_TEST_GE(dispatch_count, 1); } static task @@ -502,106 +553,20 @@ struct task_test } void - testVoidTaskExecutorUsedByAwait() - { - // Verify that executor 
is used for void tasks - sync_executor ctx; - executor ex(ctx); - - auto t = void_task_with_async_for_affinity_test(); - t.on(ex); - - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - - // Work should have been posted through the executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); - } - - // on() method tests - - void - testOnSetsExecutor() + testVoidTaskDispatcherUsedByAwait() { - // Verify on() sets the executor for task - sync_executor ctx; - executor ex(ctx); + // Verify that dispatcher is used for void tasks + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; - auto t = task_with_async_for_affinity_test(); - t.on(ex); + async_run(d)(void_task_with_async_for_affinity_test(), + [&]() { completed = true; }, + [](std::exception_ptr) {}); - // Executor should now be set - BOOST_TEST(static_cast(t.handle().promise().get_executor())); - BOOST_TEST_EQ(run_task(t), 124); - // Work should have been posted through the executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); - } - - void - testOnSetsExecutorVoid() - { - // Verify on() sets the executor for task - sync_executor ctx; - executor ex(ctx); - - auto t = void_task_with_async_for_affinity_test(); - t.on(ex); - - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - - // Work should have been posted through the executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); - } - - void - testOnFluentSyntax() - { - // Verify fluent syntax works with rvalue - sync_executor ctx; - executor ex(ctx); - - auto make_task = []() -> task { - co_return co_await async_returns_value(); - }; - - auto t = make_task().on(ex); - BOOST_TEST_EQ(run_task(t), 123); - BOOST_TEST_GE(ctx.submit_count.load(), 1); - } - - void - testOnFluentSyntaxVoid() - { - // Verify fluent syntax works with rvalue for void tasks - sync_executor ctx; - executor ex(ctx); - - auto make_task = []() -> task { - co_await async_returns_value(); - co_return; - }; - - auto t = make_task().on(ex); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - - BOOST_TEST_GE(ctx.submit_count.load(), 1); - } - - void - testOnLvalueReturnsReference() - { - // Verify on() returns reference to same task for lvalue - sync_executor ctx; - executor ex(ctx); - - auto t = returns_int(); - auto& ref = t.on(ex); - BOOST_TEST(&ref == &t); + BOOST_TEST(completed); + // Work should have been dispatched + BOOST_TEST_GE(dispatch_count, 1); } // Affinity propagation tests @@ -630,16 +595,23 @@ struct task_test testAffinityPropagation() { // Verify affinity propagates through task chain (ABC problem) - // a has affinity, b and c should inherit it - sync_executor ctx; - executor ex(ctx); + // The dispatcher from async_run should be inherited by nested tasks + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; - auto t = outer_task_a(); - t.on(ex); + async_run(d)(outer_task_a(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST_EQ(run_task(t), 125); // 123 + 1 + 1 - // All async completions should dispatch through executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 125); // 123 + 1 + 1 + // All async completions should dispatch through the dispatcher + BOOST_TEST_GE(dispatch_count, 1); } static task @@ -667,128 +639,41 @@ struct task_test testAffinityPropagationVoid() { // Verify affinity propagates through void task chain - sync_executor ctx; - executor ex(ctx); + int 
dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; - auto t = outer_void_task_a(); - t.on(ex); + async_run(d)(outer_void_task_a(), + [&]() { completed = true; }, + [](std::exception_ptr) {}); - while (!t.handle().done()) - t.handle().resume(); - t.await_resume(); - - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_GE(dispatch_count, 1); } void - testExplicitAffinityOverridesInheritance() + testNoDispatcherRunsInline() { - // Verify explicit affinity via .on() is not overwritten - sync_executor ctx1; - sync_executor ctx2; - executor ex1(ctx1); - executor ex2(ctx2); - - // Create a task with explicit affinity - auto make_inner = [ex2]() -> task { - return inner_task_c().on(ex2); // explicit affinity to ex2 - }; - - auto outer = [make_inner]() -> task { - int v = co_await make_inner(); - co_return v + 1; - }; - - auto t = outer(); - t.on(ex1); // outer has affinity to ex1 - - BOOST_TEST_EQ(run_task(t), 124); - // Inner should use ex2, not inherit ex1 - BOOST_TEST_GE(ctx2.submit_count.load(), 1); + // Verify that simple tasks can run without async_run (manual stepping) + // Note: Only works for tasks that don't await dispatcher-aware awaitables + BOOST_TEST_EQ(run_task(chained_tasks()), 25); } - void - testNoAffinityRunsInline() - { - // Verify that without affinity, no executor dispatch occurs - auto t = outer_task_a(); // no .on() call - - BOOST_TEST_EQ(run_task(t), 125); - // Task should complete without any executor - } - - // Affinity preservation tests - - /** Executor that tracks submissions with an ID. - */ - struct tracking_executor - { - friend struct executor::access; - - int id; - std::atomic submit_count{0}; - mutable std::vector* submission_log; - - explicit - tracking_executor(int id_, std::vector* log) - : id(id_) - , submission_log(log) - { - } - - private: - struct header - { - std::size_t size; - }; - - void* - allocate(std::size_t size, std::size_t /*align*/) - { - std::size_t total = sizeof(header) + size; - void* p = std::malloc(total); - auto* h = new(p) header{total}; - return h + 1; - } - - void - deallocate(void* p, std::size_t /*size*/, std::size_t /*align*/) - { - auto* h = static_cast(p) - 1; - std::free(h); - } - - void - submit(executor::work* w) - { - ++submit_count; - if (submission_log) - submission_log->push_back(id); - w->invoke(); - w->~work(); - deallocate(w, 0, 0); - } - }; - - static async_op - async_op_immediate(int value) - { - return make_async_op( - [value](auto cb) { - cb(value); - }); - } + // Affinity preservation tests with tracking dispatcher void testInheritedAffinityVerification() { // Test that child tasks actually use inherited affinity - // by checking that all resumptions go through the parent's executor + // by checking that all resumptions go through the parent's dispatcher std::vector log; - tracking_executor ctx(1, &log); - executor ex(ctx); + int dispatch_count = 0; + tracking_dispatcher d(1, dispatch_count, &log); - // Chain: outer -> middle -> inner, only outer has .on() + bool completed = false; + int result = 0; + + // Chain: outer -> middle -> inner auto inner = []() -> task { co_return co_await async_op_immediate(100); }; @@ -803,91 +688,31 @@ struct task_test co_return v + co_await async_op_immediate(1); }; - auto t = outer(); - t.on(ex); - - BOOST_TEST_EQ(run_task(t), 111); - // All three async_ops should have resumed through executor 1 - BOOST_TEST_GE(ctx.submit_count.load(), 3); - for (int id : log) - BOOST_TEST_EQ(id, 1); - } - - void - 
testCrossExecutorAsyncOp() - { - // Test: async_op "completes" but task resumes on its affinity executor - // This verifies the dispatcher is correctly used for resumption - std::vector log; - tracking_executor ctx1(1, &log); - tracking_executor ctx2(2, &log); - executor ex1(ctx1); - executor ex2(ctx2); - - // Create a task with affinity to ex1 - auto task_with_affinity = []() -> task { - // This async_op completes inline (simulating completion on "other" context) - int v = co_await async_op_immediate(42); - co_return v; - }; - - auto t = task_with_affinity(); - t.on(ex1); - - BOOST_TEST_EQ(run_task(t), 42); - // Resumption should go through ex1, not ex2 - BOOST_TEST_GE(ctx1.submit_count.load(), 1); - BOOST_TEST_EQ(ctx2.submit_count.load(), 0); - // All logged submissions should be to executor 1 + async_run(d)(outer(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); + + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 111); + // All three async_ops should have resumed through dispatcher 1 + BOOST_TEST_GE(dispatch_count, 3); for (int id : log) BOOST_TEST_EQ(id, 1); } - void - testMixedAffinityChain() - { - // Test: outer has ex1, inner explicitly has ex2 - // Verify each task uses its own affinity - std::vector outer_log; - std::vector inner_log; - tracking_executor ctx1(1, &outer_log); - tracking_executor ctx2(2, &inner_log); - executor ex1(ctx1); - executor ex2(ctx2); - - // Inner task with explicit affinity to ex2 - auto make_inner = [ex2]() -> task { - auto inner = []() -> task { - co_return co_await async_op_immediate(100); - }; - return inner().on(ex2); - }; - - // Outer task with affinity to ex1 - auto outer = [make_inner]() -> task { - int v = co_await make_inner(); - // This await should use ex1 (outer's affinity) - v += co_await async_op_immediate(1); - co_return v; - }; - - auto t = outer(); - t.on(ex1); - - BOOST_TEST_EQ(run_task(t), 101); - // Inner's async should use ex2 - BOOST_TEST_GE(ctx2.submit_count.load(), 1); - // Outer's async should use ex1 - BOOST_TEST_GE(ctx1.submit_count.load(), 1); - } - void testAffinityPreservedAcrossMultipleAwaits() { // Test that affinity is preserved across multiple co_await expressions std::vector log; - tracking_executor ctx(1, &log); - executor ex(ctx); + int dispatch_count = 0; + tracking_dispatcher d(1, dispatch_count, &log); + + bool completed = false; + int result = 0; auto multi_await = []() -> task { int sum = 0; @@ -899,13 +724,18 @@ struct task_test co_return sum; }; - auto t = multi_await(); - t.on(ex); - - BOOST_TEST_EQ(run_task(t), 15); - // All 5 awaits should use the same executor - BOOST_TEST_EQ(ctx.submit_count.load(), 5); - BOOST_TEST_EQ(log.size(), 5u); + async_run(d)(multi_await(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); + + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 15); + // 6 dispatches: 1 from async_run start + 5 from async_ops completing + BOOST_TEST_EQ(dispatch_count, 6); + BOOST_TEST_EQ(log.size(), 6u); for (int id : log) BOOST_TEST_EQ(id, 1); } @@ -915,10 +745,11 @@ struct task_test { // Test affinity propagation through void task nesting std::vector log; - tracking_executor ctx(1, &log); - executor ex(ctx); + int dispatch_count = 0; + tracking_dispatcher d(1, dispatch_count, &log); std::atomic counter{0}; + bool completed = false; auto leaf = [&counter]() -> task { co_await async_op_immediate(0); @@ -940,16 +771,14 @@ struct task_test co_return; }; - auto t = root(); - t.on(ex); - - while (!t.handle().done()) - t.handle().resume(); 
- t.await_resume(); + async_run(d)(root(), + [&]() { completed = true; }, + [](std::exception_ptr) {}); + BOOST_TEST(completed); BOOST_TEST_EQ(counter.load(), 3); - // All async_ops should dispatch through executor - BOOST_TEST_GE(ctx.submit_count.load(), 3); + // All async_ops should dispatch through the dispatcher + BOOST_TEST_GE(dispatch_count, 3); for (int id : log) BOOST_TEST_EQ(id, 1); } @@ -959,8 +788,11 @@ struct task_test { // Test that when child task completes, it resumes parent via dispatcher std::vector log; - tracking_executor ctx(1, &log); - executor ex(ctx); + int dispatch_count = 0; + tracking_dispatcher d(1, dispatch_count, &log); + + bool completed = false; + int result = 0; // Simple child that just returns a value auto child = []() -> task { @@ -973,114 +805,128 @@ struct task_test co_return v + 1; }; - auto t = parent(); - t.on(ex); + async_run(d)(parent(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST_EQ(run_task(t), 43); - // Child's completion should dispatch through executor - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 43); + // Child's completion should dispatch through the dispatcher + BOOST_TEST_GE(dispatch_count, 1); } - // spawn() tests + // async_run() tests (replacing old spawn() tests) void - testSpawnValueTask() + testAsyncRunValueTask() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; auto compute = []() -> task { co_return 42; }; - spawn(ex, compute(), [&](auto result) { - received = result; - }); + async_run(d)(compute(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); - BOOST_TEST_EQ(*(*received), 42); - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 42); + BOOST_TEST_GE(dispatch_count, 1); } void - testSpawnVoidTask() + testAsyncRunVoidTask() { - sync_executor ctx; - executor ex(ctx); + int dispatch_count = 0; + test_dispatcher d(dispatch_count); bool task_done = false; - std::optional> received; + bool completed = false; auto do_work = [&task_done]() -> task { task_done = true; co_return; }; - spawn(ex, do_work(), [&](auto result) { - received = result; - }); + async_run(d)(do_work(), + [&]() { completed = true; }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); + BOOST_TEST(completed); BOOST_TEST(task_done); - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST_GE(dispatch_count, 1); } void - testSpawnTaskWithException() + testAsyncRunTaskWithException() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + bool caught_exception = false; auto throwing_task = []() -> task { - throw_test_exception("spawn test"); + throw_test_exception("async_run test"); co_return 0; }; - spawn(ex, throwing_task(), [&](auto result) { - received = result; - }); + async_run(d)(throwing_task(), + [&](int) { completed = true; }, + [&](std::exception_ptr ep) { + try { + std::rethrow_exception(ep); + } catch (test_exception const&) { + caught_exception = true; + } + }); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_error()); - bool caught = false; - try { std::rethrow_exception(received->error()); } - catch 
(test_exception const&) { caught = true; } - BOOST_TEST(caught); + BOOST_TEST(!completed); + BOOST_TEST(caught_exception); } void - testSpawnVoidTaskWithException() + testAsyncRunVoidTaskWithException() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + bool caught_exception = false; auto throwing_void_task = []() -> task { - throw_test_exception("void spawn exception"); + throw_test_exception("void async_run exception"); co_return; }; - spawn(ex, throwing_void_task(), [&](auto result) { - received = result; - }); + async_run(d)(throwing_void_task(), + [&]() { completed = true; }, + [&](std::exception_ptr ep) { + try { + std::rethrow_exception(ep); + } catch (test_exception const&) { + caught_exception = true; + } + }); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_error()); - bool caught = false; - try { std::rethrow_exception(received->error()); } - catch (test_exception const&) { caught = true; } - BOOST_TEST(caught); + BOOST_TEST(!completed); + BOOST_TEST(caught_exception); } void - testSpawnWithNestedAwaits() + testAsyncRunWithNestedAwaits() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; auto inner = []() -> task { co_return 10; @@ -1092,44 +938,50 @@ struct task_test co_return a + b; }; - spawn(ex, outer(), [&](auto result) { - received = result; - }); + async_run(d)(outer(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); - BOOST_TEST_EQ(*(*received), 20); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 20); } void - testSpawnWithAsyncOp() + testAsyncRunWithAsyncOp() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; auto task_with_async = []() -> task { int v = co_await async_op_immediate(100); co_return v + 1; }; - spawn(ex, task_with_async(), [&](auto result) { - received = result; - }); + async_run(d)(task_with_async(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); - BOOST_TEST_EQ(*(*received), 101); - BOOST_TEST_GE(ctx.submit_count.load(), 1); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 101); + BOOST_TEST_GE(dispatch_count, 1); } void - testSpawnAffinityPropagation() + testAsyncRunAffinityPropagation() { std::vector log; - tracking_executor ctx(1, &log); - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + tracking_dispatcher d(1, dispatch_count, &log); + bool completed = false; + int result = 0; auto inner = []() -> task { co_return co_await async_op_immediate(50); @@ -1141,76 +993,73 @@ struct task_test co_return v; }; - spawn(ex, outer(), [&](auto result) { - received = result; - }); + async_run(d)(outer(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); - BOOST_TEST_EQ(*(*received), 55); - BOOST_TEST_GE(ctx.submit_count.load(), 2); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 55); + BOOST_TEST_GE(dispatch_count, 2); for (int id : log) BOOST_TEST_EQ(id, 1); } void - testSpawnChained() + testAsyncRunChained() { - sync_executor ctx; - executor 
ex(ctx); + int dispatch_count = 0; + test_dispatcher d(dispatch_count); int sum = 0; auto task1 = []() -> task { co_return 1; }; auto task2 = []() -> task { co_return 2; }; auto task3 = []() -> task { co_return 3; }; - spawn(ex, task1(), [&](auto r) { if (r) sum += *r; }); - spawn(ex, task2(), [&](auto r) { if (r) sum += *r; }); - spawn(ex, task3(), [&](auto r) { if (r) sum += *r; }); + async_run(d)(task1(), [&](int v) { sum += v; }, [](std::exception_ptr) {}); + async_run(d)(task2(), [&](int v) { sum += v; }, [](std::exception_ptr) {}); + async_run(d)(task3(), [&](int v) { sum += v; }, [](std::exception_ptr) {}); BOOST_TEST_EQ(sum, 6); } void - testSpawnResultErrorAccess() + testAsyncRunErrorHandler() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool caught = false; + std::string error_msg; auto failing = []() -> task { throw std::runtime_error("specific error"); co_return 0; }; - spawn(ex, failing(), [&](auto result) { - received = result; - }); - - BOOST_TEST(received.has_value()); - BOOST_TEST(!received->has_value()); - BOOST_TEST(received->has_error()); - BOOST_TEST(received->error() != nullptr); + async_run(d)(failing(), + [](int) {}, + [&](std::exception_ptr ep) { + try { + std::rethrow_exception(ep); + } catch (std::runtime_error const& e) { + error_msg = e.what(); + caught = true; + } + }); - bool caught = false; - try - { - std::rethrow_exception(received->error()); - } - catch (std::runtime_error const& e) - { - BOOST_TEST(std::string(e.what()) == "specific error"); - caught = true; - } BOOST_TEST(caught); + BOOST_TEST_EQ(error_msg, "specific error"); } void - testSpawnDeeplyNested() + testAsyncRunDeeplyNested() { - sync_executor ctx; - executor ex(ctx); - std::optional> received; + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool completed = false; + int result = 0; auto level3 = []() -> task { co_return co_await async_op_immediate(1); @@ -1226,14 +1075,71 @@ struct task_test co_return v + co_await async_op_immediate(100); }; - spawn(ex, level1(), [&](auto result) { - received = result; - }); + async_run(d)(level1(), + [&](int v) { + result = v; + completed = true; + }, + [](std::exception_ptr) {}); - BOOST_TEST(received.has_value()); - BOOST_TEST(received->has_value()); - BOOST_TEST_EQ(*(*received), 111); - BOOST_TEST_GE(ctx.submit_count.load(), 3); + BOOST_TEST(completed); + BOOST_TEST_EQ(result, 111); + BOOST_TEST_GE(dispatch_count, 3); + } + + void + testAsyncRunFireAndForget() + { + // Test fire-and-forget mode (default handler) + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + std::atomic task_ran{false}; + + auto simple_task = [&task_ran]() -> task { + task_ran = true; + co_return; + }; + + async_run(d)(simple_task()); + + BOOST_TEST(task_ran.load()); + } + + void + testAsyncRunSingleHandler() + { + // Test single handler that handles both success and exception + int dispatch_count = 0; + test_dispatcher d(dispatch_count); + bool success_called = false; + bool exception_called = false; + + struct overloaded_handler + { + bool* success; + bool* exception; + + void operator()(int v) + { + (void)v; + *success = true; + } + + void operator()(std::exception_ptr) + { + *exception = true; + } + }; + + auto success_task = []() -> task { + co_return 42; + }; + + async_run(d)(success_task(), + overloaded_handler{&success_called, &exception_called}); + + BOOST_TEST(success_called); + BOOST_TEST(!exception_called); } void @@ -1254,194 +1160,34 @@ struct 
task_test testVoidTaskMove(); testVoidTaskAwaitsAsyncResult(); - // executor affinity tests - testExecutorDefault(); - testExecutorUsedByAwait(); - testVoidTaskExecutorUsedByAwait(); - - // on() method tests - testOnSetsExecutor(); - testOnSetsExecutorVoid(); - testOnFluentSyntax(); - testOnFluentSyntaxVoid(); - testOnLvalueReturnsReference(); + // dispatcher tests (via async_run) + testDispatcherUsedByAwait(); + testVoidTaskDispatcherUsedByAwait(); // affinity propagation tests (ABC problem) testAffinityPropagation(); testAffinityPropagationVoid(); - testExplicitAffinityOverridesInheritance(); - testNoAffinityRunsInline(); + testNoDispatcherRunsInline(); // affinity preservation tests testInheritedAffinityVerification(); - testCrossExecutorAsyncOp(); - testMixedAffinityChain(); testAffinityPreservedAcrossMultipleAwaits(); testAffinityWithNestedVoidTasks(); testFinalSuspendUsesDispatcher(); - // spawn() function tests - testSpawnValueTask(); - testSpawnVoidTask(); - testSpawnTaskWithException(); - testSpawnVoidTaskWithException(); - testSpawnWithNestedAwaits(); - testSpawnWithAsyncOp(); - testSpawnAffinityPropagation(); - testSpawnChained(); - testSpawnResultErrorAccess(); - testSpawnDeeplyNested(); - testGccUninitialized(); - } - - // GCC 12+ -Wmaybe-uninitialized false positive tests - // https://github.com/boostorg/variant2/issues/XXX - // These attempt to reproduce the warning without coroutines. - void - testGccUninitialized() - { - using result_void = system::result; - using result_string = system::result; - - // Test 1: Simple copy construction - { - result_void r1; - result_void r2(r1); - (void)r2; - } - - // Test 2: Copy assignment - { - result_void r1; - result_void r2; - r2 = r1; - (void)r2; - } - - // Test 3: std::optional assignment (matches spawn pattern) - { - std::optional opt; - opt = result_void{}; - (void)opt; - } - - // Test 4: Pass to function via copy - { - auto fn = [](result_void r) { (void)r; }; - fn(result_void{}); - } - - // Test 5: Lambda capture + optional (closest to spawn) - { - auto fn = [](result_void r) { - std::optional opt; - opt = r; - return opt.has_value(); - }; - (void)fn(result_void{}); - } - - // Test 6: Non-void result with string (triggers string warning) - { - result_string r1; - result_string r2(r1); - (void)r2; - } - - // Test 7: Assign exception to result holding value - { - result_string r1{"hello"}; - r1 = std::make_exception_ptr(std::runtime_error("test")); - (void)r1; - } - - // Test 8: Optional with string result - { - std::optional opt; - opt = result_string{}; - (void)opt; - } - -#ifdef BOOST_CAPY_HAS_CORO - // Minimal fire-and-forget coroutine for testing - struct fire_and_forget - { - struct promise_type - { - fire_and_forget get_return_object() { return {}; } - std::suspend_never initial_suspend() noexcept { return {}; } - std::suspend_never final_suspend() noexcept { return {}; } - void return_void() {} - void unhandled_exception() { std::terminate(); } - }; - }; - - // Test 9: Coroutine returning result (mimics spawn) - { - auto coro = []() -> fire_and_forget { - result_void r{}; - (void)r; - co_return; - }; - coro(); - } - - // Test 10: Coroutine with handler call (closest to actual spawn) - { - std::optional received; - auto handler = [&](result_void r) { - received = r; - }; - auto coro = [&]() -> fire_and_forget { - handler(result_void{}); - co_return; - }; - coro(); - (void)received; - } - - // Test 11: Coroutine with try/catch like spawn - { - std::optional received; - auto handler = [&](result_void r) { - received = r; - }; 
- auto coro = [&]() -> fire_and_forget { - try - { - handler(result_void{}); - } - catch (...) - { - handler(result_void{std::current_exception()}); - } - co_return; - }; - coro(); - (void)received; - } - - // Test 12: Coroutine with string result - { - std::optional received; - auto handler = [&](result_string r) { - received = r; - }; - auto coro = [&]() -> fire_and_forget { - try - { - handler(result_string{"test"}); - } - catch (...) - { - handler(result_string{std::current_exception()}); - } - co_return; - }; - coro(); - (void)received; - } -#endif + // async_run() function tests + testAsyncRunValueTask(); + testAsyncRunVoidTask(); + testAsyncRunTaskWithException(); + testAsyncRunVoidTaskWithException(); + testAsyncRunWithNestedAwaits(); + testAsyncRunWithAsyncOp(); + testAsyncRunAffinityPropagation(); + testAsyncRunChained(); + testAsyncRunErrorHandler(); + testAsyncRunDeeplyNested(); + testAsyncRunFireAndForget(); + testAsyncRunSingleHandler(); } }; @@ -1451,5 +1197,3 @@ TEST_SUITE( } // capy } // boost - -#endif diff --git a/test/unit/thread_local_ptr.cpp b/test/unit/thread_local_ptr.cpp new file mode 100644 index 00000000..633fcb85 --- /dev/null +++ b/test/unit/thread_local_ptr.cpp @@ -0,0 +1,158 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie dot falco at gmail dot com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + +// Test that header file is self-contained. +#include + +#include + +#include "test_suite.hpp" + +namespace boost { +namespace capy { + +// Test class type +struct widget +{ + int value; + explicit widget(int v) : value(v) {} +}; + +struct thread_local_ptr_test +{ + void + run() + { + // default state is nullptr + { + thread_local_ptr p; + BOOST_TEST(p.get() == nullptr); + } + + // set and get with int* + { + thread_local_ptr p; + int x = 42; + p.set(&x); + BOOST_TEST(p.get() == &x); + BOOST_TEST(*p.get() == 42); + + // clear + p.set(nullptr); + BOOST_TEST(p.get() == nullptr); + } + + // operator= for setting + { + thread_local_ptr p; + int x = 100; + int* result = (p = &x); + BOOST_TEST(result == &x); + BOOST_TEST(p.get() == &x); + } + + // operator* dereference + { + thread_local_ptr p; + int x = 55; + p = &x; + BOOST_TEST(*p == 55); + + *p = 66; + BOOST_TEST(x == 66); + } + + // operator-> for class types + { + thread_local_ptr p; + widget w(123); + p = &w; + BOOST_TEST(p->value == 123); + p->value = 456; + BOOST_TEST(w.value == 456); + } + + // all instances of same type share slot + { + thread_local_ptr p1; + thread_local_ptr p2; + + int a = 1; + + p1 = &a; + + // Same type = same slot + BOOST_TEST(p1.get() == &a); + BOOST_TEST(p2.get() == &a); + BOOST_TEST(p1.get() == p2.get()); + } + + // thread independence + { + thread_local_ptr p; + long main_val = 100; + long thread_val = 200; + + p = &main_val; + BOOST_TEST(p.get() == &main_val); + + bool thread_saw_nullptr = false; + bool thread_set_worked = false; + + std::thread t([&]() { + // New thread should see nullptr initially + thread_saw_nullptr = (p.get() == nullptr); + + // Set in thread + p = &thread_val; + thread_set_worked = (p.get() == &thread_val); + }); + t.join(); + + BOOST_TEST(thread_saw_nullptr); + BOOST_TEST(thread_set_worked); + + // Main thread should still see its value + BOOST_TEST(p.get() == &main_val); + } + + // multiple threads + { + thread_local_ptr p; + + widget main_w(1); + p = &main_w; + + std::thread 
t1([&]() { + widget w(10); + p = &w; + BOOST_TEST(p->value == 10); + }); + + std::thread t2([&]() { + widget w(20); + p = &w; + BOOST_TEST(p->value == 20); + }); + + t1.join(); + t2.join(); + + // Main thread unchanged + BOOST_TEST(p->value == 1); + } + } +}; + +TEST_SUITE( + thread_local_ptr_test, + "boost.capy.thread_local_ptr"); + +} // capy +} // boost diff --git a/test/unit/thread_pool.cpp b/test/unit/thread_pool.cpp index 0ea0011c..994a55f9 100644 --- a/test/unit/thread_pool.cpp +++ b/test/unit/thread_pool.cpp @@ -12,12 +12,6 @@ #include "test_suite.hpp" -#include -#include -#include -#include -#include - namespace boost { namespace capy { @@ -29,215 +23,23 @@ struct thread_pool_test // Default construction (hardware concurrency) { thread_pool pool; - BOOST_TEST(static_cast(pool.get_executor())); } // Explicit thread count { thread_pool pool(2); - BOOST_TEST(static_cast(pool.get_executor())); } // Single thread { thread_pool pool(1); - BOOST_TEST(static_cast(pool.get_executor())); - } - } - - void - testGetExecutor() - { - thread_pool pool(1); - executor exec1 = pool.get_executor(); - executor exec2 = pool.get_executor(); - - BOOST_TEST(static_cast(exec1)); - BOOST_TEST(static_cast(exec2)); - } - - void - testPostSingle() - { - std::atomic called{false}; - { - thread_pool pool(1); - executor exec = pool.get_executor(); - exec.post([&called]{ called = true; }); - } - // Pool destructor waits for work to complete - BOOST_TEST(called.load()); - } - - void - testPostMultiple() - { - std::atomic count{0}; - { - thread_pool pool(2); - executor exec = pool.get_executor(); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - exec.post([&count]{ ++count; }); - } - BOOST_TEST_EQ(count.load(), 3); - } - - void - testPostFromMultipleThreads() - { - std::atomic count{0}; - { - thread_pool pool(4); - executor exec = pool.get_executor(); - - std::thread t1([&]{ - for(int i = 0; i < 10; ++i) - exec.post([&count]{ ++count; }); - }); - - std::thread t2([&]{ - for(int i = 0; i < 10; ++i) - exec.post([&count]{ ++count; }); - }); - - t1.join(); - t2.join(); - } - BOOST_TEST_EQ(count.load(), 20); - } - - void - testConcurrentExecution() - { - // Verify work runs on multiple threads concurrently - std::atomic concurrent{0}; - std::atomic max_concurrent{0}; - - { - thread_pool pool(4); - executor exec = pool.get_executor(); - - for(int i = 0; i < 8; ++i) - { - exec.post([&]{ - int c = ++concurrent; - // Update max if this is higher - int expected = max_concurrent.load(); - while(c > expected) - { - if(max_concurrent.compare_exchange_weak(expected, c)) - break; - } - // Simulate some work - std::this_thread::sleep_for( - std::chrono::milliseconds(10)); - --concurrent; - }); - } - } - - // Should have had multiple concurrent executions - BOOST_TEST(max_concurrent.load() > 1); - } - - void - testSubmit() - { - std::atomic result{0}; - std::atomic handler_called{false}; - - { - thread_pool pool(1); - executor exec = pool.get_executor(); - exec.submit( - []{ return 42; }, - [&](system::result r) - { - if(r.has_value()) - result = r.value(); - handler_called = true; - }); - } - - BOOST_TEST(handler_called.load()); - BOOST_TEST_EQ(result.load(), 42); - } - - void - testSubmitVoid() - { - std::atomic work_called{false}; - std::atomic handler_called{false}; - - { - thread_pool pool(1); - executor exec = pool.get_executor(); - exec.submit( - [&work_called]{ work_called = true; }, - [&handler_called](system::result) - { - handler_called = true; - }); - } - - BOOST_TEST(work_called.load()); - 
BOOST_TEST(handler_called.load()); - } - - void - testMultipleExecutors() - { - std::atomic count{0}; - { - thread_pool pool(2); - executor exec1 = pool.get_executor(); - executor exec2 = pool.get_executor(); - executor exec3 = exec1; - - exec1.post([&count]{ ++count; }); - exec2.post([&count]{ ++count; }); - exec3.post([&count]{ ++count; }); - } - BOOST_TEST_EQ(count.load(), 3); - } - - void - testDestructorWaitsForWork() - { - std::atomic started{false}; - std::atomic finished{false}; - - { - thread_pool pool(1); - executor exec = pool.get_executor(); - exec.post([&]{ - started = true; - std::this_thread::sleep_for( - std::chrono::milliseconds(50)); - finished = true; - }); - // Give work time to start - while(!started.load()) - std::this_thread::yield(); } - // Pool destructor should have waited - BOOST_TEST(finished.load()); } void run() { testConstruct(); - testGetExecutor(); - testPostSingle(); - testPostMultiple(); - testPostFromMultipleThreads(); - testConcurrentExecution(); - testSubmit(); - testSubmitVoid(); - testMultipleExecutors(); - testDestructorWaitsForWork(); } }; From 57f1f666bdc366a5a60e9463cea93d9cd99a684a Mon Sep 17 00:00:00 2001 From: Vinnie Falco Date: Sun, 11 Jan 2026 12:27:18 -0800 Subject: [PATCH 2/2] Rewrite documentation following documenting.md guidelines - Restructure into Coroutines, Execution, Utilities sections - Add Quick Start tutorial page - Split monolithic pages into focused topics - Add When/When NOT to use sections throughout - Update concept reference pages with full examples - Add cross-references and logical progression between pages --- doc/modules/ROOT/nav.adoc | 21 +- doc/modules/ROOT/pages/advanced.adoc | 383 ------------------ .../ROOT/pages/concepts/affine_awaitable.adoc | 43 +- .../ROOT/pages/concepts/dispatcher.adoc | 36 +- doc/modules/ROOT/pages/concepts/executor.adoc | 53 ++- .../ROOT/pages/concepts/frame_allocator.adoc | 37 +- .../pages/concepts/is_execution_context.adoc | 41 +- .../pages/concepts/stoppable_awaitable.adoc | 35 +- doc/modules/ROOT/pages/coroutines.adoc | 263 ------------ .../ROOT/pages/coroutines/affinity.adoc | 234 +++++++++++ .../ROOT/pages/coroutines/cancellation.adoc | 194 +++++++++ .../ROOT/pages/coroutines/launching.adoc | 175 ++++++++ doc/modules/ROOT/pages/coroutines/tasks.adoc | 200 +++++++++ doc/modules/ROOT/pages/execution.adoc | 314 -------------- .../ROOT/pages/execution/contexts.adoc | 249 ++++++++++++ .../ROOT/pages/execution/executors.adoc | 228 +++++++++++ .../pages/execution/frame-allocation.adoc | 211 ++++++++++ doc/modules/ROOT/pages/index.adoc | 99 ++++- doc/modules/ROOT/pages/quick-start.adoc | 122 ++++++ .../ROOT/pages/utilities/compression.adoc | 261 ++++++++++++ .../ROOT/pages/utilities/containers.adoc | 248 ++++++++++++ doc/modules/ROOT/pages/utilities/file-io.adoc | 253 ++++++++++++ 22 files changed, 2623 insertions(+), 1077 deletions(-) delete mode 100644 doc/modules/ROOT/pages/advanced.adoc delete mode 100644 doc/modules/ROOT/pages/coroutines.adoc create mode 100644 doc/modules/ROOT/pages/coroutines/affinity.adoc create mode 100644 doc/modules/ROOT/pages/coroutines/cancellation.adoc create mode 100644 doc/modules/ROOT/pages/coroutines/launching.adoc create mode 100644 doc/modules/ROOT/pages/coroutines/tasks.adoc delete mode 100644 doc/modules/ROOT/pages/execution.adoc create mode 100644 doc/modules/ROOT/pages/execution/contexts.adoc create mode 100644 doc/modules/ROOT/pages/execution/executors.adoc create mode 100644 doc/modules/ROOT/pages/execution/frame-allocation.adoc create mode 100644 
doc/modules/ROOT/pages/quick-start.adoc create mode 100644 doc/modules/ROOT/pages/utilities/compression.adoc create mode 100644 doc/modules/ROOT/pages/utilities/containers.adoc create mode 100644 doc/modules/ROOT/pages/utilities/file-io.adoc diff --git a/doc/modules/ROOT/nav.adoc b/doc/modules/ROOT/nav.adoc index c851c760..573d9542 100644 --- a/doc/modules/ROOT/nav.adoc +++ b/doc/modules/ROOT/nav.adoc @@ -1,11 +1,22 @@ -* xref:coroutines.adoc[Coroutines] -* xref:execution.adoc[Execution Model] -* xref:advanced.adoc[Advanced Topics] +* xref:index.adoc[Introduction] +* xref:quick-start.adoc[Quick Start] +* Coroutines +** xref:coroutines/tasks.adoc[Tasks] +** xref:coroutines/launching.adoc[Launching Tasks] +** xref:coroutines/affinity.adoc[Executor Affinity] +** xref:coroutines/cancellation.adoc[Cancellation] +* Execution +** xref:execution/executors.adoc[Executors] +** xref:execution/contexts.adoc[Execution Contexts] +** xref:execution/frame-allocation.adoc[Frame Allocation] +* Utilities +** xref:utilities/containers.adoc[Containers] +** xref:utilities/file-io.adoc[File I/O] +** xref:utilities/compression.adoc[Compression] * Concepts ** xref:concepts/dispatcher.adoc[dispatcher] +** xref:concepts/executor.adoc[executor] ** xref:concepts/affine_awaitable.adoc[affine_awaitable] ** xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] -** xref:concepts/executor.adoc[executor] ** xref:concepts/frame_allocator.adoc[frame_allocator] ** xref:concepts/is_execution_context.adoc[is_execution_context] -* xref:reference:boost/capy.adoc[Reference] diff --git a/doc/modules/ROOT/pages/advanced.adoc b/doc/modules/ROOT/pages/advanced.adoc deleted file mode 100644 index 7cce0f87..00000000 --- a/doc/modules/ROOT/pages/advanced.adoc +++ /dev/null @@ -1,383 +0,0 @@ -// -// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// -// Official repository: https://github.com/cppalliance/capy -// - -= Advanced Topics - -This page covers customization points for power users: frame allocation, -cancellation with stop tokens, and implementing custom affine awaitables. - -== Frame Allocation - -Every coroutine requires memory for its *frame*—the compiler-generated -structure holding local variables, parameters, and suspension state. -By default, frames are allocated with `::operator new`. For -high-frequency coroutine creation, custom allocators can significantly -reduce allocation overhead. - -=== The frame_allocator Concept - -A type satisfying xref:concepts/frame_allocator.adoc[`frame_allocator`] -provides: - -[source,cpp] ----- -void* allocate(std::size_t n); -void deallocate(void* p, std::size_t n); ----- - -Frame allocators must be cheaply copyable handles to an underlying -memory resource. - -=== Custom Allocators with async_run - -Pass a custom allocator as the second argument to `async_run`: - -[source,cpp] ----- -#include - -my_pool_allocator alloc{pool}; - -async_run(ex, alloc)(my_task()); ----- - -The allocator is used for all coroutine frames in the launched call -tree. The library embeds the allocator in the first frame, making it -available for child coroutines. - -=== The Recycling Frame Allocator - -By default, `async_run` uses a *recycling frame allocator* that caches -deallocated frames for reuse. This eliminates most allocation overhead -for typical coroutine patterns where frames are created and destroyed -in LIFO order. 
- -The recycling allocator: - -* Maintains a thread-local free list -* Reuses frames of matching size -* Falls back to global new/delete for mismatched sizes - -For custom allocation strategies, implement the `frame_allocator` -concept and pass your allocator to `async_run`. - -=== Memory Layout - -Coroutine frames have this layout: - ----- -First frame: [coroutine frame | tagged_ptr | allocator_wrapper] -Child frames: [coroutine frame | ptr] ----- - -The pointer at the end of each frame enables correct deallocation -regardless of which allocator was active at allocation time. A tag -bit distinguishes the first frame (with embedded wrapper) from child -frames (with pointer to wrapper). - -== Stop Token Propagation - -Capy coroutines support cooperative cancellation through `std::stop_token`. -When a task is launched with stop support, the token propagates through -the entire call chain. - -=== Receiving Stop Tokens - -Awaitables that support cancellation implement the -xref:concepts/stoppable_awaitable.adoc[`stoppable_awaitable`] concept. -Their `await_suspend` receives both a dispatcher and a stop token: - -[source,cpp] ----- -template -auto await_suspend( - std::coroutine_handle<> h, - Dispatcher const& d, - std::stop_token token) -{ - if (token.stop_requested()) - { - // Handle cancellation - return d(h); // Resume immediately - } - - // Start async operation with cancellation support - start_async([h, &d, token] { - if (token.stop_requested()) - { - // Cancelled during operation - } - d(h); - }); - return std::noop_coroutine(); -} ----- - -=== Stop Token in Tasks - -The `task` type automatically propagates stop tokens through its -`await_transform`. When you `co_await` a stoppable awaitable inside -a task, the task's stop token is forwarded: - -[source,cpp] ----- -task cancellable_work() -{ - // If this task has a stop token, it's automatically - // passed to any stoppable awaitables we co_await - co_await some_stoppable_operation(); -} ----- - -== The Affine Awaitable Protocol - -The *affine awaitable protocol* enables zero-overhead dispatcher -propagation. Awaitables that implement this protocol receive the -caller's dispatcher in `await_suspend`, allowing them to resume -through the correct execution context. - -=== The dispatcher Concept - -A xref:concepts/dispatcher.adoc[`dispatcher`] is a callable that -schedules coroutine resumption: - -[source,cpp] ----- -template -concept dispatcher = requires(D const& d, std::coroutine_handle
<void>
h) { - { d(h) } -> std::convertible_to; -}; ----- - -Calling `d(h)` schedules `h` for resumption and returns a handle -suitable for symmetric transfer. The dispatcher may resume inline -(returning `h`) or queue the work (returning `std::noop_coroutine()`). - -=== The affine_awaitable Concept - -An xref:concepts/affine_awaitable.adoc[`affine_awaitable`] provides -an extended `await_suspend` that receives the dispatcher: - -[source,cpp] ----- -template -concept affine_awaitable = - dispatcher && - requires(A a, std::coroutine_handle
<void>
h, D const& d) { - a.await_suspend(h, d); - }; ----- - -=== Implementing a Custom Affine Awaitable - -Here's a complete example of an affine awaitable that wraps an -asynchronous timer: - -[source,cpp] ----- -struct async_timer -{ - std::chrono::milliseconds duration_; - - bool await_ready() const noexcept - { - return duration_.count() <= 0; - } - - template - auto await_suspend( - std::coroutine_handle<> h, - Dispatcher const& d) - { - // Start timer, resume through dispatcher when done - start_timer(duration_, [h, &d] { - d(h); // Resume via dispatcher - }); - return std::noop_coroutine(); - } - - void await_resume() const noexcept - { - // Timer completed, nothing to return - } -}; - -// Usage in a task: -task delayed_work() -{ - co_await async_timer{std::chrono::seconds{1}}; - // Resumes on the task's executor after 1 second -} ----- - -=== Type-Erased Dispatchers - -The `any_dispatcher` class provides type erasure for dispatchers, -enabling runtime polymorphism without virtual functions: - -[source,cpp] ----- -#include - -using boost::capy::any_dispatcher; - -void store_dispatcher(any_dispatcher d) -{ - // Can store any dispatcher type uniformly - d(some_handle); // Invoke through type-erased interface -} ----- - -`task` uses `any_dispatcher` internally to store the inherited -dispatcher, enabling tasks to work with any executor type. - -== The Stoppable Awaitable Protocol - -The xref:concepts/stoppable_awaitable.adoc[`stoppable_awaitable`] -protocol extends `affine_awaitable` with stop token support: - -[source,cpp] ----- -template -concept stoppable_awaitable = - affine_awaitable && - requires(A a, std::coroutine_handle
<void>
h, D const& d, std::stop_token token) { - a.await_suspend(h, d, token); - }; ----- - -A stoppable awaitable provides _both_ overloads of `await_suspend`. -The task's `await_transform` automatically selects the appropriate -overload based on whether a stop token is available. - -=== Example: Stoppable Timer - -[source,cpp] ----- -struct stoppable_timer -{ - std::chrono::milliseconds duration_; - - bool await_ready() const noexcept - { - return duration_.count() <= 0; - } - - // Affine path (no cancellation) - template - auto await_suspend( - std::coroutine_handle<> h, - Dispatcher const& d) - { - start_timer(duration_, [h, &d] { d(h); }); - return std::noop_coroutine(); - } - - // Stoppable path (with cancellation) - template - auto await_suspend( - std::coroutine_handle<> h, - Dispatcher const& d, - std::stop_token token) - { - if (token.stop_requested()) - { - cancelled_ = true; - return d(h); // Resume immediately - } - - auto timer = start_timer(duration_, [h, &d] { d(h); }); - - // Register stop callback - stop_callback_ = std::stop_callback(token, [timer] { - timer.cancel(); - }); - - return std::noop_coroutine(); - } - - void await_resume() const - { - if (cancelled_) - throw operation_cancelled{}; - } - -private: - bool cancelled_ = false; - std::optional> stop_callback_; -}; ----- - -== Legacy Awaitable Compatibility - -Not all awaitables implement the affine protocol. For standard library -awaitables or third-party types, capy provides automatic compatibility -through a *trampoline coroutine*. - -When `await_transform` encounters an awaitable that doesn't satisfy -`affine_awaitable`, it wraps it in `make_affine`: - -[source,cpp] ----- -// Inside task's await_transform: -template -auto await_transform(Awaitable&& a) -{ - using A = std::decay_t; - if constexpr (affine_awaitable) - { - // Zero-overhead path - return transform_awaiter{std::forward(a), this}; - } - else - { - // Trampoline fallback - return make_affine(std::forward(a), ex_); - } -} ----- - -The trampoline: - -1. Awaits the legacy awaitable normally -2. After completion, dispatches through the executor -3. Resumes the caller on the correct context - -This adds one extra coroutine frame but ensures correct affinity -for any awaitable type. Prefer implementing the affine protocol -for performance-critical awaitables. - -== Summary - -[cols="1,2"] -|=== -| Feature | Use Case - -| Custom frame allocator -| High-frequency coroutine creation, memory pools - -| Stop token propagation -| Cooperative cancellation of async operations - -| Affine awaitable protocol -| Zero-overhead dispatcher propagation - -| Stoppable awaitable protocol -| Cancellable async operations - -| Legacy compatibility -| Using non-affine awaitables in tasks -|=== - -== See Also - -* xref:concepts/dispatcher.adoc[dispatcher concept] -* xref:concepts/affine_awaitable.adoc[affine_awaitable concept] -* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable concept] -* xref:concepts/frame_allocator.adoc[frame_allocator concept] diff --git a/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc b/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc index 5de62537..bb138726 100644 --- a/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc +++ b/doc/modules/ROOT/pages/concepts/affine_awaitable.adoc @@ -9,8 +9,10 @@ = affine_awaitable -An awaitable is affine if it participates in the affine awaitable -protocol by accepting a dispatcher in its `await_suspend` method. 
+An awaitable is affine if it participates in the affine awaitable protocol +by accepting a dispatcher in its `await_suspend` method. + +Requires: C++20 == Synopsis @@ -32,15 +34,18 @@ concept affine_awaitable = == Description -The affine awaitable protocol enables zero-overhead scheduler affinity -without requiring the full sender/receiver protocol. When an awaitable -is affine, it receives the caller's dispatcher in `await_suspend` and -uses it to resume the caller on the correct execution context. +The affine awaitable protocol enables zero-overhead scheduler affinity without +requiring the full sender/receiver protocol. When an awaitable is affine, it +receives the caller's dispatcher in `await_suspend` and uses it to resume the +caller on the correct execution context. + +The awaitable must use the dispatcher `d` to resume the caller when the +operation completes. Typically this looks like `return d(h);` for symmetric +transfer or calling `d(h)` before returning `std::noop_coroutine()`. + +== Preconditions -The awaitable must use the dispatcher `d` to resume the caller when -the operation completes. Typically this looks like `return d(h);` for -symmetric transfer or calling `d(h)` before returning -`std::noop_coroutine()`. +* The dispatcher `d` remains valid until the awaitable resumes the caller == Valid Expressions @@ -56,21 +61,27 @@ Given: | `a.await_ready()` | `bool` -| Returns `true` if the operation has already completed +| Return `true` if the operation has already completed | `a.await_suspend(h, d)` | (unspecified) -| Suspends and starts the async operation, using `d` for resumption +| Suspend and start the async operation, using `d` for resumption | `a.await_resume()` | (unspecified) -| Returns the operation result or rethrows any exception +| Return the operation result or rethrow any exception |=== == Example [source,cpp] ---- +#include + +using boost::capy::coro; +using boost::capy::affine_awaitable; +using boost::capy::any_dispatcher; + struct my_async_op { bool await_ready() const noexcept @@ -102,6 +113,6 @@ static_assert(affine_awaitable); == See Also -* xref:concepts/dispatcher.adoc[dispatcher] -* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] -* xref:advanced.adoc[Advanced Topics] +* xref:dispatcher.adoc[dispatcher] — The dispatcher concept +* xref:stoppable_awaitable.adoc[stoppable_awaitable] — Extended protocol with cancellation +* xref:../coroutines/affinity.adoc[Executor Affinity] — Tutorial on affinity propagation diff --git a/doc/modules/ROOT/pages/concepts/dispatcher.adoc b/doc/modules/ROOT/pages/concepts/dispatcher.adoc index ef123cbd..1e4cc6ed 100644 --- a/doc/modules/ROOT/pages/concepts/dispatcher.adoc +++ b/doc/modules/ROOT/pages/concepts/dispatcher.adoc @@ -12,6 +12,8 @@ A dispatcher is a callable object that accepts a coroutine handle and schedules it for resumption. +Requires: C++20 + == Synopsis Defined in header `` @@ -30,19 +32,18 @@ concept dispatcher = requires(D const& d, std::coroutine_handle
<void>
h) { == Description -A dispatcher encapsulates the rules for how and where a coroutine -resumes. When invoked with a coroutine handle, the dispatcher -schedules the handle for resumption and returns a handle suitable -for symmetric transfer. +A dispatcher encapsulates the rules for how and where a coroutine resumes. +When invoked with a coroutine handle, the dispatcher schedules the handle for +resumption and returns a handle suitable for symmetric transfer. -Dispatchers must be const-callable, enabling thread-safe concurrent -dispatch from multiple coroutines. The dispatcher may resume the -handle inline (by returning the handle itself) or queue it for later -execution (by returning `std::noop_coroutine()`). +Dispatchers must be const-callable, enabling thread-safe concurrent dispatch +from multiple coroutines. The dispatcher may resume the handle inline (by +returning the handle itself) or queue it for later execution (by returning +`std::noop_coroutine()`). -Since `coro` (an alias for `std::coroutine_handle`) has -`operator()` which invokes `resume()`, the handle itself is callable -and can be dispatched directly. +Since `coro` (an alias for `std::coroutine_handle`) has `operator()` +which invokes `resume()`, the handle itself is callable and can be dispatched +directly. == Valid Expressions @@ -57,13 +58,18 @@ Given: | `d(h)` | convertible to `coro` -| Schedules `h` for resumption and returns a handle for symmetric transfer +| Schedule `h` for resumption and return a handle for symmetric transfer |=== == Example [source,cpp] ---- +#include + +using boost::capy::coro; +using boost::capy::dispatcher; + struct inline_dispatcher { coro operator()(coro h) const @@ -89,6 +95,6 @@ static_assert(dispatcher); == See Also -* xref:concepts/affine_awaitable.adoc[affine_awaitable] -* xref:concepts/stoppable_awaitable.adoc[stoppable_awaitable] -* xref:advanced.adoc[Advanced Topics] +* xref:affine_awaitable.adoc[affine_awaitable] — Awaitable that accepts a dispatcher +* xref:stoppable_awaitable.adoc[stoppable_awaitable] — Awaitable with cancellation support +* xref:../coroutines/affinity.adoc[Executor Affinity] — Tutorial on affinity propagation diff --git a/doc/modules/ROOT/pages/concepts/executor.adoc b/doc/modules/ROOT/pages/concepts/executor.adoc index bdc29d90..9034f5bb 100644 --- a/doc/modules/ROOT/pages/concepts/executor.adoc +++ b/doc/modules/ROOT/pages/concepts/executor.adoc @@ -11,6 +11,8 @@ An executor provides mechanisms for scheduling work for execution. +Requires: C++20 + == Synopsis Defined in header `` @@ -38,9 +40,9 @@ concept executor = == Description A type meeting the executor requirements embodies a set of rules for -determining how submitted coroutines are to be executed. An executor -is a lightweight, copyable handle to an execution context such as a -thread pool, I/O context, or strand. +determining how submitted coroutines are to be executed. An executor is a +lightweight, copyable handle to an execution context such as a thread pool, +I/O context, or strand. The executor provides three scheduling operations: @@ -48,24 +50,32 @@ The executor provides three scheduling operations: * **post** — Always queue, never inline. Guaranteed asynchrony. * **defer** — Always queue with continuation hint. Enables optimizations. 
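+
+The operations above are exercised through ordinary member calls. The
+sketch below shows only the parts of the interface documented on this
+page; `ctx` stands for any context modeling
+xref:is_execution_context.adoc[`is_execution_context`] and `h` is a
+coroutine handle ready to resume, both illustrative placeholders:
+
+[source,cpp]
+----
+auto ex = ctx.get_executor();    // lightweight, copyable handle
+auto ex2 = ex;                   // copying must not throw
+
+auto& owner = ex.context();      // reference to the owning context
+
+// Bracket outstanding work so the context knows when it may stop.
+ex.on_work_started();
+std::coroutine_handle<> next = ex.dispatch(h);  // may resume h inline
+ex.on_work_finished();
+----
+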
-=== No-Throw Guarantee +== No-Throw Guarantee + +The following operations shall not exit via an exception: -The following operations shall not exit via an exception: constructors, -comparison operators, copy/move operations, swap, `context()`, -`on_work_started()`, and `on_work_finished()`. +* Constructors +* Comparison operators +* Copy/move operations +* `swap` +* `context()` +* `on_work_started()` +* `on_work_finished()` -=== Thread Safety +== Thread Safety The executor copy constructor, comparison operators, and other member -functions shall not introduce data races as a result of concurrent -calls from different threads. +functions shall not introduce data races as a result of concurrent calls +from different threads. -=== Executor Validity +== Executor Validity Let `ctx` be the execution context returned by `context()`. An executor -becomes invalid when the first call to `ctx.shutdown()` returns. The -effect of calling `dispatch`, `post`, or `defer` on an invalid executor -is undefined. +becomes invalid when the first call to `ctx.shutdown()` returns. The effect +of calling `dispatch`, `post`, or `defer` on an invalid executor is undefined. + +The copy constructor, comparison operators, and `context()` remain valid +until `ctx` is destroyed. == Valid Expressions @@ -81,15 +91,15 @@ Given: | `ce.context()` | `Context&` -| Returns a reference to the associated execution context +| Return a reference to the associated execution context | `ce.on_work_started()` | — -| Informs the executor that work is beginning. Must not throw. +| Inform the executor that work is beginning (must not throw) | `ce.on_work_finished()` | — -| Informs the executor that work has completed. Must not throw. +| Inform the executor that work has completed (must not throw) | `ce.dispatch(h)` | convertible to `std::coroutine_handle<>` @@ -108,6 +118,10 @@ Given: [source,cpp] ---- +#include + +using boost::capy::executor; + class my_executor { my_context* ctx_; @@ -149,5 +163,6 @@ static_assert(executor); == See Also -* xref:concepts/is_execution_context.adoc[is_execution_context] -* xref:execution.adoc[Execution Model] +* xref:is_execution_context.adoc[is_execution_context] — Execution context concept +* xref:dispatcher.adoc[dispatcher] — Simpler scheduling interface +* xref:../execution/executors.adoc[Executors] — Tutorial on the execution model diff --git a/doc/modules/ROOT/pages/concepts/frame_allocator.adoc b/doc/modules/ROOT/pages/concepts/frame_allocator.adoc index 04a99fed..0060cf84 100644 --- a/doc/modules/ROOT/pages/concepts/frame_allocator.adoc +++ b/doc/modules/ROOT/pages/concepts/frame_allocator.adoc @@ -11,6 +11,8 @@ A frame allocator provides memory allocation for coroutine frames. +Requires: C++20 + == Synopsis Defined in header `` @@ -33,18 +35,18 @@ concept frame_allocator = == Description Frame allocators provide memory for coroutine frames—the compiler-generated -structures holding local variables, parameters, and suspension state. -A frame allocator must be a cheaply copyable handle to an underlying -memory resource (e.g., a pointer to a pool). +structures holding local variables, parameters, and suspension state. A frame +allocator must be a cheaply copyable handle to an underlying memory resource +(e.g., a pointer to a pool). -The library copies the allocator into the first coroutine frame for -lifetime safety. Subsequent frames in the call tree use the embedded -allocator for both allocation and deallocation. 
+The library copies the allocator into the first coroutine frame for lifetime +safety. Subsequent frames in the call tree use the embedded allocator for +both allocation and deallocation. -=== Default Frame Allocator +== Default Frame Allocator -The library provides `default_frame_allocator` which passes through -to `::operator new` and `::operator delete`: +The library provides `default_frame_allocator` which passes through to +`::operator new` and `::operator delete`: [source,cpp] ---- @@ -62,10 +64,10 @@ struct default_frame_allocator }; ---- -=== Recycling Frame Allocator +== Recycling Frame Allocator By default, `async_run` uses a recycling frame allocator that caches -deallocated frames for reuse, eliminating most allocation overhead +deallocated frames for reuse. This eliminates most allocation overhead for typical coroutine patterns. == Valid Expressions @@ -82,17 +84,23 @@ Given: | `a.allocate(n)` | `void*` -| Allocates `n` bytes for a coroutine frame +| Allocate `n` bytes for a coroutine frame | `a.deallocate(p, n)` | — -| Deallocates memory previously allocated with `allocate(n)` +| Deallocate memory previously allocated with `allocate(n)` |=== == Example [source,cpp] ---- +#include +#include + +using boost::capy::frame_allocator; +using boost::capy::async_run; + class pool_frame_allocator { memory_pool* pool_; @@ -125,4 +133,5 @@ async_run(ex, alloc)(my_task()); == See Also -* xref:advanced.adoc#_frame_allocation[Frame Allocation] +* xref:../execution/frame-allocation.adoc[Frame Allocation] — Tutorial on custom allocators +* xref:../coroutines/launching.adoc[Launching Tasks] — Using allocators with `async_run` diff --git a/doc/modules/ROOT/pages/concepts/is_execution_context.adoc b/doc/modules/ROOT/pages/concepts/is_execution_context.adoc index 550574de..59425b0a 100644 --- a/doc/modules/ROOT/pages/concepts/is_execution_context.adoc +++ b/doc/modules/ROOT/pages/concepts/is_execution_context.adoc @@ -9,8 +9,10 @@ = is_execution_context -A type satisfies `is_execution_context` if it derives from -`execution_context` and provides an associated executor type. +A type satisfies `is_execution_context` if it derives from `execution_context` +and provides an associated executor type. + +Requires: C++20 == Synopsis @@ -34,28 +36,28 @@ concept is_execution_context = == Description -An execution context represents a place where function objects are -executed. It provides: +An execution context represents a place where function objects are executed. +It provides: * A service registry for polymorphic services * An associated executor type for scheduling work * Lifecycle management (shutdown, destroy) -Derived classes such as `io_context` extend `execution_context` to -provide execution facilities like event loops and thread pools. +Derived classes such as `io_context` extend `execution_context` to provide +execution facilities like event loops and thread pools. -=== Service Management +== Service Management Execution contexts own services that provide extensible functionality. -Services are created on first use via `use_service()` or explicitly -via `make_service()`. During destruction, services are shut down -and deleted in reverse order of creation. +Services are created on first use via `use_service()` or explicitly via +`make_service()`. During destruction, services are shut down and deleted +in reverse order of creation. 
-=== Destructor Requirements +== Destructor Requirements -The destructor must destroy all unexecuted work that was submitted -via an executor object associated with the execution context. This -is a semantic requirement that cannot be verified at compile time. +The destructor must destroy all unexecuted work that was submitted via an +executor object associated with the execution context. This is a semantic +requirement that cannot be verified at compile time. == Valid Expressions @@ -73,13 +75,18 @@ Given: | `x.get_executor()` | `X::executor_type` -| Returns an executor for scheduling work on this context +| Return an executor for scheduling work on this context |=== == Example [source,cpp] ---- +#include + +using boost::capy::execution_context; +using boost::capy::is_execution_context; + class io_context : public execution_context { public: @@ -119,5 +126,5 @@ static_assert(is_execution_context); == See Also -* xref:concepts/executor.adoc[executor] -* xref:execution.adoc[Execution Model] +* xref:executor.adoc[executor] — The executor concept +* xref:../execution/contexts.adoc[Execution Contexts] — Tutorial on contexts and services diff --git a/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc b/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc index 5615d594..ef6ac0fc 100644 --- a/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc +++ b/doc/modules/ROOT/pages/concepts/stoppable_awaitable.adoc @@ -13,6 +13,8 @@ An awaitable is stoppable if it participates in the stoppable awaitable protocol by accepting both a dispatcher and a stop token in its `await_suspend` method. +Requires: C++20 + == Synopsis Defined in header `` @@ -35,16 +37,20 @@ concept stoppable_awaitable = The stoppable awaitable protocol extends `affine_awaitable` to enable automatic stop token propagation through coroutine chains. When a task -has a stop token, it passes the token to any stoppable awaitables it -awaits. +has a stop token, it passes the token to any stoppable awaitables it awaits. A stoppable awaitable must provide _both_ overloads of `await_suspend`: * `await_suspend(h, d)` — for callers without stop tokens * `await_suspend(h, d, token)` — for callers with stop tokens -The awaitable should use the stop token to support cancellation of -the underlying operation. +The awaitable should use the stop token to support cancellation of the +underlying operation. 
+ +== Preconditions + +* The dispatcher `d` remains valid until the awaitable resumes the caller +* The stop token `token` remains valid until the operation completes == Valid Expressions @@ -61,25 +67,32 @@ Given: | `a.await_ready()` | `bool` -| Returns `true` if the operation has already completed +| Return `true` if the operation has already completed | `a.await_suspend(h, d)` | (unspecified) -| Suspends without cancellation support +| Suspend without cancellation support | `a.await_suspend(h, d, token)` | (unspecified) -| Suspends with cancellation support via the stop token +| Suspend with cancellation support via the stop token | `a.await_resume()` | (unspecified) -| Returns the operation result or rethrows any exception +| Return the operation result or rethrow any exception |=== == Example [source,cpp] ---- +#include +#include + +using boost::capy::coro; +using boost::capy::stoppable_awaitable; +using boost::capy::any_dispatcher; + struct stoppable_timer { std::chrono::milliseconds duration_; @@ -130,6 +143,6 @@ static_assert(stoppable_awaitable); == See Also -* xref:concepts/dispatcher.adoc[dispatcher] -* xref:concepts/affine_awaitable.adoc[affine_awaitable] -* xref:advanced.adoc[Advanced Topics] +* xref:dispatcher.adoc[dispatcher] — The dispatcher concept +* xref:affine_awaitable.adoc[affine_awaitable] — Base protocol without cancellation +* xref:../coroutines/cancellation.adoc[Cancellation] — Tutorial on stop token propagation diff --git a/doc/modules/ROOT/pages/coroutines.adoc b/doc/modules/ROOT/pages/coroutines.adoc deleted file mode 100644 index 6592f9de..00000000 --- a/doc/modules/ROOT/pages/coroutines.adoc +++ /dev/null @@ -1,263 +0,0 @@ -// -// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// -// Official repository: https://github.com/cppalliance/capy -// - -= Coroutines - -This page teaches you how to write coroutine functions using `task` and -launch them for execution using `async_run`. - -NOTE: Coroutine features require C++20 or later. - -== Introduction - -Capy provides lightweight coroutine support for C++20, enabling asynchronous -code that reads like synchronous code. The primary building blocks are: - -* `task` — A lazy coroutine type that produces a value of type `T` -* `async_run` — Launches a task for detached execution on an executor - -Tasks are _lazy_: they do not begin execution when created. A task remains -suspended until it is either awaited by another coroutine or launched -explicitly with `async_run`. This lazy evaluation enables structured -composition where parent coroutines naturally await their children. - -== The task Type - -A `task` represents an asynchronous operation that will eventually -produce a value of type `T`. You create a task by writing a coroutine -function—one that uses `co_await` or `co_return`: - -[source,cpp] ----- -#include - -using boost::capy::task; - -task compute() -{ - co_return 42; -} ----- - -The function `compute()` returns immediately with a suspended coroutine. -No code inside the function body executes until the task is started. 
- -=== Returning Values - -Use `co_return` to produce the task's result: - -[source,cpp] ----- -task greet(std::string name) -{ - co_return "Hello, " + name + "!"; -} ----- - -=== Void Tasks - -For operations that perform work without producing a value, use -`task`: - -[source,cpp] ----- -task log_message(std::string msg) -{ - std::cout << msg << std::endl; - co_return; -} ----- - -The explicit `co_return;` (or simply reaching the end of the function) -completes a void task. - -=== Awaiting Other Tasks - -Tasks can await other tasks using `co_await`. The calling coroutine -suspends until the awaited task completes: - -[source,cpp] ----- -task step_one() -{ - co_return 10; -} - -task step_two(int x) -{ - co_return x * 2; -} - -task pipeline() -{ - int a = co_await step_one(); - int b = co_await step_two(a); - co_return a + b; // 10 + 20 = 30 -} ----- - -Each `co_await` suspends the current coroutine, starts the child task, -and resumes when the child completes. The child's return value becomes -the result of the `co_await` expression. - -== Launching Tasks - -Tasks are lazy and require a driver to execute. The `async_run` function -launches a task for detached execution on an executor: - -[source,cpp] ----- -#include - -using boost::capy::async_run; - -void start(executor ex) -{ - async_run(ex)(compute()); -} ----- - -The syntax `async_run(ex)(task)` creates a runner bound to the executor, -then immediately launches the task. The task begins executing when the -executor schedules it; if inline execution is permitted, the task runs -synchronously until it suspends on an I/O operation. - -=== Fire and Forget - -The simplest form discards the result: - -[source,cpp] ----- -async_run(ex)(compute()); ----- - -If the task throws an exception, it is rethrown on the executor's thread. -This is appropriate for top-level tasks where errors should propagate. - -=== Handling Results - -To receive the task's result, provide a completion handler: - -[source,cpp] ----- -async_run(ex)(compute(), [](int result) { - std::cout << "Got: " << result << "\n"; -}); ----- - -The handler is called when the task completes successfully. If the task -throws, the exception is rethrown (default behavior). - -=== Handling Errors - -To handle both success and failure, provide a handler that accepts -`std::exception_ptr`: - -[source,cpp] ----- -async_run(ex)(compute(), overloaded{ - [](int result) { - std::cout << "Success: " << result << "\n"; - }, - [](std::exception_ptr ep) { - try { - if (ep) std::rethrow_exception(ep); - } catch (std::exception const& e) { - std::cerr << "Error: " << e.what() << "\n"; - } - } -}); ----- - -Alternatively, use separate handlers for success and error: - -[source,cpp] ----- -async_run(ex)(compute(), - [](int result) { std::cout << result << "\n"; }, - [](std::exception_ptr ep) { /* handle error */ } -); ----- - -== Exception Handling - -Exceptions thrown within a task are captured and stored. When the task -is awaited, the exception is rethrown in the awaiting coroutine: - -[source,cpp] ----- -task might_fail() -{ - throw std::runtime_error("oops"); - co_return 0; // never reached -} - -task caller() -{ - try { - int x = co_await might_fail(); - } catch (std::exception const& e) { - std::cerr << "Caught: " << e.what() << "\n"; - } -} ----- - -This enables natural exception handling across coroutine boundaries. - -== Complete Example - -This example demonstrates a typical pattern: a chain of tasks that -process data and produce a final result. 
- -[source,cpp] ----- -#include -#include -#include -#include - -using boost::capy::task; -using boost::capy::async_run; - -// Simulate fetching data -task fetch_data() -{ - co_return "Content-Length: 42\r\n\r\nHello"; -} - -// Parse content length from headers -task parse_content_length(std::string const& data) -{ - auto pos = data.find("Content-Length: "); - if (pos == std::string::npos) - co_return 0; - co_return std::stoi(data.substr(pos + 16)); -} - -// Compose the operations -task get_content_length() -{ - std::string data = co_await fetch_data(); - int length = co_await parse_content_length(data); - co_return length; -} - -void run_example(executor ex) -{ - async_run(ex)(get_content_length(), [](int length) { - std::cout << "Content length: " << length << "\n"; - }); -} ----- - -== Next Steps - -Now that you can write and launch coroutines, the next page explains -xref:execution.adoc[where they execute]—how executor affinity works -and how to control which thread or context runs your code. diff --git a/doc/modules/ROOT/pages/coroutines/affinity.adoc b/doc/modules/ROOT/pages/coroutines/affinity.adoc new file mode 100644 index 00000000..610f859f --- /dev/null +++ b/doc/modules/ROOT/pages/coroutines/affinity.adoc @@ -0,0 +1,234 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Executor Affinity + +This page explains where your coroutines execute and how to control execution +context. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== The Problem Affinity Solves + +When an I/O operation completes, the operating system wakes up _some_ thread. +Without affinity tracking, your coroutine might resume on an arbitrary thread: + +---- +Thread 1: task starts → co_await read() → suspends +Thread 2: (I/O completes) → task resumes here (surprise!) +---- + +This forces you to add synchronization everywhere. Affinity solves this by +ensuring coroutines resume on their designated executor. + +== What is Affinity? + +Affinity means a coroutine is bound to a specific executor. When a coroutine +has affinity to executor `ex`, all of its resumptions occur through `ex`. + +You establish affinity when launching a task: + +[source,cpp] +---- +async_run(ex)(my_task()); // my_task has affinity to ex +---- + +== How Affinity Propagates + +Affinity propagates forward through `co_await` chains. When a coroutine with +affinity awaits a child task, the child inherits the same affinity: + +[source,cpp] +---- +task parent() // affinity: ex (from async_run) +{ + co_await child(); // child inherits ex +} + +task child() // affinity: ex (inherited) +{ + co_await io.async_read(); // I/O captures ex, resumes through it +} +---- + +The mechanism is the _affine awaitable protocol_: each `co_await` passes the +current dispatcher to the awaited operation, which stores it and uses it for +resumption. + +== Flow Diagrams + +To reason about where code executes, use this compact notation: + +[cols="1,3"] +|=== +| Symbol | Meaning + +| `c`, `c1`, `c2` +| Coroutines (lazy tasks) + +| `io` +| I/O operation + +| `->` +| `co_await` leading to a coroutine or I/O + +| `!` +| Coroutine with explicit executor affinity + +| `ex`, `ex1`, `ex2` +| Executors +|=== + +=== Simple Chain + +---- +!c -> io +---- + +Coroutine `c` has affinity to some executor. 
When the I/O completes, `c` +resumes through that executor. + +=== Nested Coroutines + +---- +!c1 -> c2 -> io +---- + +* `c1` has explicit affinity to `ex` +* `c2` inherits affinity from `c1` +* The I/O captures `ex` and resumes through it +* When `c2` completes, `c1` resumes via symmetric transfer (same executor) + +== Changing Affinity with run_on + +Sometimes you need a child coroutine to run on a _different_ executor. +The `run_on` function changes affinity for a subtree: + +[source,cpp] +---- +#include + +task parent() +{ + // This task runs on ex1 (inherited) + + // Run child on ex2 instead + co_await run_on(ex2, child_task()); + + // Back on ex1 after child completes +} +---- + +In flow diagram notation: + +---- +!c1 -> c2 -> !c3 -> io +---- + +The execution sequence: + +1. `c1` launches on `ex1` +2. `c2` continues on `ex1` (inherited) +3. `run_on` binds `c3` to `ex2` +4. I/O captures `ex2` +5. I/O completes → `c3` resumes through `ex2` +6. `c3` completes → `c2` resumes through `ex1` (caller's executor) + +== Symmetric Transfer + +When a child coroutine completes, it must resume its caller. If both share +the same executor, _symmetric transfer_ provides a direct tail call with +zero overhead—no executor involvement, no queuing. + +The decision logic: + +1. **Same executor** → symmetric transfer (direct jump) +2. **Different executors** → dispatch through caller's executor + +Symmetric transfer is automatic. The library detects when caller and callee +share the same dispatcher (pointer equality) and optimizes accordingly. + +== Type-Erased Dispatchers + +The `any_dispatcher` class provides type erasure for dispatchers: + +[source,cpp] +---- +#include + +void store_dispatcher(any_dispatcher d) +{ + // Can store any dispatcher type uniformly + d(some_handle); // Invoke through type-erased interface +} +---- + +`task` uses `any_dispatcher` internally, enabling tasks to work with any +executor type without templating everything. + +== Legacy Awaitable Compatibility + +Not all awaitables implement the affine protocol. For standard library +awaitables or third-party types, Capy provides automatic compatibility through +a trampoline coroutine. + +When `await_transform` encounters a non-affine awaitable, it wraps it: + +[source,cpp] +---- +// Inside task's await_transform (simplified): +if constexpr (affine_awaitable) + return affine_path(a); // Zero-overhead +else + return make_affine(a, ex_); // Trampoline fallback +---- + +The trampoline adds one extra coroutine frame but ensures correct affinity. +Prefer implementing the affine protocol for performance-critical awaitables. 
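+
+For illustration, here is a plain awaitable used inside a task. The type is
+hypothetical and knows nothing about dispatchers, so it does not satisfy
+`affine_awaitable` and the task falls back to the trampoline path:
+
+[source,cpp]
+----
+// A plain C++20 awaitable: await_suspend takes no dispatcher.
+struct legacy_value
+{
+    int value;
+
+    bool await_ready() const noexcept { return true; }
+    void await_suspend(std::coroutine_handle<>) const noexcept {}
+    int await_resume() const noexcept { return value; }
+};
+
+task<int> use_legacy()
+{
+    // make_affine wraps the awaitable behind the scenes; the caller
+    // still resumes with its original affinity.
+    int v = co_await legacy_value{7};
+    co_return v;
+}
+----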
+ +== When NOT to Use run_on + +Use `run_on` when: + +* You need CPU-bound work on a dedicated thread pool +* You need I/O on a specific context +* You're integrating with a library that requires a specific executor + +Do NOT use `run_on` when: + +* The child task should inherit the parent's executor (just `co_await` directly) +* You're worried about performance — the context switch cost is already paid + by the I/O operation itself + +== Summary + +[cols="1,3"] +|=== +| Concept | Description + +| Affinity +| A coroutine is bound to a specific executor + +| Propagation +| Children inherit affinity from parents via `co_await` + +| `run_on` +| Explicitly binds a child to a different executor + +| Symmetric transfer +| Zero-overhead resumption when executor matches + +| `any_dispatcher` +| Type-erased dispatcher for heterogeneous executor support +|=== + +== Next Steps + +* xref:cancellation.adoc[Cancellation] — Stop token propagation +* xref:../execution/executors.adoc[Executors] — The execution model in depth diff --git a/doc/modules/ROOT/pages/coroutines/cancellation.adoc b/doc/modules/ROOT/pages/coroutines/cancellation.adoc new file mode 100644 index 00000000..c5d879be --- /dev/null +++ b/doc/modules/ROOT/pages/coroutines/cancellation.adoc @@ -0,0 +1,194 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Cancellation + +This page explains how to cancel running coroutines using `std::stop_token`. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== Cooperative Cancellation + +Capy supports cooperative cancellation through `std::stop_token`. When a task +is launched with stop support, the token propagates through the entire call +chain automatically. + +Cooperative means: + +* The framework delivers cancellation requests to operations +* Operations check the token and decide how to respond +* Nothing is forcibly terminated + +== How Stop Tokens Propagate + +Stop tokens propagate through `co_await` chains just like affinity. When you +await a stoppable operation inside a task with a stop token, the token is +forwarded automatically: + +[source,cpp] +---- +task cancellable_work() +{ + // If this task has a stop token, it's automatically + // passed to any stoppable awaitables we co_await + co_await some_stoppable_operation(); +} +---- + +== The Stoppable Awaitable Protocol + +Awaitables that support cancellation implement the `stoppable_awaitable` +concept. 
Their `await_suspend` receives both a dispatcher and a stop token: + +[source,cpp] +---- +template +auto await_suspend( + std::coroutine_handle<> h, + Dispatcher const& d, + std::stop_token token) +{ + if (token.stop_requested()) + { + // Already cancelled, resume immediately + return d(h); + } + + // Start async operation with cancellation support + start_async([h, &d, token] { + if (token.stop_requested()) + { + // Handle cancellation + } + d(h); + }); + return std::noop_coroutine(); +} +---- + +== Implementing a Stoppable Timer + +Here is a complete example of a stoppable timer: + +[source,cpp] +---- +struct stoppable_timer +{ + std::chrono::milliseconds duration_; + bool cancelled_ = false; + + bool await_ready() const noexcept + { + return duration_.count() <= 0; + } + + // Affine path (no cancellation) + template + auto await_suspend(coro h, Dispatcher const& d) + { + start_timer(duration_, [h, &d] { d(h); }); + return std::noop_coroutine(); + } + + // Stoppable path (with cancellation) + template + auto await_suspend( + coro h, + Dispatcher const& d, + std::stop_token token) + { + if (token.stop_requested()) + { + cancelled_ = true; + return d(h); // Resume immediately + } + + auto timer_handle = start_timer(duration_, [h, &d] { d(h); }); + + // Cancel timer if stop requested + std::stop_callback cb(token, [timer_handle] { + cancel_timer(timer_handle); + }); + + return std::noop_coroutine(); + } + + void await_resume() + { + if (cancelled_) + throw std::runtime_error("operation cancelled"); + } +}; +---- + +Key points: + +* Provide _both_ `await_suspend` overloads (with and without token) +* Check `stop_requested()` before starting work +* Register a `stop_callback` to cancel the underlying operation +* Signal cancellation in `await_resume` (typically via exception) + +== Checking Cancellation Status + +Within a coroutine, you can check if cancellation was requested: + +[source,cpp] +---- +task long_running_work() +{ + for (int i = 0; i < 1000; ++i) + { + // Periodically check for cancellation + if (/* stop requested */) + co_return; // Exit gracefully + + co_await process_chunk(i); + } +} +---- + +The mechanism for accessing the stop token depends on your task implementation. + +== When NOT to Use Cancellation + +Use cancellation when: + +* Operations may take a long time +* Users need to abort operations +* Timeouts are required + +Do NOT use cancellation when: + +* Operations are very short — the overhead is not worth it +* Operations cannot be interrupted meaningfully +* You need guaranteed completion + +== Summary + +[cols="1,3"] +|=== +| Concept | Description + +| Cooperative +| Operations check the token and decide how to respond + +| Automatic propagation +| Tokens flow through `co_await` chains + +| `stoppable_awaitable` +| Concept for awaitables that support cancellation + +| `stop_callback` +| Register cleanup when cancellation is requested +|=== + +== Next Steps + +* xref:../execution/executors.adoc[Executors] — Understand the execution model +* xref:../concepts/stoppable_awaitable.adoc[stoppable_awaitable] — Reference documentation diff --git a/doc/modules/ROOT/pages/coroutines/launching.adoc b/doc/modules/ROOT/pages/coroutines/launching.adoc new file mode 100644 index 00000000..01df2c41 --- /dev/null +++ b/doc/modules/ROOT/pages/coroutines/launching.adoc @@ -0,0 +1,175 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. 
(See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Launching Tasks + +This page explains how to start lazy tasks for execution using `async_run`. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== Why Tasks Need a Driver + +Tasks are lazy. They remain suspended until something starts them. Within a +coroutine, `co_await` serves this purpose. But at the program's entry point, +you need a way to kick off the first coroutine. + +The `async_run` function provides this capability. It: + +1. Binds a task to a dispatcher (typically an executor) +2. Starts the task's execution +3. Optionally delivers the result to a completion handler + +== Basic Usage + +[source,cpp] +---- +#include + +void start(executor ex) +{ + async_run(ex)(compute()); +} +---- + +The syntax `async_run(ex)(task)` creates a runner bound to the executor, then +immediately launches the task. The task begins executing when the executor +schedules it. + +== Fire and Forget + +The simplest pattern discards the result: + +[source,cpp] +---- +async_run(ex)(compute()); +---- + +If the task throws an exception, it propagates to the executor's error handling +(typically rethrown from `run()`). This pattern is appropriate for top-level +tasks where errors should terminate the program. + +== Handling Results + +To receive the task's result, provide a completion handler: + +[source,cpp] +---- +async_run(ex)(compute(), [](int result) { + std::cout << "Got: " << result << "\n"; +}); +---- + +The handler is called when the task completes successfully. For `task`, +the handler takes no arguments: + +[source,cpp] +---- +async_run(ex)(do_work(), []() { + std::cout << "Work complete\n"; +}); +---- + +== Handling Errors + +To handle both success and failure, provide a handler that also accepts +`std::exception_ptr`: + +[source,cpp] +---- +async_run(ex)(compute(), overloaded{ + [](int result) { + std::cout << "Success: " << result << "\n"; + }, + [](std::exception_ptr ep) { + try { + if (ep) std::rethrow_exception(ep); + } catch (std::exception const& e) { + std::cerr << "Error: " << e.what() << "\n"; + } + } +}); +---- + +Alternatively, use separate handlers for success and error: + +[source,cpp] +---- +async_run(ex)(compute(), + [](int result) { std::cout << result << "\n"; }, + [](std::exception_ptr ep) { /* handle error */ } +); +---- + +== The Single-Expression Idiom + +The `async_run` return value enforces a specific usage pattern: + +[source,cpp] +---- +// CORRECT: Single expression +async_run(ex)(make_task()); + +// INCORRECT: Split across statements +auto runner = async_run(ex); // Sets thread-local state +// ... other code may interfere ... +runner(make_task()); // Won't compile (deleted move) +---- + +This design ensures the frame allocator is active when your task is created, +enabling frame recycling optimization. + +== Custom Frame Allocators + +By default, `async_run` uses a recycling allocator that caches deallocated +frames. For custom allocation strategies: + +[source,cpp] +---- +my_pool_allocator alloc{pool}; +async_run(ex, alloc)(my_task()); +---- + +The allocator is used for all coroutine frames in the launched call tree. 
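+
+The `my_pool_allocator` type above is a placeholder; a minimal shape for it,
+assuming a pool object that exposes `allocate` and `deallocate` members, is:
+
+[source,cpp]
+----
+struct my_pool_allocator
+{
+    memory_pool* pool_;  // non-owning; the pool must outlive the call tree
+
+    void* allocate(std::size_t n)
+    {
+        return pool_->allocate(n);
+    }
+
+    void deallocate(void* p, std::size_t n)
+    {
+        pool_->deallocate(p, n);
+    }
+};
+----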
+ +== When NOT to Use async_run + +Use `async_run` when: + +* You need to start a coroutine from non-coroutine code +* You want fire-and-forget semantics +* You need to receive the result via callback + +Do NOT use `async_run` when: + +* You are already inside a coroutine — just `co_await` the task directly +* You need the result synchronously — `async_run` is asynchronous + +== Summary + +[cols="1,3"] +|=== +| Pattern | Code + +| Fire and forget +| `async_run(ex)(task)` + +| Success handler +| `async_run(ex)(task, handler)` + +| Success + error handlers +| `async_run(ex)(task, on_success, on_error)` + +| Custom allocator +| `async_run(ex, alloc)(task)` +|=== + +== Next Steps + +* xref:affinity.adoc[Executor Affinity] — Control where coroutines execute +* xref:../execution/frame-allocation.adoc[Frame Allocation] — Optimize memory usage diff --git a/doc/modules/ROOT/pages/coroutines/tasks.adoc b/doc/modules/ROOT/pages/coroutines/tasks.adoc new file mode 100644 index 00000000..498aa4bb --- /dev/null +++ b/doc/modules/ROOT/pages/coroutines/tasks.adoc @@ -0,0 +1,200 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Tasks + +This page explains how to write coroutine functions using `task`. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== What is a Task? + +A `task` represents an asynchronous operation that will produce a value +of type `T`. Tasks are _lazy_: they do not begin execution when created. +A task remains suspended until it is either awaited by another coroutine +or launched explicitly with `async_run`. + +This laziness enables structured composition. When you write: + +[source,cpp] +---- +task parent() +{ + co_await child(); // child runs here, not when created +} +---- + +The child coroutine starts exactly when the parent awaits it, making the +control flow predictable. + +== Creating Tasks + +Write a coroutine function by using `co_return` or `co_await`: + +[source,cpp] +---- +task compute() +{ + co_return 42; +} +---- + +The function `compute()` returns immediately with a suspended coroutine. +No code inside the function body executes until the task is started. + +=== Returning Values + +Use `co_return` to produce the task's result: + +[source,cpp] +---- +task greet(std::string name) +{ + co_return "Hello, " + name + "!"; +} +---- + +=== Void Tasks + +For operations that perform work without producing a value, use `task`: + +[source,cpp] +---- +task log_message(std::string msg) +{ + std::cout << msg << std::endl; + co_return; +} +---- + +The explicit `co_return;` statement completes the task. Reaching the end of +the function body has the same effect. + +== Awaiting Tasks + +Tasks can await other tasks using `co_await`. The calling coroutine suspends +until the awaited task completes: + +[source,cpp] +---- +task step_one() +{ + co_return 10; +} + +task step_two(int x) +{ + co_return x * 2; +} + +task pipeline() +{ + int a = co_await step_one(); + int b = co_await step_two(a); + co_return a + b; // 10 + 20 = 30 +} +---- + +Each `co_await` suspends the current coroutine, starts the child task, and +resumes when the child completes. The child's return value becomes the result +of the `co_await` expression. + +== Exception Handling + +Exceptions thrown within a task are captured and stored. 
When the task is +awaited, the exception is rethrown in the awaiting coroutine: + +[source,cpp] +---- +task might_fail() +{ + throw std::runtime_error("oops"); + co_return 0; // never reached +} + +task caller() +{ + try { + int x = co_await might_fail(); + } catch (std::exception const& e) { + std::cerr << "Caught: " << e.what() << "\n"; + } +} +---- + +This enables natural exception handling across coroutine boundaries. + +== Move-Only Semantics + +Tasks are move-only. You cannot copy a task: + +[source,cpp] +---- +task t = compute(); +task t2 = t; // ERROR: deleted copy constructor +task t3 = std::move(t); // OK: move is allowed +---- + +This reflects the fact that a coroutine has unique state that cannot be +duplicated. + +== Releasing the Handle + +In advanced scenarios, you may need direct access to the coroutine handle. +The `release()` method transfers ownership: + +[source,cpp] +---- +task t = compute(); +auto handle = t.release(); // t no longer owns the coroutine +// ... use handle directly ... +handle.destroy(); // caller is responsible for cleanup +---- + +WARNING: After calling `release()`, the task is empty and must not be awaited. + +== When NOT to Use Tasks + +Tasks are appropriate when: + +* The operation may suspend (performs I/O, awaits other tasks) +* You want structured composition with parent/child relationships +* You need lazy evaluation + +Tasks are NOT appropriate when: + +* The operation is purely synchronous — just use a regular function +* You need parallel execution — tasks are sequential; use parallel composition +* You need to detach and forget — tasks must be awaited or explicitly launched + +== Summary + +[cols="1,3"] +|=== +| Feature | Description + +| Lazy execution +| Tasks do not start until awaited or launched + +| Move-only +| Cannot copy, can move + +| Exception propagation +| Exceptions rethrow at the await point + +| Structured +| Parent awaits child, control flow is predictable +|=== + +== Next Steps + +Now that you understand tasks, learn how to run them: + +* xref:launching.adoc[Launching Tasks] — Start tasks with `async_run` +* xref:affinity.adoc[Executor Affinity] — Control where tasks execute diff --git a/doc/modules/ROOT/pages/execution.adoc b/doc/modules/ROOT/pages/execution.adoc deleted file mode 100644 index 787d0d78..00000000 --- a/doc/modules/ROOT/pages/execution.adoc +++ /dev/null @@ -1,314 +0,0 @@ -// -// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) -// -// Distributed under the Boost Software License, Version 1.0. (See accompanying -// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) -// -// Official repository: https://github.com/cppalliance/capy -// - -= Execution Model - -This page explains where your coroutines execute, how executor affinity -propagates through call chains, and how to control execution context. - -== Executors and Contexts - -An *executor* is to coroutines what an allocator is to memory. It -encapsulates the rules for where, when, and how a coroutine resumes. -An executor is a lightweight, copyable handle to an *execution context* -such as a thread pool, I/O context, or strand. - -When you launch a task with `async_run(ex)`, the executor `ex` determines: - -* Which thread runs the coroutine -* When resumption occurs (inline vs. 
queued) -* What ordering guarantees apply - -== Executor Operations - -Executors provide three fundamental operations for scheduling work: - -[cols="1,3"] -|=== -| Operation | Behavior - -| `dispatch` -| Run inline if the executor permits, otherwise queue. This is the -cheapest path and is used when crossing execution context boundaries. - -| `post` -| Always queue, never inline. Use when guaranteed asynchrony is required. - -| `defer` -| Always queue, but hints "this is my continuation." Enables thread-local -optimizations for same-context resumption. -|=== - -In a pure coroutine model, *symmetric transfer* handles most continuation -chaining directly—the compiler generates tail calls between frames with -zero executor involvement. The executor operations become relevant when -crossing context boundaries or enforcing ordering constraints. - -== Flow Diagrams - -To reason about where code executes, we use a compact notation called -*flow diagrams*. These diagrams show the call chain of coroutines and -I/O operations: - -[cols="1,3"] -|=== -| Symbol | Meaning - -| `c`, `c1`, `c2` -| Coroutines (lazy tasks) - -| `io` -| I/O operation on an `io_object` - -| `->` -| `co_await` leading to a coroutine or I/O - -| `!` -| Coroutine with explicit executor affinity - -| `ex`, `ex1`, `ex2` -| Executors -|=== - -=== Simple Chain - -The diagram: - ----- -c -> io ----- - -represents: - -[source,cpp] ----- -task c(io_object& io) -{ - co_await io.async_read(); -} ----- - -The coroutine `c` awaits an I/O operation. When the I/O completes, -`c` resumes. - -=== Nested Coroutines - -The diagram: - ----- -c1 -> c2 -> io ----- - -represents: - -[source,cpp] ----- -task c1(io_object& io) -{ - co_await c2(io); -} - -task c2(io_object& io) -{ - co_await io.async_read(); -} ----- - -When `c1` awaits `c2`, control transfers to `c2`. When the I/O completes, -`c2` resumes and completes, then `c1` resumes. - -== Executor Affinity - -*Affinity* means a coroutine is bound to a specific executor. When a -coroutine has affinity to executor `ex`, all of its resumptions occur -through `ex`. - -In flow diagrams, a `!` prefix indicates explicit affinity: - ----- -!c1 -> io ----- - -This means `c1` has affinity to some executor `ex`. When the I/O -completes, `c1` is resumed through `ex`. - -You establish affinity when launching a task: - -[source,cpp] ----- -async_run(ex)(my_task()); // my_task has affinity to ex ----- - -=== Affinity Propagation - -Affinity propagates forward through `co_await` chains. When a coroutine -with affinity awaits a child task, the child inherits the same affinity: - ----- -!c1 -> c2 -> io ----- - -Here: - -* `c1` has explicit affinity to `ex` -* `c2` inherits affinity from `c1` -* The I/O captures `ex` and resumes through it -* When `c2` completes, `c1` resumes via symmetric transfer (same executor) - -The mechanism is the *affine awaitable protocol*: each `co_await` passes -the current dispatcher to the awaited operation, which stores it and -uses it for resumption. - -=== Why Affinity Matters - -Affinity provides important guarantees: - -1. **Predictable execution context** — Your code always runs where you expect -2. **Thread safety** — No surprise thread hops mid-operation -3. **Strand compatibility** — Strands enforce ordering; affinity ensures - resumption goes through the strand - -Without affinity, an I/O completion might resume your coroutine on an -arbitrary I/O thread, requiring explicit synchronization. 
- -== Changing Affinity with run_on - -Sometimes you need a child coroutine to run on a _different_ executor. -The `run_on` function changes affinity for a subtree of the call chain: - -[source,cpp] ----- -#include - -using boost::capy::run_on; - -task parent() -{ - // This task runs on ex1 (inherited) - - // Run child on ex2 instead - co_await run_on(ex2, child_task()); - - // Back on ex1 after child completes -} ----- - -In flow diagram notation: - ----- -!c1 -> c2 -> !c3 -> io ----- - -This represents: - -[source,cpp] ----- -task c1(io_object& io) // affinity: ex1 -{ - co_await c2(io); -} - -task c2(io_object& io) // affinity: ex1 (inherited) -{ - co_await run_on(ex2, c3(io)); -} - -task c3(io_object& io) // affinity: ex2 (explicit) -{ - co_await io.async_read(); -} ----- - -The execution sequence: - -1. `c1` launches on `ex1` -2. `c2` continues on `ex1` (inherited) -3. `run_on` binds `c3` to `ex2` -4. I/O captures `ex2` -5. I/O completes → `c3` resumes through `ex2` -6. `c3` completes → `c2` resumes through `ex1` (caller's executor) -7. `c2` completes → `c1` resumes via symmetric transfer (same executor) - -== Symmetric Transfer - -When a child coroutine completes, it must resume its caller. If both -share the same executor, *symmetric transfer* provides a direct tail -call with zero overhead—no executor involvement, no queuing. - -The decision logic: - -1. **Same executor, no constraints** → symmetric transfer -2. **Different executors** → dispatch through caller's executor -3. **Same executor, strand required** → defer through executor - -Symmetric transfer is automatic. The library detects when caller and -callee share the same dispatcher (pointer equality) and optimizes -accordingly. - -== Execution Guarantees - -Capy's coroutine model provides these guarantees: - -[cols="1,3"] -|=== -| Guarantee | Description - -| Affinity preservation -| A coroutine with affinity always resumes through its executor - -| Forward progress -| `post` and `defer` never block; work is queued for later execution - -| Exception safety -| Exceptions propagate cleanly to the awaiting coroutine - -| Completion order -| For a single executor, work completes in submission order (FIFO) -|=== - -== Example: Multi-Context Pipeline - -This example shows a pipeline where different stages run on different -executors: - -[source,cpp] ----- -#include -#include - -// CPU-bound work runs on compute pool -task process(Data input) -{ - // Heavy computation here - co_return transform(input); -} - -// I/O runs on I/O context -task handle_request(io_context& ioc, thread_pool& pool) -{ - auto io_ex = ioc.get_executor(); - auto cpu_ex = pool.get_executor(); - - // Read request (I/O executor) - Data request = co_await read_request(); - - // Process on compute pool - Data response = co_await run_on(cpu_ex, process(request)); - - // Back on I/O executor, write response - co_await write_response(response); -} ----- - -== Next Steps - -You now understand where coroutines execute and how to control execution -context. The next page covers xref:advanced.adoc[advanced topics] including -custom frame allocators, stop token propagation, and implementing your -own affine-aware awaitables. 
diff --git a/doc/modules/ROOT/pages/execution/contexts.adoc b/doc/modules/ROOT/pages/execution/contexts.adoc new file mode 100644 index 00000000..e2f71e9d --- /dev/null +++ b/doc/modules/ROOT/pages/execution/contexts.adoc @@ -0,0 +1,249 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Execution Contexts + +This page explains execution contexts, service management, and the thread pool. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== What is an Execution Context? + +An execution context is a place where work runs. It provides: + +* A registry of services (polymorphic components) +* An associated executor type +* Lifecycle management (shutdown, destroy) + +The `execution_context` class is the base class for all contexts: + +[source,cpp] +---- +class io_context : public execution_context +{ +public: + using executor_type = /* ... */; + + executor_type get_executor(); + + ~io_context() + { + shutdown(); + destroy(); + } +}; +---- + +== Service Management + +Services are polymorphic components owned by an execution context. Each service +type can be registered at most once. + +=== Creating Services + +[source,cpp] +---- +// Get or create a service +my_service& svc = ctx.use_service(); + +// Explicitly create with arguments +my_service& svc = ctx.make_service(arg1, arg2); + +// Check if a service exists +if (ctx.has_service()) + // ... + +// Find without creating +my_service* svc = ctx.find_service(); // nullptr if not found +---- + +=== Implementing Services + +Services derive from `execution_context::service`: + +[source,cpp] +---- +struct my_service : execution_context::service +{ + explicit my_service(execution_context& ctx) + { + // Initialize... + } + +protected: + void shutdown() override + { + // Cancel pending operations + // Release resources + // Must not block or throw + } +}; +---- + +The `shutdown()` method is called when the context is destroyed, in reverse +order of service creation. + +=== Key Type Aliasing + +Services can specify a `key_type` to enable base-class lookup: + +[source,cpp] +---- +struct file_service : execution_context::service +{ +protected: + void shutdown() override {} +}; + +struct posix_file_service : file_service +{ + using key_type = file_service; // Register under base class + + explicit posix_file_service(execution_context& ctx) {} +}; + +// Usage: +ctx.make_service(); +file_service* svc = ctx.find_service(); // Returns posix_file_service* +---- + +== Handler Queue + +The `execution_context::queue` class stores completion handlers: + +[source,cpp] +---- +class queue +{ +public: + bool empty() const noexcept; + void push(handler* h) noexcept; + void push(queue& other) noexcept; // Splice + handler* pop() noexcept; +}; +---- + +Handlers implement the ownership contract: + +* Call `operator()` for normal invocation (handler cleans itself up) +* Call `destroy()` to discard without invoking (e.g., during shutdown) +* Never call both, and never use `delete` directly + +== Thread Pool + +The `thread_pool` class provides a pool of worker threads: + +[source,cpp] +---- +thread_pool pool(4); // 4 worker threads +auto ex = pool.get_executor(); + +async_run(ex)(my_task()); + +// ... work runs on pool threads ... 
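+
+// Destroying the pool signals the workers to stop and joins them;
+// pending work is discarded (see Destruction below).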
+----
+
+=== Construction
+
+[source,cpp]
+----
+thread_pool();               // Default: hardware_concurrency threads
+thread_pool(std::size_t n);  // Explicit thread count
+----
+
+=== Destruction
+
+The destructor signals all threads to stop and waits for them to complete.
+Pending work is discarded.
+
+== Lifecycle Pattern
+
+Derived contexts must follow this destruction pattern:
+
+[source,cpp]
+----
+class my_context : public execution_context
+{
+public:
+    ~my_context()
+    {
+        shutdown();  // Notify services, cancel pending work
+        destroy();   // Delete services in reverse order
+        // Now safe to destroy members
+    }
+};
+----
+
+Calling `shutdown()` and `destroy()` from the base class destructor is too
+late—derived class members may already be destroyed.
+
+== The is_execution_context Concept
+
+Types satisfying `is_execution_context` can be used with framework components:
+
+[source,cpp]
+----
+template<class X>
+concept is_execution_context =
+    std::derived_from<X, execution_context> &&
+    requires { typename X::executor_type; } &&
+    executor<typename X::executor_type> &&
+    requires(X& x) {
+        { x.get_executor() } -> std::same_as<typename X::executor_type>;
+    };
+----
+
+== Thread Safety
+
+Service management functions (`use_service`, `make_service`, `find_service`)
+are thread-safe. The `shutdown()` and `destroy()` functions are NOT thread-safe
+and must only be called during destruction.
+
+== When NOT to Use execution_context Directly
+
+Use `execution_context` directly when:
+
+* Building a custom I/O context
+* Implementing a new execution model
+* Managing polymorphic services
+
+Do NOT use `execution_context` directly when:
+
+* You just need to run coroutines — use an existing context like Asio's
+* You need a thread pool — use `thread_pool` directly
+
+== Summary
+
+[cols="1,3"]
+|===
+| Component | Purpose
+
+| `execution_context`
+| Base class providing service registry
+
+| `service`
+| Polymorphic component owned by a context
+
+| `handler`
+| Base class for completion callbacks
+
+| `queue`
+| FIFO queue of handlers
+
+| `thread_pool`
+| Multi-threaded execution context
+
+| `is_execution_context`
+| Concept for valid execution contexts
+|===
+
+== Next Steps
+
+* xref:frame-allocation.adoc[Frame Allocation] — Optimize coroutine memory
+* xref:../concepts/is_execution_context.adoc[is_execution_context] — Reference
diff --git a/doc/modules/ROOT/pages/execution/executors.adoc b/doc/modules/ROOT/pages/execution/executors.adoc
new file mode 100644
index 00000000..355b0d93
--- /dev/null
+++ b/doc/modules/ROOT/pages/execution/executors.adoc
@@ -0,0 +1,228 @@
+//
+// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// Official repository: https://github.com/cppalliance/capy
+//
+
+= Executors
+
+This page explains the executor and dispatcher concepts that underpin Capy's
+execution model.
+
+NOTE: Code snippets assume `using namespace boost::capy;` is in effect.
+
+== Two Concepts, Two Levels
+
+Capy distinguishes between two related but different concepts:
+
+[cols="1,2,2"]
+|===
+| Concept | Purpose | Typical Use
+
+| `dispatcher`
+| Schedule a coroutine handle for resumption
+| Internal plumbing in awaitables
+
+| `executor`
+| Full execution context interface
+| User-facing API for work submission
+|===
+
+A dispatcher is simpler: just a callable that accepts a coroutine handle. An
+executor adds work tracking, context access, and multiple submission methods.
+
+== The Dispatcher Concept
+
+A dispatcher is a callable that schedules coroutine resumption:
+
+[source,cpp]
+----
+template<class D>
+concept dispatcher = requires(D const& d, std::coroutine_handle<void> h) {
+    { d(h) } -> std::convertible_to<std::coroutine_handle<void>>;
+};
+----
+
+When invoked with a coroutine handle, the dispatcher:
+
+1. Schedules the handle for resumption (inline or queued)
+2. Returns a handle suitable for symmetric transfer
+
+=== Example Dispatcher
+
+[source,cpp]
+----
+struct inline_dispatcher
+{
+    coro operator()(coro h) const
+    {
+        return h; // Resume inline via symmetric transfer
+    }
+};
+
+struct queuing_dispatcher
+{
+    work_queue* queue_;
+
+    coro operator()(coro h) const
+    {
+        queue_->push(h);
+        return std::noop_coroutine(); // Caller returns to event loop
+    }
+};
+----
+
+== The Executor Concept
+
+An executor provides the full interface for scheduling work:
+
+[source,cpp]
+----
+template<class E>
+concept executor =
+    std::copy_constructible<E> &&
+    std::equality_comparable<E> &&
+    requires(E const& ce, std::coroutine_handle<> h) {
+        { ce.context() } -> /* reference to execution context */;
+        { ce.on_work_started() } noexcept;
+        { ce.on_work_finished() } noexcept;
+        { ce.dispatch(h) } -> std::convertible_to<std::coroutine_handle<>>;
+        { ce.post(h) };
+        { ce.defer(h) };
+    };
+----
+
+=== Scheduling Operations
+
+[cols="1,3"]
+|===
+| Operation | Behavior
+
+| `dispatch(h)`
+| Run inline if safe, otherwise queue. Cheapest path.
+
+| `post(h)`
+| Always queue, never inline. Guaranteed asynchrony.
+
+| `defer(h)`
+| Always queue with "this is my continuation" hint. Enables optimizations.
+|===
+
+**When to use each:**
+
+* `dispatch` — Default choice. Allows the executor to optimize.
+* `post` — When you need guaranteed asynchrony (e.g., releasing a lock first).
+* `defer` — When posting your own continuation (enables thread-local queuing).
+
+=== Work Tracking
+
+The `on_work_started()` and `on_work_finished()` calls track outstanding work.
+This enables `run()` to know when to stop:
+
+[source,cpp]
+----
+executor ex = ctx.get_executor();
+
+ex.on_work_started();   // Increment work count
+// ... submit work ...
+ex.on_work_finished();  // Decrement work count
+----
+
+The `executor_work_guard` RAII wrapper simplifies this pattern.
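+
+A minimal sketch of that usage, assuming an Asio-style interface in which the
+guard is constructed from an executor and releases the work count when
+destroyed (check the `executor_work_guard` reference for the exact signature):
+
+[source,cpp]
+----
+void example(executor ex)
+{
+    executor_work_guard<executor> guard(ex);  // calls on_work_started()
+
+    // While `guard` is alive, run() will not return for lack of work,
+    // even if no handler is currently queued.
+
+}  // guard destroyed: calls on_work_finished()
+----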
+
+== Affine Awaitable Concept
+
+Awaitables that participate in affinity propagation implement `affine_awaitable`:
+
+[source,cpp]
+----
+template<class A, class D>
+concept affine_awaitable =
+    dispatcher<D> &&
+    requires(A a, std::coroutine_handle<void> h, D const& d) {
+        a.await_suspend(h, d);
+    };
+----
+
+The awaitable receives the dispatcher in `await_suspend` and uses it to
+resume the caller when the operation completes.
+
+== Stoppable Awaitable Concept
+
+Awaitables with cancellation support implement `stoppable_awaitable`:
+
+[source,cpp]
+----
+template<class A, class D>
+concept stoppable_awaitable =
+    affine_awaitable<A, D> &&
+    requires(A a, std::coroutine_handle<void> h, D const& d, std::stop_token t) {
+        a.await_suspend(h, d, t);
+    };
+----
+
+Stoppable awaitables provide _both_ overloads of `await_suspend`.
+
+== Thread Safety
+
+Executors have specific thread safety guarantees:
+
+* Copy constructor, comparison, `context()` — always thread-safe
+* `dispatch`, `post`, `defer` — thread-safe for concurrent calls
+* `on_work_started`, `on_work_finished` — thread-safe, must not throw
+
+== Executor Validity
+
+An executor becomes invalid when its execution context shuts down:
+
+[source,cpp]
+----
+io_context ctx;
+auto ex = ctx.get_executor();
+ctx.stop();  // Begin shutdown
+
+// WARNING: Calling ex.dispatch() now is undefined behavior
+----
+
+The copy constructor and `context()` remain valid until the context is
+destroyed, but work submission functions become undefined.
+
+== When NOT to Use Executors Directly
+
+Use executors directly when:
+
+* Implementing custom I/O operations
+* Building framework-level abstractions
+* Integrating with external event loops
+
+Do NOT use executors directly when:
+
+* Writing application code — use `async_run` and `task` instead
+* You just need to run some code later — use the higher-level abstractions
+
+== Summary
+
+[cols="1,3"]
+|===
+| Concept | Purpose
+
+| `dispatcher`
+| Minimal interface for coroutine resumption
+
+| `executor`
+| Full work submission with tracking
+
+| `affine_awaitable`
+| Awaitable that accepts dispatcher for affinity
+
+| `stoppable_awaitable`
+| Awaitable that also accepts stop token
+|===
+
+== Next Steps
+
+* xref:contexts.adoc[Execution Contexts] — Service management and thread pools
+* xref:../concepts/executor.adoc[executor concept] — Reference documentation
diff --git a/doc/modules/ROOT/pages/execution/frame-allocation.adoc b/doc/modules/ROOT/pages/execution/frame-allocation.adoc
new file mode 100644
index 00000000..96ceb603
--- /dev/null
+++ b/doc/modules/ROOT/pages/execution/frame-allocation.adoc
@@ -0,0 +1,211 @@
+//
+// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com)
+//
+// Distributed under the Boost Software License, Version 1.0. (See accompanying
+// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
+//
+// Official repository: https://github.com/cppalliance/capy
+//
+
+= Frame Allocation
+
+This page explains coroutine frame allocation and how to optimize memory usage.
+
+NOTE: Code snippets assume `using namespace boost::capy;` is in effect.
+
+== What is a Coroutine Frame?
+
+Every coroutine requires memory for its _frame_—the compiler-generated structure
+holding local variables, parameters, and suspension state. By default, frames
+are allocated with `::operator new`.
+
+For high-frequency coroutine creation, custom allocators can significantly
+reduce allocation overhead.
+
+== The frame_allocator Concept
+
+A type satisfying `frame_allocator` provides:
+
+[source,cpp]
+----
+template<class A>
+concept frame_allocator =
+    std::copy_constructible<A> &&
+    requires(A& a, void* p, std::size_t n) {
+        { a.allocate(n) } -> std::same_as<void*>;
+        { a.deallocate(p, n) };
+    };
+----
+
+Frame allocators must be cheaply copyable handles to an underlying memory
+resource (e.g., a pointer to a pool).
+ +== Default Frame Allocator + +The `default_frame_allocator` passes through to global new/delete: + +[source,cpp] +---- +struct default_frame_allocator +{ + void* allocate(std::size_t n) + { + return ::operator new(n); + } + + void deallocate(void* p, std::size_t) + { + ::operator delete(p); + } +}; +---- + +== Recycling Frame Allocator + +By default, `async_run` uses a recycling frame allocator that caches deallocated +frames for reuse. This eliminates most allocation overhead for typical coroutine +patterns where frames are created and destroyed in LIFO order. + +The recycling allocator: + +* Maintains a thread-local free list +* Reuses frames of matching size +* Falls back to global new/delete for mismatched sizes + +== Custom Allocators with async_run + +Pass a custom allocator as the second argument to `async_run`: + +[source,cpp] +---- +my_pool_allocator alloc{pool}; + +async_run(ex, alloc)(my_task()); +---- + +The allocator is used for all coroutine frames in the launched call tree. + +== Implementing a Custom Allocator + +[source,cpp] +---- +class pool_frame_allocator +{ + memory_pool* pool_; + +public: + explicit pool_frame_allocator(memory_pool& pool) + : pool_(&pool) + { + } + + void* allocate(std::size_t n) + { + return pool_->allocate(n); + } + + void deallocate(void* p, std::size_t n) + { + pool_->deallocate(p, n); + } +}; + +static_assert(frame_allocator); +---- + +== Memory Layout + +Coroutine frames have this layout: + +---- +First frame: [coroutine frame | tagged_ptr | allocator_wrapper] +Child frames: [coroutine frame | ptr] +---- + +The pointer at the end of each frame enables correct deallocation regardless +of which allocator was active at allocation time. A tag bit distinguishes the +first frame (with embedded wrapper) from child frames. + +=== First Frame + +The first frame in a call tree (created by `async_run`) contains an embedded +`frame_allocator_wrapper` that holds a copy of the allocator. This ensures the +allocator outlives all frames that use it. + +=== Child Frames + +Child frames store only a pointer to the wrapper in the first frame. This +minimizes per-frame overhead while maintaining correct deallocation. + +== The frame_allocating_base Mixin + +Derive your promise type from `frame_allocating_base` to enable custom frame +allocation: + +[source,cpp] +---- +struct my_promise : frame_allocating_base +{ + // ... promise implementation ... +}; +---- + +This mixin provides `operator new` and `operator delete` that use the +thread-local allocator when available. + +== Thread-Local State + +The allocation mechanism uses thread-local storage: + +[source,cpp] +---- +// Set allocator for subsequent allocations +frame_allocating_base::set_frame_allocator(alloc); + +// Clear allocator (revert to global new) +frame_allocating_base::clear_frame_allocator(); + +// Get current allocator (may be nullptr) +auto* alloc = frame_allocating_base::get_frame_allocator(); +---- + +The `async_run` function manages this automatically—you rarely need to call +these directly. 
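+
+For illustration only, here is roughly how those calls bracket task creation
+(this mirrors what `async_run` already does for you; `pool_frame_allocator`
+is the example type defined earlier on this page, and `pool` is your pool
+instance):
+
+[source,cpp]
+----
+pool_frame_allocator alloc{pool};
+
+frame_allocating_base::set_frame_allocator(alloc);  // subsequent frames use alloc
+auto t = my_task();                                  // frame allocated via alloc
+frame_allocating_base::clear_frame_allocator();      // revert to ::operator new
+----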
+ +== When NOT to Use Custom Allocators + +Use custom allocators when: + +* Profiling shows allocation is a bottleneck +* You have a custom memory pool available +* You need deterministic allocation behavior + +Do NOT use custom allocators when: + +* The default recycling allocator is sufficient (it usually is) +* Your coroutines are long-lived (allocation amortizes over time) +* You're unsure — measure first, optimize second + +== Summary + +[cols="1,3"] +|=== +| Component | Purpose + +| `frame_allocator` +| Concept for custom allocators + +| `default_frame_allocator` +| Pass-through to global new/delete + +| Recycling allocator +| Default: caches deallocated frames + +| `frame_allocating_base` +| Promise mixin enabling custom allocation +|=== + +== Next Steps + +* xref:../utilities/containers.adoc[Containers] — Type-erased storage +* xref:../concepts/frame_allocator.adoc[frame_allocator] — Reference documentation diff --git a/doc/modules/ROOT/pages/index.adoc b/doc/modules/ROOT/pages/index.adoc index 855c0478..3ea3a5d9 100644 --- a/doc/modules/ROOT/pages/index.adoc +++ b/doc/modules/ROOT/pages/index.adoc @@ -1,6 +1,5 @@ // -// Copyright (c) 2023 Vinnie Falco (vinnie.falco@gmail.com) -// Copyright (c) 2024 Mohammad Nejati +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) @@ -10,30 +9,90 @@ = Boost.Capy -Boost.Capy is a portable, low-level C++ library which provides -facilities for creating and accessing optional services during runtime. +Boost.Capy is a lightweight C++20 coroutine framework that provides lazy tasks +with automatic executor affinity propagation. -* Require only C++11 -* Works without exceptions -* Works without RTTI -* Fast compilation, few templates +== What This Library Does -== Embedded +Capy solves a specific problem: when you `co_await` a child coroutine, where +does it resume? Without affinity tracking, completions can arrive on arbitrary +threads, forcing you to add synchronization everywhere. -Boost.Capy works great on embedded devices. -It is designed to work without exceptions and RTTI if desired. +Capy provides: -== Tested Compilers +* **Lazy tasks** that do not start until awaited or explicitly launched +* **Automatic affinity propagation** through coroutine call chains +* **Zero-overhead dispatcher protocol** for custom awaitables +* **Frame allocation recycling** to minimize allocation overhead -Boost.Capy has been tested with the following compilers: +== What This Library Does Not Do -* clang: 3.8, 4, 5, 6, 7, 8, 9, 10, 11, 12 -* gcc: 4.8, 4.9, 5, 6, 7, 8, 9, 10, 11 -* msvc: 14.1, 14.2, 14.3 +Capy is not a general-purpose I/O framework. It does not include: -== Quality Assurance +* Event loops or I/O polling (use Asio, io_uring wrappers, etc.) +* Networking primitives (sockets, HTTP, etc.) +* The sender/receiver execution model (P2300) -The development infrastructure for the library includes these per-commit analyses: +Capy integrates with existing I/O frameworks by wrapping their completion +mechanisms in affine-aware awaitables. -* Coverage reports -* Compilation and tests on Drone.io and GitHub Actions +== Design Philosophy + +**Lazy by default.** Tasks suspend immediately on creation. This enables +structured composition where parent coroutines naturally await their children. +Eager execution is available through `async_run`. 
+ +**Affinity through the protocol.** The dispatcher propagates through +`await_suspend` parameters, not through thread-local storage or global state. +This makes the data flow explicit and testable. + +**Type erasure at boundaries.** Tasks use type-erased dispatchers (`any_dispatcher`) +internally, paying the indirection cost once rather than templating everything. +For I/O-bound code, this cost is negligible. + +== Requirements + +* C++20 compiler with coroutine support +* Boost (for system::error_code, core::string_view) + +=== Tested Compilers + +* GCC 11+ +* Clang 14+ +* MSVC 19.29+ (Visual Studio 2019 16.10+) + +== Quick Example + +[source,cpp] +---- +#include +#include +#include + +using boost::capy::task; +using boost::capy::async_run; + +task compute() +{ + co_return 42; +} + +task run(auto executor) +{ + int result = co_await compute(); + std::cout << "Result: " << result << "\n"; +} + +int main() +{ + io_context ioc; + async_run(ioc.get_executor())(run(ioc.get_executor())); + ioc.run(); +} +---- + +== Next Steps + +* xref:quick-start.adoc[Quick Start] — Get a working program in 5 minutes +* xref:coroutines/tasks.adoc[Tasks] — Understand lazy coroutines +* xref:execution/executors.adoc[Executors] — Learn about the execution model diff --git a/doc/modules/ROOT/pages/quick-start.adoc b/doc/modules/ROOT/pages/quick-start.adoc new file mode 100644 index 00000000..e55b1a74 --- /dev/null +++ b/doc/modules/ROOT/pages/quick-start.adoc @@ -0,0 +1,122 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Quick Start + +This page gets you from zero to a working coroutine program in five minutes. + +NOTE: Capy requires C++20 with coroutine support. + +== Minimal Example + +Create a file `hello_coro.cpp`: + +[source,cpp] +---- +#include +#include +#include +#include + +namespace capy = boost::capy; +namespace asio = boost::asio; + +// A coroutine that returns a value +capy::task answer() +{ + co_return 42; +} + +// A coroutine that awaits another coroutine +capy::task greet() +{ + int n = co_await answer(); + std::cout << "The answer is " << n << "\n"; +} + +int main() +{ + asio::io_context ioc; + + // Launch the coroutine on the io_context's executor + capy::async_run(ioc.get_executor())(greet()); + + // Run until all work completes + ioc.run(); +} +---- + +== Build and Run + +[source,bash] +---- +# With GCC +g++ -std=c++20 -o hello_coro hello_coro.cpp -lboost_system -pthread + +# Run +./hello_coro +---- + +Expected output: + +---- +The answer is 42 +---- + +== What Just Happened? + +1. `answer()` creates a suspended coroutine that will return 42 +2. `greet()` creates a suspended coroutine that will await `answer()` +3. `async_run(executor)(greet())` starts `greet()` on the io_context's executor +4. `greet()` runs until it hits `co_await answer()` +5. `answer()` runs and returns 42 +6. `greet()` resumes with the result and prints it +7. `greet()` completes, `ioc.run()` returns + +The key insight: both coroutines ran on the same executor because affinity +propagated automatically through the `co_await`. 
+ +== Handling Results + +To receive a task's result outside a coroutine, provide a completion handler: + +[source,cpp] +---- +capy::async_run(executor)(answer(), [](int result) { + std::cout << "Got: " << result << "\n"; +}); +---- + +== Handling Errors + +Exceptions propagate through coroutine chains. To handle them at the top level: + +[source,cpp] +---- +capy::async_run(executor)(might_fail(), + [](int result) { + std::cout << "Success: " << result << "\n"; + }, + [](std::exception_ptr ep) { + try { + if (ep) std::rethrow_exception(ep); + } catch (std::exception const& e) { + std::cerr << "Error: " << e.what() << "\n"; + } + } +); +---- + +== Next Steps + +Now that you have a working program: + +* xref:coroutines/tasks.adoc[Tasks] — Learn how lazy tasks work +* xref:coroutines/launching.adoc[Launching Tasks] — Understand `async_run` in detail +* xref:coroutines/affinity.adoc[Executor Affinity] — Control where coroutines execute diff --git a/doc/modules/ROOT/pages/utilities/compression.adoc b/doc/modules/ROOT/pages/utilities/compression.adoc new file mode 100644 index 00000000..5cfae76b --- /dev/null +++ b/doc/modules/ROOT/pages/utilities/compression.adoc @@ -0,0 +1,261 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Compression + +This page documents the Brotli and ZLib compression support in Capy. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== Overview + +Capy provides wrappers for two compression libraries: + +[cols="1,3"] +|=== +| Library | Use Case + +| **Brotli** +| High compression ratio, good for web content + +| **ZLib** +| Wide compatibility, DEFLATE/gzip/zlib formats +|=== + +Both are accessed through service objects installed in a `datastore`. + +== Brotli + +Brotli is a modern compression algorithm offering excellent compression ratios, +especially for text content. + +=== Setup + +[source,cpp] +---- +#include +#include + +datastore ctx; + +// Install encoder and decoder services +auto& encoder = brotli::install_encode_service(ctx); +auto& decoder = brotli::install_decode_service(ctx); +---- + +=== Encoding + +[source,cpp] +---- +std::vector input = get_data(); +std::vector output; + +// Compress +brotli::encode_result result = encoder.encode( + input.data(), input.size(), + output +); + +if (result.ec) + handle_error(result.ec); +---- + +=== Decoding + +[source,cpp] +---- +std::vector compressed = get_compressed(); +std::vector output; + +// Decompress +brotli::decode_result result = decoder.decode( + compressed.data(), compressed.size(), + output +); + +if (result.ec) + handle_error(result.ec); +---- + +=== Shared Dictionaries + +Brotli supports shared dictionaries for improved compression of similar content: + +[source,cpp] +---- +brotli::shared_dictionary dict = load_dictionary(); + +auto& encoder = brotli::install_encode_service(ctx, dict); +auto& decoder = brotli::install_decode_service(ctx, dict); +---- + +== ZLib + +ZLib implements the DEFLATE algorithm used in gzip, zlib, and raw formats. 
+ +=== Setup + +[source,cpp] +---- +#include +#include + +datastore ctx; + +// Install deflate (compress) and inflate (decompress) services +auto& deflate_svc = zlib::install_deflate_service(ctx); +auto& inflate_svc = zlib::install_inflate_service(ctx); +---- + +=== Compression (Deflate) + +[source,cpp] +---- +std::vector input = get_data(); +std::vector output; + +// Compress with default settings +zlib::deflate_result result = deflate_svc.deflate( + input.data(), input.size(), + output +); + +if (result.ec) + handle_error(result.ec); +---- + +=== Decompression (Inflate) + +[source,cpp] +---- +std::vector compressed = get_compressed(); +std::vector output; + +// Decompress +zlib::inflate_result result = inflate_svc.inflate( + compressed.data(), compressed.size(), + output +); + +if (result.ec) + handle_error(result.ec); +---- + +=== Compression Options + +[source,cpp] +---- +// Compression level (0-9, higher = better compression, slower) +zlib::compression_level level = zlib::compression_level::best; + +// Compression strategy +zlib::compression_strategy strategy = zlib::compression_strategy::filtered; + +// Flush mode +zlib::flush flush = zlib::flush::sync_flush; +---- + +=== Format Selection + +The window bits parameter controls the format: + +[cols="1,2"] +|=== +| Window Bits | Format + +| 8-15 +| Raw DEFLATE + +| 16-31 (15 + 16) +| gzip + +| 32-47 (15 + 32) +| Auto-detect gzip or zlib +|=== + +== Error Handling + +Both libraries use error codes from their respective error categories: + +[source,cpp] +---- +if (result.ec == brotli::error::invalid_input) +{ + // Brotli-specific error +} + +if (result.ec == zlib::error::data_error) +{ + // ZLib-specific error +} + +// Generic error handling +if (result.ec) +{ + std::cerr << "Compression failed: " << result.ec.message() << "\n"; +} +---- + +== Streaming + +For large data that doesn't fit in memory, use streaming APIs: + +[source,cpp] +---- +zlib::stream stream; + +// Process in chunks +while (has_more_input()) +{ + auto chunk = get_next_chunk(); + auto result = stream.deflate_chunk( + chunk.data(), chunk.size(), + output_buffer, + zlib::flush::no_flush + ); + // Handle partial output... +} + +// Finish the stream +auto result = stream.deflate_finish(output_buffer); +---- + +== When to Use Each + +**Use Brotli when:** + +* Compression ratio is important +* Content is text-heavy (HTML, CSS, JS) +* Decompression speed is acceptable + +**Use ZLib when:** + +* Compatibility is important (gzip is universal) +* Fast decompression is needed +* Memory usage must be minimal + +== Summary + +[cols="1,1,3"] +|=== +| Library | Header | Purpose + +| Brotli +| `` +| High-ratio compression + +| ZLib +| `` +| DEFLATE/gzip/zlib compression +|=== + +== Next Steps + +* xref:containers.adoc[Containers] — Service container (`datastore`) +* xref:../index.adoc[Introduction] — Return to overview diff --git a/doc/modules/ROOT/pages/utilities/containers.adoc b/doc/modules/ROOT/pages/utilities/containers.adoc new file mode 100644 index 00000000..3e6ce919 --- /dev/null +++ b/doc/modules/ROOT/pages/utilities/containers.adoc @@ -0,0 +1,248 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += Containers + +This page documents the container and smart pointer utilities in Capy. 
+ +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== polystore + +A container of type-erased objects indexed by their type: + +[source,cpp] +---- +polystore ps; + +// Insert and retrieve by type +A& a = ps.emplace(); +B& b = ps.insert(B{}); + +A& found = ps.get(); // Throws if not found +A* maybe = ps.find(); // Returns nullptr if not found +---- + +=== Key Type Aliasing + +Types can specify a nested `key_type` to register under a base class: + +[source,cpp] +---- +struct derived : base +{ + using key_type = base; +}; + +ps.emplace(); +base* b = ps.find(); // Returns derived* +---- + +=== Dependency Injection + +The `invoke` function calls a callable with arguments from the store: + +[source,cpp] +---- +ps.emplace(); +ps.emplace(); + +invoke(ps, [](database& db, logger& log) { + // Arguments are looked up by type +}); +---- + +== datastore + +A `polystore` with an explicit `clear()` method: + +[source,cpp] +---- +datastore ctx; +ctx.emplace(); +// ... use services ... +ctx.clear(); // Destroy all stored objects +---- + +Commonly used as a service container for compression services. + +== application + +A `polystore` with lifecycle management for application components: + +[source,cpp] +---- +application app; + +// Construct parts +app.emplace(); +app.emplace(); + +// Start all parts (calls start() on each) +app.start(); + +// ... run until shutdown ... + +// Stop all parts (calls stop() on each in reverse order) +app.stop(); + +// Wait for completion +app.join(); +---- + +Parts should implement `start()` and `stop()` methods. + +== intrusive_list + +A doubly-linked list where elements derive from `intrusive_list::node`: + +[source,cpp] +---- +struct my_item : intrusive_list::node +{ + int value; +}; + +intrusive_list list; +my_item item; + +list.push_back(&item); +my_item* front = list.pop_front(); +list.remove(&item); +---- + +=== When to Use + +* Elements are already allocated elsewhere +* Need O(1) removal from middle +* Cannot afford node allocation overhead + +== intrusive_queue + +A FIFO queue where elements derive from `intrusive_queue::node`: + +[source,cpp] +---- +struct work_item : intrusive_queue::node +{ + std::function fn; +}; + +intrusive_queue queue; +work_item item; + +queue.push(&item); +work_item* next = queue.pop(); +---- + +=== Operations + +[cols="1,3"] +|=== +| Operation | Description + +| `push(item*)` +| Add to back + +| `pop()` +| Remove from front (returns nullptr if empty) + +| `empty()` +| Check if queue is empty + +| `splice(other)` +| Move all items from another queue +|=== + +== small_unique_ptr + +A smart pointer with small buffer optimization: + +[source,cpp] +---- +// Uses SBO if Derived fits in 32-byte buffer +auto p = make_small_unique(constructor_args...); + +// Access like unique_ptr +p->method(); +Base& ref = *p; +---- + +=== SBO Requirements + +The small buffer path requires: + +* Object size ≤ buffer size +* Object alignment ≤ `alignof(std::max_align_t)` +* Type is nothrow move constructible + +Objects that don't meet these requirements are heap-allocated. + +=== Factory Function + +[source,cpp] +---- +template +small_unique_ptr make_small_unique(Args&&... 
args); +---- + +* `T` — Base type for the smart pointer +* `N` — Buffer size in bytes +* `U` — Concrete type to construct +* `Args` — Constructor arguments + +== embed + +A utility for embedding string literals: + +[source,cpp] +---- +embed text(R"( +Hello "world" +This has quotes and ) +)"); + +std::string_view sv = text; // Implicit conversion +---- + +The first character (typically newline) is removed, enabling clean formatting +in source code. + +== Summary + +[cols="1,3"] +|=== +| Class | Purpose + +| `polystore` +| Type-erased container indexed by type + +| `datastore` +| Polystore with clear() + +| `application` +| Lifecycle management for app components + +| `intrusive_list` +| Doubly-linked list without node allocation + +| `intrusive_queue` +| FIFO queue without node allocation + +| `small_unique_ptr` +| Unique pointer with small buffer optimization + +| `embed` +| String literal embedding helper +|=== + +== Next Steps + +* xref:file-io.adoc[File I/O] — Platform-independent file operations +* xref:compression.adoc[Compression] — Brotli and ZLib support diff --git a/doc/modules/ROOT/pages/utilities/file-io.adoc b/doc/modules/ROOT/pages/utilities/file-io.adoc new file mode 100644 index 00000000..48a7f0fe --- /dev/null +++ b/doc/modules/ROOT/pages/utilities/file-io.adoc @@ -0,0 +1,253 @@ +// +// Copyright (c) 2025 Vinnie Falco (vinnie.falco@gmail.com) +// +// Distributed under the Boost Software License, Version 1.0. (See accompanying +// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) +// +// Official repository: https://github.com/cppalliance/capy +// + += File I/O + +This page documents the file and path utilities in Capy. + +NOTE: Code snippets assume `using namespace boost::capy;` is in effect. + +== file + +A platform-independent file handle: + +[source,cpp] +---- +file f("data.txt", file_mode::read); + +// Read data +std::vector buf(1024); +std::size_t n = f.read(buf.data(), buf.size()); + +// Write data +f.write(data.data(), data.size()); + +// Query and seek +std::uint64_t sz = f.size(); +std::uint64_t pos = f.pos(); +f.seek(100); +---- + +=== File Modes + +[cols="1,3"] +|=== +| Mode | Description + +| `file_mode::read` +| Open for reading (must exist) + +| `file_mode::write` +| Create or truncate for writing + +| `file_mode::append` +| Open for appending (create if needed) + +| `file_mode::read_write` +| Open for both reading and writing +|=== + +=== Error Handling + +Two error handling styles are available: + +[source,cpp] +---- +// Exception style +try { + file f("data.txt", file_mode::read); + f.read(buf, n); +} catch (system_error const& e) { + // Handle error +} + +// Error code style +system::error_code ec; +f.open("data.txt", file_mode::read, ec); +if (ec) { + // Handle error +} +---- + +=== Platform Notes + +The file class uses the native API on each platform: + +* **Windows**: Win32 API (`CreateFile`, `ReadFile`, etc.) +* **POSIX**: POSIX API (`open`, `read`, etc.) +* **Fallback**: Standard C library (`fopen`, `fread`, etc.) 
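+
+Putting the calls above together, a small helper that reads a whole file into
+a string might look like this (a sketch following the examples above, not a
+library-provided function):
+
+[source,cpp]
+----
+std::string read_all(char const* filename)
+{
+    file f(filename, file_mode::read);
+
+    std::string s;
+    s.resize(static_cast<std::size_t>(f.size()));
+    std::size_t n = f.read(s.data(), s.size());  // may return fewer bytes
+    s.resize(n);
+    return s;
+}
+----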
+ +== path + +An owning, mutable path string with UTF-8 encoding: + +[source,cpp] +---- +path p("C:/Users/data.txt"); + +// Decomposition +path_view dir = p.parent_path(); // "C:/Users" +path_view name = p.filename(); // "data.txt" +path_view stem = p.stem(); // "data" +path_view ext = p.extension(); // ".txt" + +// Modification +p /= "subdir"; // Append with separator +p.replace_extension(".json"); + +// Native conversion (Windows) +std::wstring native = p.native_wstring(); +---- + +=== Internal Format + +Paths use forward slashes internally, regardless of platform: + +[source,cpp] +---- +path p("C:\\Users\\data.txt"); // Input with backslashes +std::cout << p.string(); // "C:/Users/data.txt" +---- + +This enables cross-platform serialization. Native format conversion happens +at API boundaries. + +=== Validation + +Paths are validated at construction time: + +[source,cpp] +---- +// Throws system_error on invalid path +path p("\0invalid"); // Embedded null + +// Non-throwing alternative +auto result = try_parse_path(input); +if (result) + use(*result); +else + handle_error(result.error()); +---- + +=== Decomposition Reference + +[cols="1,2,2"] +|=== +| Method | Example Input | Result + +| `root_name()` +| `"C:/foo/bar"` +| `"C:"` + +| `root_directory()` +| `"C:/foo/bar"` +| `"/"` + +| `root_path()` +| `"C:/foo/bar"` +| `"C:/"` + +| `relative_path()` +| `"C:/foo/bar"` +| `"foo/bar"` + +| `parent_path()` +| `"C:/foo/bar"` +| `"C:/foo"` + +| `filename()` +| `"C:/foo/bar.txt"` +| `"bar.txt"` + +| `stem()` +| `"C:/foo/bar.txt"` +| `"bar"` + +| `extension()` +| `"C:/foo/bar.txt"` +| `".txt"` +|=== + +== path_view + +A non-owning reference to a valid path string: + +[source,cpp] +---- +void process(path_view p) +{ + // Decomposition works the same + path_view name = p.filename(); + + // Convert to owning path if needed + path owned(p); +} +---- + +Path views are validated at construction, just like paths. All decomposition +methods return path_view pointing into the original storage. + +== Path Generation + +[source,cpp] +---- +// Normalize: remove "." and "..", collapse separators +path normal = p.lexically_normal(); + +// Relative path from base to target +path rel = target.lexically_relative(base); + +// Same as relative, but returns target if not possible +path prox = target.lexically_proximate(base); +---- + +== Iteration + +Iterate over path components: + +[source,cpp] +---- +path p("C:/foo/bar"); +for (path_view component : p) +{ + // "C:", "/", "foo", "bar" +} + +// Or iterate as string_view +for (std::string_view segment : p.segments()) +{ + // Same components as string_view +} +---- + +== Summary + +[cols="1,3"] +|=== +| Class | Purpose + +| `file` +| Platform-independent file handle + +| `file_mode` +| File open mode enumeration + +| `path` +| Owning UTF-8 path string + +| `path_view` +| Non-owning path reference + +| `try_parse_path` +| Non-throwing path parsing +|=== + +== Next Steps + +* xref:compression.adoc[Compression] — Brotli and ZLib support