From ad3f9c2cb98ff9a4da157995b6f25a19e461ba55 Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: Tue, 30 Dec 2025 19:10:45 -0500 Subject: [PATCH 1/6] Add code formatting, linting, and style configs Introduce .clang-format, .clang-tidy, .editorconfig, and .pre-commit-config.yaml for consistent code style and linting. Add linting integration to CMake and supporting scripts in tools/. Apply formatting and minor style cleanups across examples, source, and test files to match new standards. --- .clang-format | 33 + .clang-tidy | 39 + .editorconfig | 16 + .pre-commit-config.yaml | 17 + CMakeLists.txt | 18 +- Examples/CInteropExample.c | 18 +- Examples/ConcurrencyAPIExample.c | 95 +- Examples/CustomAppDelegateExample.cpp | 38 +- Examples/EntropyCppAppExample.cpp | 17 +- Examples/EntropyMainExample.c | 17 +- Examples/EntropyObjectExample.cpp | 22 +- Examples/EntropyObjectHandleExample.cpp | 52 +- Examples/VFSCAPIExample.c | 37 +- Examples/VFS_Basics.cpp | 16 +- Examples/VFS_CopyMove.cpp | 47 +- Examples/VFS_LinesAndBatch.cpp | 38 +- Examples/VFS_Metadata.cpp | 43 +- Examples/VFS_Streams.cpp | 16 +- Examples/VirtualFileStreamingExample.cpp | 14 +- Examples/VirtualFileSystemExample.cpp | 136 +- Examples/WorkContractExample.cpp | 10 +- Examples/WorkGraphCAPIExample.c | 87 +- Examples/WorkGraphYieldableExample.cpp | 264 ++- Tests/Concurrency/MainThreadWorkTests.cpp | 5 +- .../WorkContractGroupAccountingTests.cpp | 2 + Tests/Core/TimerTests.cpp | 59 +- Tests/MinimalGTestNoMain.cpp | 1 + Tests/TestHelpers/VFSTestHelpers.cpp | 4 +- Tests/TestHelpers/VFSTestHelpers.h | 39 +- Tests/TestWorkServiceSingleton.h | 21 +- .../VFSErrorMappingTests.cpp | 8 +- Tests/VirtualFileSystem/VFSSimpleTest.cpp | 9 +- Tests/VirtualFileSystem/VFSTests.cpp | 12 +- .../VFSTextFidelityTests.cpp | 23 +- cmake/EntropyLinting.cmake | 27 + include/entropy/entropy_concurrency_types.h | 57 +- include/entropy/entropy_directory_handle.h | 44 +- include/entropy/entropy_file_handle.h | 95 +- 
.../entropy/entropy_file_operation_handle.h | 59 +- include/entropy/entropy_vfs_types.h | 115 +- include/entropy/entropy_virtual_file_system.h | 36 +- include/entropy/entropy_work_contract_group.h | 106 +- .../entropy/entropy_work_contract_handle.h | 40 +- include/entropy/entropy_work_graph.h | 180 +- include/entropy/entropy_work_service.h | 84 +- include/entropy/entropy_write_batch.h | 81 +- src/Concurrency/AdaptiveRankingScheduler.cpp | 80 +- src/Concurrency/AdaptiveRankingScheduler.h | 142 +- src/Concurrency/DirectScheduler.h | 62 +- src/Concurrency/IConcurrencyProvider.h | 23 +- src/Concurrency/IWorkScheduler.h | 129 +- src/Concurrency/NodeScheduler.cpp | 111 +- src/Concurrency/NodeScheduler.h | 280 +-- src/Concurrency/NodeStateManager.cpp | 184 +- src/Concurrency/NodeStateManager.h | 90 +- src/Concurrency/RandomScheduler.cpp | 35 +- src/Concurrency/RandomScheduler.h | 80 +- src/Concurrency/RoundRobinScheduler.cpp | 34 +- src/Concurrency/RoundRobinScheduler.h | 74 +- src/Concurrency/SignalTree.h | 804 +++---- src/Concurrency/SpinningDirectScheduler.h | 74 +- src/Concurrency/WorkContractGroup.cpp | 1633 +++++++------- src/Concurrency/WorkContractGroup.h | 1262 +++++------ src/Concurrency/WorkContractHandle.cpp | 98 +- src/Concurrency/WorkContractHandle.h | 314 +-- src/Concurrency/WorkGraph.cpp | 358 ++- src/Concurrency/WorkGraph.h | 1971 +++++++++-------- src/Concurrency/WorkGraphEvents.h | 276 +-- src/Concurrency/WorkGraphTypes.h | 791 +++---- src/Concurrency/WorkService.cpp | 612 +++-- src/Concurrency/WorkService.h | 132 +- src/Core/EntropyApplication.cpp | 78 +- src/Core/EntropyApplication.h | 47 +- src/Core/EntropyCAPI.cpp | 71 +- src/Core/EntropyClass.h | 27 +- src/Core/EntropyInterop.h | 17 +- src/Core/EntropyMain.cpp | 14 +- src/Core/EntropyObject.cpp | 71 +- src/Core/EntropyObject.h | 114 +- src/Core/EntropyService.h | 171 +- src/Core/EntropyServiceRegistry.cpp | 331 +-- src/Core/EntropyServiceRegistry.h | 245 +- src/Core/EventBus.h | 163 +- 
src/Core/HandleSlot.h | 38 +- src/Core/RefObject.h | 172 +- src/Core/SlotPool.h | 44 +- src/Core/Timer.cpp | 33 +- src/Core/Timer.h | 29 +- src/Core/TimerService.cpp | 43 +- src/Core/TimerService.h | 59 +- src/Core/entropy_c_api.h | 100 +- src/Core/entropy_main.h | 15 +- src/CoreCommon.h | 63 +- src/Debug/Debug.h | 449 ++-- src/Debug/DebugUtilities.h | 603 +++-- src/Debug/INamed.h | 228 +- src/EntropyCore.h | 32 +- src/Graph/AcyclicNodeHandle.h | 263 ++- src/Graph/DirectedAcyclicGraph.h | 1354 +++++------ src/Logging/CLogger.cpp | 35 +- src/Logging/CLogger.h | 51 +- src/Logging/ConsoleSink.cpp | 195 +- src/Logging/ConsoleSink.h | 175 +- src/Logging/ILogSink.h | 198 +- src/Logging/LogEntry.h | 150 +- src/Logging/LogLevel.h | 229 +- src/Logging/Logger.cpp | 123 +- src/Logging/Logger.h | 674 +++--- src/TypeSystem/GenericHandle.h | 555 ++--- src/TypeSystem/Reflection.h | 763 +++---- src/TypeSystem/TypeID.h | 331 +-- src/VirtualFileSystem/DirectoryHandle.cpp | 16 +- src/VirtualFileSystem/DirectoryHandle.h | 61 +- src/VirtualFileSystem/FileHandle.cpp | 379 ++-- src/VirtualFileSystem/FileHandle.h | 110 +- src/VirtualFileSystem/FileOperationHandle.cpp | 22 +- src/VirtualFileSystem/FileOperationHandle.h | 73 +- src/VirtualFileSystem/FileStream.cpp | 57 +- src/VirtualFileSystem/FileStream.h | 49 +- src/VirtualFileSystem/FileWatch.cpp | 22 +- src/VirtualFileSystem/FileWatch.h | 81 +- src/VirtualFileSystem/FileWatchManager.cpp | 76 +- src/VirtualFileSystem/FileWatchManager.h | 51 +- src/VirtualFileSystem/IFileSystemBackend.h | 182 +- .../LocalFileSystemBackend.cpp | 931 ++++---- .../LocalFileSystemBackend.h | 49 +- src/VirtualFileSystem/VirtualFileSystem.cpp | 92 +- src/VirtualFileSystem/VirtualFileSystem.h | 127 +- src/VirtualFileSystem/WriteBatch.cpp | 258 ++- src/VirtualFileSystem/WriteBatch.h | 83 +- src/entropy/entropy_concurrency_types_c.cpp | 52 +- src/entropy/entropy_directory_handle_c.cpp | 49 +- src/entropy/entropy_file_handle_c.cpp | 108 +- 
.../entropy_file_operation_handle_c.cpp | 146 +- src/entropy/entropy_vfs_types_c.cpp | 74 +- src/entropy/entropy_virtual_file_system_c.cpp | 58 +- src/entropy/entropy_work_contract_group_c.cpp | 135 +- .../entropy_work_contract_handle_c.cpp | 89 +- src/entropy/entropy_work_graph_c.cpp | 244 +- src/entropy/entropy_work_service_c.cpp | 104 +- src/entropy/entropy_write_batch_c.cpp | 86 +- tools/format_code.sh | 31 + tools/lint_code.sh | 51 + tools/setup_hooks.sh | 50 + 144 files changed, 12431 insertions(+), 11764 deletions(-) create mode 100644 .clang-format create mode 100644 .clang-tidy create mode 100644 .editorconfig create mode 100644 .pre-commit-config.yaml create mode 100644 cmake/EntropyLinting.cmake create mode 100755 tools/format_code.sh create mode 100755 tools/lint_code.sh create mode 100755 tools/setup_hooks.sh diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..01846c7 --- /dev/null +++ b/.clang-format @@ -0,0 +1,33 @@ +--- +BasedOnStyle: Google +IndentWidth: 4 +ColumnLimit: 120 +AccessModifierOffset: -4 +InsertNewlineAtEOF: true + + +# Brace breaking: Stroustrup style (functions same line, control same, else cuddled) +BreakBeforeBraces: Custom +BraceWrapping: + AfterCaseLabel: true + AfterClass: true + AfterControlStatement: false # "cuddled" } else { + AfterEnum: true + AfterFunction: false # Same line + AfterNamespace: true + AfterObjCDeclaration: true + AfterStruct: true + AfterUnion: true + AfterExternBlock: false + BeforeCatch: false + BeforeElse: false # Cuddled + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true + +AllowShortFunctionsOnASingleLine: Empty +KeepEmptyLinesAtTheStartOfBlocks: false + +# Naming? Clang-format doesn't enforce naming, clang-tidy does. +# This config handles layout. 
diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 0000000..c2910dc --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,39 @@ +--- +Checks: ' + readability-identifier-naming, + readability-named-parameter, + bugprone-argument-comment, + performance-*, + bugprone-*, + modernize-use-override, + modernize-use-nullptr, + -bugprone-narrowing-conversions, + -bugprone-easily-swappable-parameters +' +WarningsAsErrors: '' +CheckOptions: + - key: readability-identifier-naming.VariableCase + value: camelBack + - key: readability-identifier-naming.GlobalVariableCase + value: camelBack + - key: readability-identifier-naming.PrivateMemberPrefix + value: _ + - key: readability-identifier-naming.PrivateMemberCase + value: camelBack + - key: readability-identifier-naming.ProtectedMemberPrefix + value: _ + - key: readability-identifier-naming.ProtectedMemberCase + value: camelBack + - key: readability-identifier-naming.PublicMemberCase + value: camelBack + - key: readability-identifier-naming.FunctionCase + value: camelBack + - key: readability-identifier-naming.ClassCase + value: CamelCase + - key: readability-identifier-naming.StructCase + value: CamelCase + - key: readability-identifier-naming.ParameterCase + value: camelBack + - key: readability-named-parameter.IgnoreFailedSplit + value: false +... 
diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..950c529 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +indent_style = space +indent_size = 4 +charset = utf-8 +trim_trailing_whitespace = true +insert_final_newline = true + +[*.{c,cpp,h,hpp,cc,mm}] +indent_style = space +indent_size = 4 + +[CMakeLists.txt] +indent_style = space +indent_size = 4 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..8acf699 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,17 @@ +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files + + - repo: local + hooks: + - id: clang-format + name: clang-format + entry: clang-format + language: system + files: \.(cpp|h|c|hpp|cc|mm|m)$ + args: ["-style=file:.clang-format", "-i"] diff --git a/CMakeLists.txt b/CMakeLists.txt index d2bd250..acb61d1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.28) -project(EntropyCore +project(EntropyCore VERSION 1.0.0 DESCRIPTION "Core utilities and concurrency primitives for Entropy Engine" LANGUAGES C CXX @@ -35,7 +35,9 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) # Configure sanitizers -if(ENABLE_SANITIZERS) +# Configure sanitizers +# Always enable ASAN+UBSAN for Debug builds on minimal supported compilers +if(ENABLE_SANITIZERS OR (CMAKE_BUILD_TYPE MATCHES "Debug" AND CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU")) if(CMAKE_CXX_COMPILER_ID MATCHES "Clang|GNU") # ThreadSanitizer is mutually exclusive with AddressSanitizer # To use TSAN instead: cmake -DENABLE_TSAN=ON @@ -375,7 +377,7 @@ set_target_properties(EntropyCore PROPERTIES # Testing if(ENTROPY_BUILD_TESTS) enable_testing() - + # Test executable # Test helper sources set(TEST_HELPERS @@ -454,6 +456,14 @@ if(ENTROPY_BUILD_TESTS) GTest::gtest_main ) 
+ # ============================================================================= + # Linting + # ============================================================================= + if(EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/cmake/EntropyLinting.cmake") + include("${CMAKE_CURRENT_SOURCE_DIR}/cmake/EntropyLinting.cmake") + entropy_enable_linting(EntropyCore) + endif() + target_compile_features(EntropyCoreTests PRIVATE cxx_std_20) # Enable testing and discover tests automatically @@ -502,4 +512,4 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/EntropyCoreConfig.cmake" "${CMAKE_CURRENT_BINARY_DIR}/EntropyCoreConfigVersion.cmake" DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/EntropyCore -) \ No newline at end of file +) diff --git a/Examples/CInteropExample.c b/Examples/CInteropExample.c index 21c08d7..ab447b6 100644 --- a/Examples/CInteropExample.c +++ b/Examples/CInteropExample.c @@ -1,22 +1,26 @@ -#include -#include #include #include +#include +#include // Dummy owner and vtable callbacks for demonstration static int dummy_validate(const void* owner, uint32_t index, uint32_t generation) { // No backing store; nothing is valid in this minimal example - (void)owner; (void)index; (void)generation; + (void)owner; + (void)index; + (void)generation; return 0; } static EntropyObjectRef* dummy_resolve(const void* owner, uint32_t index, uint32_t generation) { // No backing store; cannot resolve - (void)owner; (void)index; (void)generation; + (void)owner; + (void)index; + (void)generation; return NULL; } int main(void) { - uint32_t maj=0, min=0, pat=0, abi=0; + uint32_t maj = 0, min = 0, pat = 0, abi = 0; entropy_get_version(&maj, &min, &pat, &abi); ENTROPY_LOG_INFO_F("EntropyCore C API version: %u.%u.%u (ABI %u)", maj, min, pat, abi); @@ -25,7 +29,7 @@ int main(void) { entropy_register_owner_vtable(dummy_owner, dummy_resolve, dummy_validate); // Construct a handle value (no real object behind it in this example) - EntropyHandle h = { dummy_owner, 42u, 7u, 0u }; + EntropyHandle h = 
{dummy_owner, 42u, 7u, 0u}; // Basic handle operations ENTROPY_LOG_INFO_F("Handle valid? %d", (int)entropy_handle_is_valid(h)); @@ -44,7 +48,7 @@ int main(void) { // Allocation helpers char* buf = (char*)entropy_alloc(32); if (buf) { - for (int i=0;i<31;i++) buf[i] = (i%10)+'0'; + for (int i = 0; i < 31; i++) buf[i] = (i % 10) + '0'; buf[31] = '\0'; ENTROPY_LOG_INFO_F("Allocated buffer: %s", buf); entropy_free(buf); diff --git a/Examples/ConcurrencyAPIExample.c b/Examples/ConcurrencyAPIExample.c index fa163e5..e0ccc5a 100644 --- a/Examples/ConcurrencyAPIExample.c +++ b/Examples/ConcurrencyAPIExample.c @@ -15,11 +15,11 @@ * and execute them using a work service (thread pool), along with the C logging API. */ +#include #include #include #include #include -#include #include #include @@ -31,13 +31,15 @@ #endif // Example work context -typedef struct { +typedef struct +{ int task_id; const char* message; } TaskContext; // Re-entrant work context (task spawns children) -typedef struct { +typedef struct +{ entropy_WorkContractGroup group; // Need group reference to schedule from within int depth; // Recursion depth int max_depth; // Maximum depth @@ -47,32 +49,25 @@ typedef struct { // Example work callback void example_work(void* user_data) { TaskContext* ctx = (TaskContext*)user_data; - ENTROPY_LOG_INFO_CAT_F("WorkerThread", - "Task %d: %s (executing on worker thread)", - ctx->task_id, ctx->message); + ENTROPY_LOG_INFO_CAT_F("WorkerThread", "Task %d: %s (executing on worker thread)", ctx->task_id, ctx->message); // Simulate some work - usleep(10000); // 10ms + usleep(10000); // 10ms - ENTROPY_LOG_DEBUG_CAT_F("WorkerThread", - "Task %d completed", ctx->task_id); + ENTROPY_LOG_DEBUG_CAT_F("WorkerThread", "Task %d completed", ctx->task_id); } // Main thread work callback void main_thread_work(void* user_data) { TaskContext* ctx = (TaskContext*)user_data; - ENTROPY_LOG_INFO_CAT_F("MainThread", - "Task %d: %s (executing on MAIN THREAD)", - ctx->task_id, ctx->message); + 
ENTROPY_LOG_INFO_CAT_F("MainThread", "Task %d: %s (executing on MAIN THREAD)", ctx->task_id, ctx->message); } // Re-entrant work callback (spawns child tasks) void reentrant_work(void* user_data) { ReentrantContext* ctx = (ReentrantContext*)user_data; - ENTROPY_LOG_INFO_CAT_F("Reentrant", - "Task %d at depth %d (max: %d)", - ctx->task_id, ctx->depth, ctx->max_depth); + ENTROPY_LOG_INFO_CAT_F("Reentrant", "Task %d at depth %d (max: %d)", ctx->task_id, ctx->depth, ctx->max_depth); // If we haven't reached max depth, spawn two child tasks if (ctx->depth < ctx->max_depth) { @@ -95,14 +90,12 @@ void reentrant_work(void* user_data) { right_child->max_depth = ctx->max_depth; right_child->task_id = ctx->task_id * 2 + 1; - ENTROPY_LOG_DEBUG_CAT_F("Reentrant", - "Task %d spawning children %d and %d", - ctx->task_id, left_child->task_id, right_child->task_id); + ENTROPY_LOG_DEBUG_CAT_F("Reentrant", "Task %d spawning children %d and %d", ctx->task_id, + left_child->task_id, right_child->task_id); // RE-ENTRANT SCHEDULING: Create and schedule child tasks from within this task entropy_WorkContractHandle left = entropy_work_contract_group_create_contract( - ctx->group, reentrant_work, left_child, ENTROPY_EXEC_ANY_THREAD, &status - ); + ctx->group, reentrant_work, left_child, ENTROPY_EXEC_ANY_THREAD, &status); if (status == ENTROPY_OK) { entropy_work_contract_schedule(left, &status); @@ -110,8 +103,7 @@ void reentrant_work(void* user_data) { entropy_work_contract_handle_destroy(left); entropy_WorkContractHandle right = entropy_work_contract_group_create_contract( - ctx->group, reentrant_work, right_child, ENTROPY_EXEC_ANY_THREAD, &status - ); + ctx->group, reentrant_work, right_child, ENTROPY_EXEC_ANY_THREAD, &status); if (status == ENTROPY_OK) { entropy_work_contract_schedule(right, &status); @@ -119,9 +111,7 @@ void reentrant_work(void* user_data) { entropy_work_contract_handle_destroy(right); } } else { - ENTROPY_LOG_DEBUG_CAT_F("Reentrant", - "Task %d reached max depth, 
completing as leaf", - ctx->task_id); + ENTROPY_LOG_DEBUG_CAT_F("Reentrant", "Task %d reached max depth, completing as leaf", ctx->task_id); } // Free our own context (we're done with it) @@ -136,12 +126,9 @@ int main(void) { // 1. Create a work contract group ENTROPY_LOG_INFO_CAT_F("Example", "Step 1: Creating work contract group (capacity: 1024)..."); - entropy_WorkContractGroup group = entropy_work_contract_group_create( - 1024, "ExampleGroup", &status - ); + entropy_WorkContractGroup group = entropy_work_contract_group_create(1024, "ExampleGroup", &status); if (status != ENTROPY_OK) { - ENTROPY_LOG_ERROR_CAT_F("Example", - "Failed to create group: %s", entropy_status_to_string(status)); + ENTROPY_LOG_ERROR_CAT_F("Example", "Failed to create group: %s", entropy_status_to_string(status)); return 1; } ENTROPY_LOG_INFO_CAT_F("Example", "Group created successfully"); @@ -154,20 +141,18 @@ int main(void) { entropy_WorkService service = entropy_work_service_create(&config, &status); if (status != ENTROPY_OK) { - ENTROPY_LOG_ERROR_CAT_F("Example", - "Failed to create service: %s", entropy_status_to_string(status)); + ENTROPY_LOG_ERROR_CAT_F("Example", "Failed to create service: %s", entropy_status_to_string(status)); entropy_work_contract_group_destroy(group); return 1; } ENTROPY_LOG_INFO_CAT_F("Example", "Service created with %zu threads", - entropy_work_service_get_thread_count(service)); + entropy_work_service_get_thread_count(service)); // 3. 
Register the group with the service ENTROPY_LOG_INFO_CAT_F("Example", "Step 3: Registering group with service..."); entropy_work_service_add_group(service, group, &status); if (status != ENTROPY_OK) { - ENTROPY_LOG_ERROR_CAT_F("Example", - "Failed to add group: %s", entropy_status_to_string(status)); + ENTROPY_LOG_ERROR_CAT_F("Example", "Failed to add group: %s", entropy_status_to_string(status)); entropy_work_service_destroy(service); entropy_work_contract_group_destroy(group); return 1; @@ -178,8 +163,7 @@ int main(void) { ENTROPY_LOG_INFO_CAT_F("Example", "Step 4: Starting work service..."); entropy_work_service_start(service, &status); if (status != ENTROPY_OK) { - ENTROPY_LOG_ERROR_CAT_F("Example", - "Failed to start service: %s", entropy_status_to_string(status)); + ENTROPY_LOG_ERROR_CAT_F("Example", "Failed to start service: %s", entropy_status_to_string(status)); entropy_work_service_destroy(service); entropy_work_contract_group_destroy(group); return 1; @@ -195,13 +179,12 @@ int main(void) { contexts[i].task_id = i; contexts[i].message = "Processing background data"; - handles[i] = entropy_work_contract_group_create_contract( - group, example_work, &contexts[i], ENTROPY_EXEC_ANY_THREAD, &status - ); + handles[i] = entropy_work_contract_group_create_contract(group, example_work, &contexts[i], + ENTROPY_EXEC_ANY_THREAD, &status); if (status != ENTROPY_OK) { - ENTROPY_LOG_WARNING_CAT_F("Example", - "Failed to create contract %d: %s", i, entropy_status_to_string(status)); + ENTROPY_LOG_WARNING_CAT_F("Example", "Failed to create contract %d: %s", i, + entropy_status_to_string(status)); continue; } @@ -221,10 +204,8 @@ int main(void) { main_contexts[i].task_id = 100 + i; main_contexts[i].message = "Updating UI"; - main_handles[i] = entropy_work_contract_group_create_contract( - group, main_thread_work, &main_contexts[i], - ENTROPY_EXEC_MAIN_THREAD, &status - ); + main_handles[i] = entropy_work_contract_group_create_contract(group, main_thread_work, 
&main_contexts[i], + ENTROPY_EXEC_MAIN_THREAD, &status); if (status == ENTROPY_OK) { entropy_work_contract_schedule(main_handles[i], &status); @@ -237,9 +218,8 @@ int main(void) { if (entropy_work_service_has_main_thread_work(service)) { EntropyMainThreadWorkResult result; entropy_work_service_execute_main_thread_work(service, 0, &result, &status); - ENTROPY_LOG_INFO_CAT_F("Example", - "Executed %zu contracts from %zu groups", - result.contracts_executed, result.groups_with_work); + ENTROPY_LOG_INFO_CAT_F("Example", "Executed %zu contracts from %zu groups", result.contracts_executed, + result.groups_with_work); } // Wait for initial work to complete before starting re-entrant example @@ -258,9 +238,8 @@ int main(void) { root->max_depth = 3; // Creates 2^3 - 1 = 7 tasks at depth 3, 15 total root->task_id = 1; // Root is task 1 - entropy_WorkContractHandle root_handle = entropy_work_contract_group_create_contract( - group, reentrant_work, root, ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_WorkContractHandle root_handle = + entropy_work_contract_group_create_contract(group, reentrant_work, root, ENTROPY_EXEC_ANY_THREAD, &status); if (status == ENTROPY_OK) { entropy_work_contract_schedule(root_handle, &status); @@ -278,14 +257,10 @@ int main(void) { // 9. 
Print statistics ENTROPY_LOG_INFO_CAT_F("Example", "Step 9: Final statistics:"); - ENTROPY_LOG_INFO_CAT_F("Example", " Capacity: %zu", - entropy_work_contract_group_capacity(group)); - ENTROPY_LOG_INFO_CAT_F("Example", " Active contracts: %zu", - entropy_work_contract_group_active_count(group)); - ENTROPY_LOG_INFO_CAT_F("Example", " Scheduled contracts: %zu", - entropy_work_contract_group_scheduled_count(group)); - ENTROPY_LOG_INFO_CAT_F("Example", " Executing contracts: %zu", - entropy_work_contract_group_executing_count(group)); + ENTROPY_LOG_INFO_CAT_F("Example", " Capacity: %zu", entropy_work_contract_group_capacity(group)); + ENTROPY_LOG_INFO_CAT_F("Example", " Active contracts: %zu", entropy_work_contract_group_active_count(group)); + ENTROPY_LOG_INFO_CAT_F("Example", " Scheduled contracts: %zu", entropy_work_contract_group_scheduled_count(group)); + ENTROPY_LOG_INFO_CAT_F("Example", " Executing contracts: %zu", entropy_work_contract_group_executing_count(group)); // 10. Cleanup ENTROPY_LOG_INFO_CAT_F("Example", "Step 10: Cleaning up..."); diff --git a/Examples/CustomAppDelegateExample.cpp b/Examples/CustomAppDelegateExample.cpp index 73546d3..4ec9619 100644 --- a/Examples/CustomAppDelegateExample.cpp +++ b/Examples/CustomAppDelegateExample.cpp @@ -7,10 +7,11 @@ * This file is part of the Entropy Core project. */ -#include -#include #include +#include +#include #include + #include #include @@ -26,7 +27,8 @@ using namespace EntropyEngine::Core::Concurrency; * 3. Coordinating between background work and main thread work * 4. 
Proper cleanup and termination */ -class CustomAppDelegate : public EntropyAppDelegate { +class CustomAppDelegate : public EntropyAppDelegate +{ std::shared_ptr workService_; std::unique_ptr backgroundGroup_; std::unique_ptr mainThreadGroup_; @@ -88,8 +90,8 @@ class CustomAppDelegate : public EntropyAppDelegate { std::this_thread::sleep_for(std::chrono::milliseconds(50)); int completed = ++backgroundTasksCompleted_; - ENTROPY_LOG_INFO(std::format(" [Background] Task {} completed ({}/{})", - i, completed, TOTAL_BACKGROUND_TASKS)); + ENTROPY_LOG_INFO( + std::format(" [Background] Task {} completed ({}/{})", i, completed, TOTAL_BACKGROUND_TASKS)); // When background work completes, schedule a main thread task scheduleMainThreadTask(i); @@ -101,8 +103,8 @@ class CustomAppDelegate : public EntropyAppDelegate { } allWorkScheduled_.store(true, std::memory_order_release); - ENTROPY_LOG_INFO(std::format("[CustomAppDelegateExample] Scheduled {} background tasks", - TOTAL_BACKGROUND_TASKS)); + ENTROPY_LOG_INFO( + std::format("[CustomAppDelegateExample] Scheduled {} background tasks", TOTAL_BACKGROUND_TASKS)); } void scheduleMainThreadTask(int taskId) { @@ -111,8 +113,8 @@ class CustomAppDelegate : public EntropyAppDelegate { [this, taskId]() noexcept { // This executes on the main thread via executeMainThreadWork() int completed = ++mainThreadTasksCompleted_; - ENTROPY_LOG_INFO(std::format(" [MainThread] Task {} completed ({}/{})", - taskId, completed, TOTAL_MAIN_THREAD_TASKS)); + ENTROPY_LOG_INFO(std::format(" [MainThread] Task {} completed ({}/{})", taskId, completed, + TOTAL_MAIN_THREAD_TASKS)); // Check if all work is done checkCompletion(); @@ -144,13 +146,10 @@ class CustomAppDelegate : public EntropyAppDelegate { if (std::chrono::duration_cast(now - lastLog).count() >= 1) { if (allWorkScheduled_.load(std::memory_order_acquire)) { - ENTROPY_LOG_DEBUG(std::format( - "[CustomAppDelegateExample] applicationMainLoop - Background: {}/{}, MainThread: {}/{}", - 
backgroundTasksCompleted_.load(), - TOTAL_BACKGROUND_TASKS, - mainThreadTasksCompleted_.load(), - TOTAL_BACKGROUND_TASKS - )); + ENTROPY_LOG_DEBUG( + std::format("[CustomAppDelegateExample] applicationMainLoop - Background: {}/{}, MainThread: {}/{}", + backgroundTasksCompleted_.load(), TOTAL_BACKGROUND_TASKS, + mainThreadTasksCompleted_.load(), TOTAL_BACKGROUND_TASKS)); } lastLog = now; } @@ -178,11 +177,8 @@ class CustomAppDelegate : public EntropyAppDelegate { backgroundGroup_.reset(); workService_.reset(); - ENTROPY_LOG_INFO(std::format( - "[CustomAppDelegateExample] Final stats - Background: {}, MainThread: {}", - backgroundTasksCompleted_.load(), - mainThreadTasksCompleted_.load() - )); + ENTROPY_LOG_INFO(std::format("[CustomAppDelegateExample] Final stats - Background: {}, MainThread: {}", + backgroundTasksCompleted_.load(), mainThreadTasksCompleted_.load())); } void applicationDidCatchUnhandledException(std::exception_ptr) override { diff --git a/Examples/EntropyCppAppExample.cpp b/Examples/EntropyCppAppExample.cpp index ba135ff..059054e 100644 --- a/Examples/EntropyCppAppExample.cpp +++ b/Examples/EntropyCppAppExample.cpp @@ -1,14 +1,16 @@ -#include -#include #include +#include +#include #include -#include + #include +#include using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; -class MyDelegate : public EntropyAppDelegate { +class MyDelegate : public EntropyAppDelegate +{ std::shared_ptr work_; std::unique_ptr group_; std::atomic remaining_{0}; @@ -32,8 +34,7 @@ class MyDelegate : public EntropyAppDelegate { // Create a work group and register it with the work service group_ = std::make_unique(64, "ExampleGroup"); auto status = work_->addWorkContractGroup(group_.get()); - if (status != WorkService::GroupOperationStatus::Added && - status != WorkService::GroupOperationStatus::Exists) { + if (status != WorkService::GroupOperationStatus::Added && status != WorkService::GroupOperationStatus::Exists) { 
ENTROPY_LOG_ERROR("[EntropyCppAppExample] Failed to register WorkContractGroup"); EntropyApplication::shared().terminate(1); return; @@ -50,7 +51,7 @@ class MyDelegate : public EntropyAppDelegate { // If this was the last contract to finish, request app termination if (--remaining_ == 0) { ENTROPY_LOG_INFO("[EntropyCppAppExample] All work completed; requesting terminate"); - //EntropyApplication::shared().terminate(0); + // EntropyApplication::shared().terminate(0); } }); if (handle.valid()) { @@ -92,7 +93,7 @@ int main() { auto& app = EntropyApplication::shared(); EntropyApplicationConfig cfg; - cfg.workerThreads = 0; // auto + cfg.workerThreads = 0; // auto cfg.shutdownDeadline = std::chrono::milliseconds(3000); app.configure(cfg); diff --git a/Examples/EntropyMainExample.c b/Examples/EntropyMainExample.c index 14b3b50..80eb705 100644 --- a/Examples/EntropyMainExample.c +++ b/Examples/EntropyMainExample.c @@ -2,36 +2,41 @@ #include static void will_finish(EntropyApp* app, void* userdata) { - (void)app; (void)userdata; + (void)app; + (void)userdata; ENTROPY_LOG_INFO_F("[EntropyMainExample] will_finish_launching"); } static void did_finish(EntropyApp* app, void* userdata) { - (void)app; (void)userdata; + (void)app; + (void)userdata; ENTROPY_LOG_INFO_F("[EntropyMainExample] did_finish_launching"); // Request termination directly; no main-thread posting API entropy_main_terminate(0); } static bool should_terminate(EntropyApp* app, void* userdata) { - (void)app; (void)userdata; + (void)app; + (void)userdata; // Allow termination when requested return true; } static void will_terminate(EntropyApp* app, void* userdata) { - (void)app; (void)userdata; + (void)app; + (void)userdata; ENTROPY_LOG_INFO_F("[EntropyMainExample] will_terminate"); } static void did_catch(EntropyApp* app, void* userdata) { - (void)app; (void)userdata; + (void)app; + (void)userdata; ENTROPY_LOG_WARNING_F("[EntropyMainExample] did_catch_unhandled_exception (if any)"); } int main(void) { 
EntropyMainConfig cfg = {0}; - cfg.worker_threads = 0; // auto + cfg.worker_threads = 0; // auto cfg.shutdown_deadline_ms = 3000; EntropyAppDelegateC del = {0}; diff --git a/Examples/EntropyObjectExample.cpp b/Examples/EntropyObjectExample.cpp index f67cbb8..388df8c 100644 --- a/Examples/EntropyObjectExample.cpp +++ b/Examples/EntropyObjectExample.cpp @@ -1,15 +1,17 @@ #define NOMINMAX #include + #include using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Logging; -class GameObject : public EntropyObject { +class GameObject : public EntropyObject +{ ENTROPY_CLASS_BODY(GameObject) - + std::string _name; - + public: explicit GameObject(std::string name) : _name(std::move(name)) {} @@ -17,7 +19,9 @@ class GameObject : public EntropyObject { return std::format("GameObject('{}')@{}", _name, static_cast(this)); } - const std::string& name() const { return _name; } + const std::string& name() const { + return _name; + } }; int main() { @@ -25,19 +29,19 @@ int main() { std::vector> objects; objects.emplace_back(makeRef("Player")); objects.emplace_back(makeRef("Enemy")); - + // Reference counted sharing auto shared = makeRef("PowerUp"); std::vector> references; references.push_back(shared); references.push_back(shared); - + ENTROPY_LOG_INFO_CAT("Example", std::format("Shared object refcount: {}", shared->refCount())); ENTROPY_LOG_INFO_CAT("Example", std::format("Object info: {}", shared->debugString())); - + // shared_ptr interop auto sp = toSharedPtr(shared); ENTROPY_LOG_INFO_CAT("Example", std::format("Can convert to shared_ptr: {}", sp != nullptr)); - + return 0; -} \ No newline at end of file +} diff --git a/Examples/EntropyObjectHandleExample.cpp b/Examples/EntropyObjectHandleExample.cpp index 85579af..0eaf154 100644 --- a/Examples/EntropyObjectHandleExample.cpp +++ b/Examples/EntropyObjectHandleExample.cpp @@ -1,42 +1,52 @@ #define NOMINMAX #include -#include + +#include #include #include -#include +#include using namespace EntropyEngine::Core; 
using namespace EntropyEngine::Core::Logging; using namespace std::chrono_literals; // A simple EntropyObject-derived type -class GameObject : public EntropyObject { +class GameObject : public EntropyObject +{ ENTROPY_CLASS_BODY(GameObject) std::string _name; + public: explicit GameObject(std::string name) : _name(std::move(name)) {} std::string toString() const override { if (hasHandle()) { - return std::format("GameObject('{}')#({:p}, idx={}, gen={})", _name, handleOwner(), handleIndex(), handleGeneration()); + return std::format("GameObject('{}')#({:p}, idx={}, gen={})", _name, handleOwner(), handleIndex(), + handleGeneration()); } else { return std::format("GameObject('{}')@{:p}", _name, static_cast(this)); } } - const std::string& name() const { return _name; } + const std::string& name() const { + return _name; + } }; // Tag type for typed handle -struct GameObjectTag {}; +struct GameObjectTag +{ +}; // A tiny owner/registry that stamps handle identity onto objects -class GameObjectPool { +class GameObjectPool +{ public: using Handle = TypeSystem::TypedHandle; private: - struct Slot { - RefObject obj; // owning reference - uint32_t generation = 1; // start at 1 so {0,0} remains invalid + struct Slot + { + RefObject obj; // owning reference + uint32_t generation = 1; // start at 1 so {0,0} remains invalid bool occupied = false; }; @@ -77,7 +87,7 @@ class GameObjectPool { // Resolve typed handle to a retained reference (copy of RefObject) RefObject resolve(const Handle& h) const noexcept { if (!validate(h)) return {}; - return _slots[h.getIndex()].obj; // copy retains + return _slots[h.getIndex()].obj; // copy retains } // Erase object by handle (clears identity, bumps generation) @@ -91,7 +101,7 @@ class GameObjectPool { s.occupied = false; // Bump generation so stale handles become invalid s.generation++; - if (s.generation == 0) s.generation = 1; // avoid 0 + if (s.generation == 0) s.generation = 1; // avoid 0 } // Convenience: make a handle from a live 
object that is currently stamped @@ -110,39 +120,43 @@ int main() { // Create a couple of objects auto player = makeRef("Player"); - auto enemy = makeRef("Enemy"); + auto enemy = makeRef("Enemy"); // Insert into the pool; this stamps handle identity on the objects auto hPlayer = pool.insert(player); - auto hEnemy = pool.insert(enemy); + auto hEnemy = pool.insert(enemy); ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Player debug: {}", player->debugString())); ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Enemy debug: {}", enemy->debugString())); // Resolve handles back to objects auto playerRef = pool.resolve(hPlayer); - auto enemyRef = pool.resolve(hEnemy); + auto enemyRef = pool.resolve(hEnemy); ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Resolved Player valid: {}", static_cast(playerRef))); ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Resolved Enemy valid: {}", static_cast(enemyRef))); // Demonstrate object -> handle via stamped identity if (auto objHandle = pool.toHandle(player)) { - ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Object->Handle roundtrip: index={} gen={}", objHandle->getIndex(), objHandle->getGeneration())); + ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Object->Handle roundtrip: index={} gen={}", + objHandle->getIndex(), objHandle->getGeneration())); } // Erase the enemy; its handle should become invalid after generation bump pool.erase(hEnemy); auto enemyAfterErase = pool.resolve(hEnemy); - ENTROPY_LOG_INFO_CAT("HandleExample", std::format("After erase, old enemy handle valid: {}", static_cast(enemyAfterErase))); + ENTROPY_LOG_INFO_CAT("HandleExample", + std::format("After erase, old enemy handle valid: {}", static_cast(enemyAfterErase))); // Re-insert a new object; likely reuses the same slot but with new generation auto newEnemy = makeRef("Enemy#2"); auto hEnemy2 = pool.insert(newEnemy); ENTROPY_LOG_INFO_CAT("HandleExample", std::format("New enemy debug: {}", newEnemy->debugString())); - 
ENTROPY_LOG_INFO_CAT("HandleExample", std::format("Old enemy handle index/gen = {}/{}; New handle index/gen = {}/{}", - hEnemy.getIndex(), hEnemy.getGeneration(), hEnemy2.getIndex(), hEnemy2.getGeneration())); + ENTROPY_LOG_INFO_CAT( + "HandleExample", + std::format("Old enemy handle index/gen = {}/{}; New handle index/gen = {}/{}", hEnemy.getIndex(), + hEnemy.getGeneration(), hEnemy2.getIndex(), hEnemy2.getGeneration())); // Small pause to make logs readable if needed std::this_thread::sleep_for(10ms); diff --git a/Examples/VFSCAPIExample.c b/Examples/VFSCAPIExample.c index 2e9eb6e..1465855 100644 --- a/Examples/VFSCAPIExample.c +++ b/Examples/VFSCAPIExample.c @@ -16,13 +16,13 @@ #include /* EntropyCore C API headers */ +#include "Logging/CLogger.h" +#include "entropy/entropy_directory_handle.h" +#include "entropy/entropy_file_handle.h" +#include "entropy/entropy_virtual_file_system.h" #include "entropy/entropy_work_contract_group.h" #include "entropy/entropy_work_service.h" -#include "entropy/entropy_virtual_file_system.h" -#include "entropy/entropy_file_handle.h" -#include "entropy/entropy_directory_handle.h" #include "entropy/entropy_write_batch.h" -#include "Logging/CLogger.h" /* ============================================================================ * Helper Functions @@ -78,8 +78,7 @@ static void example_basic_file_ops(entropy_VirtualFileSystem vfs) { EntropyFileOpStatus write_status = entropy_file_operation_handle_status(write_op, &status); if (write_status != ENTROPY_FILE_OP_COMPLETE) { - const EntropyFileErrorInfo* err = - entropy_file_operation_handle_error_info(write_op, &status); + const EntropyFileErrorInfo* err = entropy_file_operation_handle_error_info(write_op, &status); ENTROPY_LOG_ERROR_F("Write failed:"); print_file_error(err); } else { @@ -102,8 +101,7 @@ static void example_basic_file_ops(entropy_VirtualFileSystem vfs) { const char* content = entropy_file_operation_handle_contents_text(read_op, &status); ENTROPY_LOG_INFO_CAT_F("Example", 
"Read content:\n%s", content); } else { - const EntropyFileErrorInfo* err = - entropy_file_operation_handle_error_info(read_op, &status); + const EntropyFileErrorInfo* err = entropy_file_operation_handle_error_info(read_op, &status); ENTROPY_LOG_ERROR_F("Read failed:"); print_file_error(err); } @@ -141,8 +139,7 @@ static void example_directory_ops(entropy_VirtualFileSystem vfs) { /* Create directory */ ENTROPY_LOG_INFO_CAT_F("Example", "Creating directory: %s", test_dir); - entropy_FileOperationHandle create_op = - entropy_directory_handle_create(dh, ENTROPY_TRUE, &status); + entropy_FileOperationHandle create_op = entropy_directory_handle_create(dh, ENTROPY_TRUE, &status); check_status(status, "create directory"); entropy_file_operation_handle_wait(create_op, &status); @@ -185,8 +182,7 @@ static void example_directory_ops(entropy_VirtualFileSystem vfs) { list_opts.include_hidden = ENTROPY_FALSE; list_opts.sort_by = ENTROPY_SORT_BY_NAME; - entropy_FileOperationHandle list_op = - entropy_directory_handle_list(dh, &list_opts, &status); + entropy_FileOperationHandle list_op = entropy_directory_handle_list(dh, &list_opts, &status); check_status(status, "list directory"); entropy_file_operation_handle_wait(list_op, &status); @@ -199,10 +195,7 @@ static void example_directory_ops(entropy_VirtualFileSystem vfs) { ENTROPY_LOG_INFO_CAT_F("Example", "Found %zu entries:", count); for (size_t i = 0; i < count; i++) { - printf(" [%zu] %s (%llu bytes)\n", - i, - entries[i].name, - (unsigned long long)entries[i].metadata.size); + printf(" [%zu] %s (%llu bytes)\n", i, entries[i].name, (unsigned long long)entries[i].metadata.size); } } @@ -210,8 +203,7 @@ static void example_directory_ops(entropy_VirtualFileSystem vfs) { /* Remove directory recursively */ ENTROPY_LOG_INFO_CAT_F("Example", "Removing directory recursively..."); - entropy_FileOperationHandle remove_op = - entropy_directory_handle_remove(dh, ENTROPY_TRUE, &status); + entropy_FileOperationHandle remove_op = 
entropy_directory_handle_remove(dh, ENTROPY_TRUE, &status); check_status(status, "remove directory"); entropy_file_operation_handle_wait(remove_op, &status); @@ -246,8 +238,7 @@ static void example_write_batch(entropy_VirtualFileSystem vfs) { opts.create_if_missing = ENTROPY_TRUE; opts.truncate = ENTROPY_TRUE; - entropy_FileOperationHandle write_op = - entropy_file_handle_write_all_text_with_options(fh, initial, &opts, &status); + entropy_FileOperationHandle write_op = entropy_file_handle_write_all_text_with_options(fh, initial, &opts, &status); entropy_file_operation_handle_wait(write_op, &status); entropy_file_operation_handle_destroy(write_op); @@ -294,8 +285,7 @@ static void example_write_batch(entropy_VirtualFileSystem vfs) { if (entropy_file_operation_handle_status(commit_op, &status) == ENTROPY_FILE_OP_COMPLETE) { ENTROPY_LOG_INFO_CAT_F("Example", "Batch committed successfully"); } else { - const EntropyFileErrorInfo* err = - entropy_file_operation_handle_error_info(commit_op, &status); + const EntropyFileErrorInfo* err = entropy_file_operation_handle_error_info(commit_op, &status); ENTROPY_LOG_ERROR_F("Batch commit failed:"); print_file_error(err); } @@ -331,8 +321,7 @@ int main(void) { /* Create work contract group */ ENTROPY_LOG_INFO_F("Creating WorkContractGroup..."); - entropy_WorkContractGroup group = - entropy_work_contract_group_create(2048, "VFS_Example", &status); + entropy_WorkContractGroup group = entropy_work_contract_group_create(2048, "VFS_Example", &status); check_status(status, "create work contract group"); /* Create work service */ diff --git a/Examples/VFS_Basics.cpp b/Examples/VFS_Basics.cpp index a3762ca..a17fac6 100644 --- a/Examples/VFS_Basics.cpp +++ b/Examples/VFS_Basics.cpp @@ -1,11 +1,12 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" -#include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" #include #include +#include 
"Concurrency/WorkContractGroup.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" + using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; using namespace EntropyEngine::Core::IO; @@ -30,7 +31,10 @@ int main() { // Write text to a file via FileHandle const std::string text = "Hello, VirtualFileSystem!"; - WriteOptions wo; wo.truncate = true; wo.createIfMissing = true; wo.createParentDirs = true; + WriteOptions wo; + wo.truncate = true; + wo.createIfMissing = true; + wo.createParentDirs = true; auto w = fh.writeAll(text, wo); w.wait(); if (w.status() != FileOpStatus::Complete) { diff --git a/Examples/VFS_CopyMove.cpp b/Examples/VFS_CopyMove.cpp index 147f6c8..d812b6a 100644 --- a/Examples/VFS_CopyMove.cpp +++ b/Examples/VFS_CopyMove.cpp @@ -1,11 +1,12 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" -#include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" #include #include +#include "Concurrency/WorkContractGroup.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" + using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; using namespace EntropyEngine::Core::IO; @@ -28,32 +29,46 @@ int main() { // Seed source file via FileHandle const std::string payload = std::string(4096, 'A'); - WriteOptions wo; wo.truncate = true; wo.createIfMissing = true; - auto w = srcHandle.writeAll(payload, wo); w.wait(); + WriteOptions wo; + wo.truncate = true; + wo.createIfMissing = true; + auto w = srcHandle.writeAll(payload, wo); + w.wait(); if (w.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("Seed write failed: ") + w.errorInfo().message); - svc.stop(); return 1; + svc.stop(); + return 1; } // 
Copy using FileHandle read/write (simple demo) - auto r = srcHandle.readAll(); r.wait(); + auto r = srcHandle.readAll(); + r.wait(); if (r.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("Copy read failed: ") + r.errorInfo().message); - svc.stop(); return 1; + svc.stop(); + return 1; } - WriteOptions cwo; cwo.truncate = true; cwo.createIfMissing = true; - auto cw = dstHandle.writeAll(r.contentsBytes(), cwo); cw.wait(); + WriteOptions cwo; + cwo.truncate = true; + cwo.createIfMissing = true; + auto cw = dstHandle.writeAll(r.contentsBytes(), cwo); + cw.wait(); if (cw.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("Copy write failed: ") + cw.errorInfo().message); - svc.stop(); return 1; + svc.stop(); + return 1; } ENTROPY_LOG_INFO(std::string("Copy complete, bytes: ") + std::to_string(cw.bytesWritten())); // Move: write to destination (overwrite) then remove source - auto r2 = dstHandle.readAll(); r2.wait(); + auto r2 = dstHandle.readAll(); + r2.wait(); if (r2.status() == FileOpStatus::Complete) { - WriteOptions mwo; mwo.truncate = true; mwo.createIfMissing = true; - auto mw = dst2Handle.writeAll(r2.contentsBytes(), mwo); mw.wait(); + WriteOptions mwo; + mwo.truncate = true; + mwo.createIfMissing = true; + auto mw = dst2Handle.writeAll(r2.contentsBytes(), mwo); + mw.wait(); if (mw.status() == FileOpStatus::Complete) { dstHandle.remove().wait(); ENTROPY_LOG_INFO(std::string("Move complete: ") + dst2Handle.metadata().path); diff --git a/Examples/VFS_LinesAndBatch.cpp b/Examples/VFS_LinesAndBatch.cpp index 4f0169a..7af73d2 100644 --- a/Examples/VFS_LinesAndBatch.cpp +++ b/Examples/VFS_LinesAndBatch.cpp @@ -1,11 +1,12 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" -#include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" #include #include +#include "Concurrency/WorkContractGroup.h" +#include "Concurrency/WorkService.h" +#include 
"EntropyCore.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" + using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; using namespace EntropyEngine::Core::IO; @@ -25,8 +26,12 @@ int main() { auto fh = vfs.createFileHandle(makeTempFile("vfs_lines_batch")); // Seed file with two lines (no trailing newline on purpose) - WriteOptions wo; wo.truncate = true; wo.ensureFinalNewline = false; wo.createIfMissing = true; - auto w = fh.writeAll("Line one\nLine two", wo); w.wait(); + WriteOptions wo; + wo.truncate = true; + wo.ensureFinalNewline = false; + wo.createIfMissing = true; + auto w = fh.writeAll("Line one\nLine two", wo); + w.wait(); if (w.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("Initial write failed: ") + w.errorInfo().message); svc.stop(); @@ -34,11 +39,13 @@ int main() { } // Read the second line (index 1) - auto r2 = fh.readLine(1); r2.wait(); + auto r2 = fh.readLine(1); + r2.wait(); ENTROPY_LOG_INFO(std::string("Line 2: '") + r2.contentsText() + "'"); // Replace line 1 (index 0) using writeLine - auto wl = fh.writeLine(0, "LINE ONE (modified)"); wl.wait(); + auto wl = fh.writeLine(0, "LINE ONE (modified)"); + wl.wait(); if (wl.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("writeLine failed: ") + wl.errorInfo().message); svc.stop(); @@ -46,8 +53,11 @@ int main() { } // Ensure final newline on whole-file rewrite - WriteOptions wo2; wo2.truncate = true; wo2.ensureFinalNewline = true; - auto w2 = fh.writeAll("A single line without LF", wo2); w2.wait(); + WriteOptions wo2; + wo2.truncate = true; + wo2.ensureFinalNewline = true; + auto w2 = fh.writeAll("A single line without LF", wo2); + w2.wait(); if (w2.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR(std::string("ensureFinalNewline write failed: ") + w2.errorInfo().message); svc.stop(); @@ -55,13 +65,15 @@ int main() { } // Verify last byte is a newline by reading all - auto rAll = 
fh.readAll(); rAll.wait(); + auto rAll = fh.readAll(); + rAll.wait(); auto bytes = rAll.contentsBytes(); bool endsWithLF = !bytes.empty() && bytes.back() == std::byte('\n'); ENTROPY_LOG_INFO(std::string("Final newline present: ") + (endsWithLF ? "true" : "false")); // Cleanup - auto rm = fh.remove(); rm.wait(); + auto rm = fh.remove(); + rm.wait(); svc.stop(); return 0; diff --git a/Examples/VFS_Metadata.cpp b/Examples/VFS_Metadata.cpp index 26b8b0e..7906d76 100644 --- a/Examples/VFS_Metadata.cpp +++ b/Examples/VFS_Metadata.cpp @@ -1,22 +1,26 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" -#include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" -#include "VirtualFileSystem/DirectoryHandle.h" #include #include +#include "Concurrency/WorkContractGroup.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" +#include "VirtualFileSystem/DirectoryHandle.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" + using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; using namespace EntropyEngine::Core::IO; -static std::string tp(const std::string& name) { return (std::filesystem::temp_directory_path() / name).string(); } +static std::string tp(const std::string& name) { + return (std::filesystem::temp_directory_path() / name).string(); +} int main() { WorkService svc({}); WorkContractGroup group(128, "VFS_Metadata"); - svc.start(); svc.addWorkContractGroup(&group); + svc.start(); + svc.addWorkContractGroup(&group); VirtualFileSystem vfs(&group); @@ -24,28 +28,37 @@ int main() { auto fh1 = vfs.createFileHandle(tp("vfs_meta_1.txt")); auto fh2 = vfs.createFileHandle(tp("vfs_meta_2.txt")); const std::string data = "metadata demo"; - WriteOptions wo; wo.truncate = true; wo.createIfMissing = true; + WriteOptions wo; + wo.truncate = true; + wo.createIfMissing = true; fh1.writeAll(data, wo).wait(); 
fh2.writeAll(data, wo).wait(); // Single file metadata via DirectoryHandle listing with glob auto tempDir = vfs.createDirectoryHandle(std::filesystem::temp_directory_path().string()); - ListDirectoryOptions single; single.globPattern = std::filesystem::path(fh1.metadata().path).filename().string(); - auto singleList = tempDir.list(single); singleList.wait(); + ListDirectoryOptions single; + single.globPattern = std::filesystem::path(fh1.metadata().path).filename().string(); + auto singleList = tempDir.list(single); + singleList.wait(); if (singleList.status() == FileOpStatus::Complete) { const auto& entries = singleList.directoryEntries(); if (!entries.empty()) { const auto& meta = entries.front().metadata; - ENTROPY_LOG_INFO(entries.front().fullPath + ": size=" + std::to_string(meta.size) + ", exists=" + std::to_string(meta.exists ? 1 : 0)); + ENTROPY_LOG_INFO(entries.front().fullPath + ": size=" + std::to_string(meta.size) + + ", exists=" + std::to_string(meta.exists ? 1 : 0)); } } // Batch-like metadata via listing of temp directory with glob - ListDirectoryOptions batch; batch.globPattern = std::string("vfs_meta_*.txt"); batch.sortBy = ListDirectoryOptions::ByName; - auto batchList = tempDir.list(batch); batchList.wait(); + ListDirectoryOptions batch; + batch.globPattern = std::string("vfs_meta_*.txt"); + batch.sortBy = ListDirectoryOptions::ByName; + auto batchList = tempDir.list(batch); + batchList.wait(); if (batchList.status() == FileOpStatus::Complete) { for (const auto& e : batchList.directoryEntries()) { - ENTROPY_LOG_INFO(e.fullPath + ": exists=" + std::to_string(e.metadata.exists ? 1 : 0) + ", size=" + std::to_string(e.metadata.size)); + ENTROPY_LOG_INFO(e.fullPath + ": exists=" + std::to_string(e.metadata.exists ? 
1 : 0) + + ", size=" + std::to_string(e.metadata.size)); } } diff --git a/Examples/VFS_Streams.cpp b/Examples/VFS_Streams.cpp index ed19098..8c7fe16 100644 --- a/Examples/VFS_Streams.cpp +++ b/Examples/VFS_Streams.cpp @@ -1,12 +1,13 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" +#include +#include +#include + #include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" #include "VirtualFileSystem/FileHandle.h" #include "VirtualFileSystem/FileStream.h" -#include -#include -#include +#include "VirtualFileSystem/VirtualFileSystem.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; @@ -44,7 +45,8 @@ int main() { BufferedFileStream buffered(std::move(base), 4096); const char* msgB = "Buffered block\n"; buffered.seek(0, std::ios::end); - auto wroteB = buffered.write(std::span(reinterpret_cast(msgB), strlen(msgB))); + auto wroteB = + buffered.write(std::span(reinterpret_cast(msgB), strlen(msgB))); buffered.flush(); ENTROPY_LOG_INFO(std::string("Buffered wrote: ") + std::to_string(wroteB.bytesTransferred) + " bytes"); diff --git a/Examples/VirtualFileStreamingExample.cpp b/Examples/VirtualFileStreamingExample.cpp index fd80eb5..e9e384b 100644 --- a/Examples/VirtualFileStreamingExample.cpp +++ b/Examples/VirtualFileStreamingExample.cpp @@ -1,11 +1,12 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" +#include +#include + #include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" #include "VirtualFileSystem/DirectoryHandle.h" #include "VirtualFileSystem/FileStream.h" -#include -#include +#include "VirtualFileSystem/VirtualFileSystem.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; @@ -45,7 +46,8 @@ int main() { BufferedFileStream buffered(std::move(base), 4096); const 
char* msgB = "Buffered streaming block B\n"; buffered.seek(0, std::ios::end); - auto wroteB = buffered.write(std::span(reinterpret_cast(msgB), strlen(msgB))); + auto wroteB = + buffered.write(std::span(reinterpret_cast(msgB), strlen(msgB))); buffered.flush(); ENTROPY_LOG_INFO(std::string("Buffered: wrote ") + std::to_string(wroteB.bytesTransferred) + " bytes"); diff --git a/Examples/VirtualFileSystemExample.cpp b/Examples/VirtualFileSystemExample.cpp index 42174bb..6717074 100644 --- a/Examples/VirtualFileSystemExample.cpp +++ b/Examples/VirtualFileSystemExample.cpp @@ -1,14 +1,15 @@ -#include "EntropyCore.h" -#include "Concurrency/WorkService.h" +#include +#include +#include +#include + #include "Concurrency/WorkContractGroup.h" -#include "VirtualFileSystem/VirtualFileSystem.h" +#include "Concurrency/WorkService.h" +#include "EntropyCore.h" #include "VirtualFileSystem/DirectoryHandle.h" #include "VirtualFileSystem/FileStream.h" +#include "VirtualFileSystem/VirtualFileSystem.h" #include "VirtualFileSystem/WriteBatch.h" -#include -#include -#include -#include using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; @@ -78,7 +79,8 @@ int main() { // Write range (append-like by offset) const char* tail = "\nAppended via writeRange."; - auto wr = handle.writeRange( rAll.contentsBytes().size(), std::span(reinterpret_cast(tail), strlen(tail)) ); + auto wr = handle.writeRange(rAll.contentsBytes().size(), + std::span(reinterpret_cast(tail), strlen(tail))); wr.wait(); if (wr.status() != FileOpStatus::Complete) { ENTROPY_LOG_ERROR("writeRange failed"); @@ -90,7 +92,7 @@ int main() { if (rLine.status() == FileOpStatus::Complete) { ENTROPY_LOG_INFO("readLine(1): '" + rLine.contentsText() + "'"); } - + // Test writeLine with atomic rename (improved performance) auto wLine = handle.writeLine(1, "Modified line two via atomic rename"); wLine.wait(); @@ -99,7 +101,7 @@ int main() { } else { ENTROPY_LOG_ERROR("writeLine failed: " + 
wLine.errorInfo().message); } - + // Test streaming API for large files ENTROPY_LOG_INFO("Testing streaming API:"); auto stream = handle.openReadWriteStream(); @@ -107,8 +109,8 @@ int main() { // Write some data using stream const char* streamData = "\nData written via streaming API"; stream->seek(0, std::ios::end); - auto writeResult = stream->write(std::span( - reinterpret_cast(streamData), strlen(streamData))); + auto writeResult = stream->write( + std::span(reinterpret_cast(streamData), strlen(streamData))); stream->flush(); ENTROPY_LOG_INFO("Wrote " + std::to_string(writeResult.bytesTransferred) + " bytes via stream"); @@ -118,7 +120,7 @@ int main() { auto readResult = stream->read(buffer); ENTROPY_LOG_INFO("Read " + std::to_string(readResult.bytesTransferred) + " bytes via stream"); } - + // Test multiple concurrent operations to verify thread safety ENTROPY_LOG_INFO("Testing concurrent operations:"); std::vector concurrentOps; @@ -135,7 +137,7 @@ int main() { } } ENTROPY_LOG_INFO("All concurrent operations completed"); - + // Demonstrate concurrency: many file operations writing to the SAME file ENTROPY_LOG_INFO("1000 file operations writing to the same file (serialized by VFS lock):"); const std::string sameFile = "vfs_contracts_same_file.txt"; @@ -149,8 +151,7 @@ int main() { auto fh = vfs.createFileHandle(sameFile); // Each operation writes its own line index. Lines are: // "Work contract N wrote!" 
- writeOps.push_back(fh.writeLine(static_cast(i), - "Work contract " + std::to_string(i + 1) + " wrote!")); + writeOps.push_back(fh.writeLine(static_cast(i), "Work contract " + std::to_string(i + 1) + " wrote!")); ENTROPY_LOG_INFO("Scheduled write " + std::to_string(i + 1)); } // Wait for all write operations to finish @@ -192,7 +193,8 @@ int main() { batchOp.wait(); if (batchOp.status() == FileOpStatus::Complete) { - ENTROPY_LOG_INFO("Batch write completed successfully, wrote " + std::to_string(batchOp.bytesWritten()) + " bytes"); + ENTROPY_LOG_INFO("Batch write completed successfully, wrote " + std::to_string(batchOp.bytesWritten()) + + " bytes"); // Read back the result auto batchRead = batchHandle.readAll(); @@ -221,7 +223,8 @@ int main() { op.wait(); } auto endIndividual = std::chrono::high_resolution_clock::now(); - auto individualDuration = std::chrono::duration_cast(endIndividual - startIndividual).count(); + auto individualDuration = + std::chrono::duration_cast(endIndividual - startIndividual).count(); // Benchmark: Batch operation auto bulkHandle = vfs.createFileHandle("bulk_update_test.txt"); @@ -240,7 +243,8 @@ int main() { ENTROPY_LOG_INFO("Individual writes (100 lines): " + std::to_string(individualDuration) + "ms"); ENTROPY_LOG_INFO("Batch write (100 lines): " + std::to_string(batchDuration) + "ms"); if (individualDuration > 0) { - float speedup = static_cast(individualDuration) / static_cast(batchDuration > 0 ? batchDuration : 1); + float speedup = + static_cast(individualDuration) / static_cast(batchDuration > 0 ? 
batchDuration : 1); ENTROPY_LOG_INFO("Batch is " + std::to_string(speedup) + "x faster than individual operations"); } @@ -267,13 +271,17 @@ int main() { // Query metadata via DirectoryHandle listing with glob (no raw backend) auto currentDir = vfs.createDirectoryHandle("."); - ListDirectoryOptions bList; bList.globPattern = std::string("metadata_test_*.txt"); bList.sortBy = ListDirectoryOptions::ByName; - auto metaList = currentDir.list(bList); metaList.wait(); + ListDirectoryOptions bList; + bList.globPattern = std::string("metadata_test_*.txt"); + bList.sortBy = ListDirectoryOptions::ByName; + auto metaList = currentDir.list(bList); + metaList.wait(); if (metaList.status() == FileOpStatus::Complete) { const auto& entries = metaList.directoryEntries(); ENTROPY_LOG_INFO("Retrieved metadata for " + std::to_string(entries.size()) + " files via directory listing"); for (const auto& e : entries) { - ENTROPY_LOG_INFO(" " + e.fullPath + ": exists=" + std::to_string(e.metadata.exists ? 1 : 0) + ", size=" + std::to_string(e.metadata.size) + " bytes"); + ENTROPY_LOG_INFO(" " + e.fullPath + ": exists=" + std::to_string(e.metadata.exists ? 
1 : 0) + + ", size=" + std::to_string(e.metadata.size) + " bytes"); } } @@ -297,17 +305,21 @@ int main() { copyOpts.progressCallback = [](size_t copied, size_t total) { int percent = static_cast((copied * 100) / total); if (percent % 10 == 0 && copied > 0) { - ENTROPY_LOG_INFO(" Copy progress: " + std::to_string(percent) + "% (" + - std::to_string(copied) + "/" + std::to_string(total) + " bytes)"); + ENTROPY_LOG_INFO(" Copy progress: " + std::to_string(percent) + "% (" + std::to_string(copied) + "/" + + std::to_string(total) + " bytes)"); } return true; }; // Copy using FileHandle read/write (no raw backend) - auto srcRead = sourceHandle.readAll(); srcRead.wait(); + auto srcRead = sourceHandle.readAll(); + srcRead.wait(); if (srcRead.status() == FileOpStatus::Complete) { - WriteOptions cwo; cwo.truncate = true; cwo.createIfMissing = true; - auto copyWrite = destHandle.writeAll(srcRead.contentsBytes(), cwo); copyWrite.wait(); + WriteOptions cwo; + cwo.truncate = true; + cwo.createIfMissing = true; + auto copyWrite = destHandle.writeAll(srcRead.contentsBytes(), cwo); + copyWrite.wait(); if (copyWrite.status() == FileOpStatus::Complete) { ENTROPY_LOG_INFO("Copy completed: " + std::to_string(copyWrite.bytesWritten()) + " bytes copied"); } else { @@ -326,10 +338,14 @@ int main() { moveSourceHandle.writeAll("This file will be moved").wait(); // Move implemented as copy then remove via FileHandle operations - auto moveRead = moveSourceHandle.readAll(); moveRead.wait(); + auto moveRead = moveSourceHandle.readAll(); + moveRead.wait(); if (moveRead.status() == FileOpStatus::Complete) { - WriteOptions mow; mow.truncate = true; mow.createIfMissing = true; - auto moveWrite = moveDestHandle.writeAll(moveRead.contentsBytes(), mow); moveWrite.wait(); + WriteOptions mow; + mow.truncate = true; + mow.createIfMissing = true; + auto moveWrite = moveDestHandle.writeAll(moveRead.contentsBytes(), mow); + moveWrite.wait(); if (moveWrite.status() == FileOpStatus::Complete) { 
moveSourceHandle.remove().wait(); ENTROPY_LOG_INFO("Move completed successfully"); @@ -343,10 +359,14 @@ int main() { // Move with overwrite: write/truncate destination and remove source auto moveSource2 = vfs.createFileHandle("file_to_move2.txt"); moveSource2.writeAll("Second file to move").wait(); - auto moveRead2 = moveSource2.readAll(); moveRead2.wait(); + auto moveRead2 = moveSource2.readAll(); + moveRead2.wait(); if (moveRead2.status() == FileOpStatus::Complete) { - WriteOptions mow2; mow2.truncate = true; mow2.createIfMissing = true; - auto moveWrite2 = moveDestHandle.writeAll(moveRead2.contentsBytes(), mow2); moveWrite2.wait(); + WriteOptions mow2; + mow2.truncate = true; + mow2.createIfMissing = true; + auto moveWrite2 = moveDestHandle.writeAll(moveRead2.contentsBytes(), mow2); + moveWrite2.wait(); if (moveWrite2.status() == FileOpStatus::Complete) { moveSource2.remove().wait(); ENTROPY_LOG_INFO("Move with overwrite completed successfully"); @@ -372,26 +392,37 @@ int main() { // Set up watch options with filtering WatchOptions watchOpts; watchOpts.recursive = true; - watchOpts.includePatterns = {"*.txt", "*.log"}; // Only watch text and log files - watchOpts.excludePatterns = {"*.tmp", "*.temp"}; // Ignore temp files + watchOpts.includePatterns = {"*.txt", "*.log"}; // Only watch text and log files + watchOpts.excludePatterns = {"*.tmp", "*.temp"}; // Ignore temp files // Create a watch with a callback std::atomic eventCount{0}; - auto watch = vfs.watchDirectory("watch_test_dir", [&eventCount](const FileWatchInfo& info) { - eventCount++; - std::string eventType; - switch (info.event) { - case FileWatchEvent::Created: eventType = "Created"; break; - case FileWatchEvent::Modified: eventType = "Modified"; break; - case FileWatchEvent::Deleted: eventType = "Deleted"; break; - case FileWatchEvent::Renamed: eventType = "Renamed"; break; - } - - ENTROPY_LOG_INFO("File event: " + eventType + " - " + info.path); - if (info.oldPath.has_value()) { - 
ENTROPY_LOG_INFO(" Old path: " + info.oldPath.value()); - } - }, watchOpts); + auto watch = vfs.watchDirectory( + "watch_test_dir", + [&eventCount](const FileWatchInfo& info) { + eventCount++; + std::string eventType; + switch (info.event) { + case FileWatchEvent::Created: + eventType = "Created"; + break; + case FileWatchEvent::Modified: + eventType = "Modified"; + break; + case FileWatchEvent::Deleted: + eventType = "Deleted"; + break; + case FileWatchEvent::Renamed: + eventType = "Renamed"; + break; + } + + ENTROPY_LOG_INFO("File event: " + eventType + " - " + info.path); + if (info.oldPath.has_value()) { + ENTROPY_LOG_INFO(" Old path: " + info.oldPath.value()); + } + }, + watchOpts); if (watch && watch->isWatching()) { ENTROPY_LOG_INFO("Started watching directory: watch_test_dir"); @@ -492,7 +523,7 @@ int main() { if (listingWithHidden.status() == FileOpStatus::Complete) { ENTROPY_LOG_INFO("Directory contains " + std::to_string(listingWithHidden.directoryEntries().size()) + - " total entries (including hidden)"); + " total entries (including hidden)"); } // Recursive listing @@ -502,7 +533,8 @@ int main() { recursiveListing.wait(); if (recursiveListing.status() == FileOpStatus::Complete) { - ENTROPY_LOG_INFO("Recursive listing found " + std::to_string(recursiveListing.directoryEntries().size()) + " entries:"); + ENTROPY_LOG_INFO("Recursive listing found " + std::to_string(recursiveListing.directoryEntries().size()) + + " entries:"); for (const auto& entry : recursiveListing.directoryEntries()) { ENTROPY_LOG_INFO(" " + entry.fullPath); } diff --git a/Examples/WorkContractExample.cpp b/Examples/WorkContractExample.cpp index 06442d8..b259679 100644 --- a/Examples/WorkContractExample.cpp +++ b/Examples/WorkContractExample.cpp @@ -2,9 +2,9 @@ // Created by Geenz on 8/8/25. // - #define NOMINMAX #include + #include using namespace EntropyEngine; using namespace Core; @@ -23,9 +23,8 @@ int main() { // Submit a bunch of contracts. 
Log a few handle debug strings and periodic group state. for (int i = 0; i < 1000; i++) { - auto h = group.createContract([=]() { - ENTROPY_LOG_INFO_CAT("WorkContractExample", std::format("Executing contract {}", i)); - }); + auto h = group.createContract( + [=]() { ENTROPY_LOG_INFO_CAT("WorkContractExample", std::format("Executing contract {}", i)); }); if (i < 3) { ENTROPY_LOG_INFO_CAT("WorkContractExample", std::format("Created handle: {}", h.debugString())); @@ -34,7 +33,8 @@ int main() { h.schedule(); if ((i + 1) % 250 == 0) { - ENTROPY_LOG_INFO_CAT("WorkContractExample", std::format("After scheduling {}: {}", i + 1, group.debugString())); + ENTROPY_LOG_INFO_CAT("WorkContractExample", + std::format("After scheduling {}: {}", i + 1, group.debugString())); } } diff --git a/Examples/WorkGraphCAPIExample.c b/Examples/WorkGraphCAPIExample.c index 9c54c79..43e34fb 100644 --- a/Examples/WorkGraphCAPIExample.c +++ b/Examples/WorkGraphCAPIExample.c @@ -21,11 +21,10 @@ * - Yieldable tasks that can suspend and resume */ -#include +#include #include +#include #include -#include - #include #include #include @@ -34,13 +33,15 @@ // Example Context Data // ============================================================================ -typedef struct { +typedef struct +{ int task_id; const char* task_name; int sleep_ms; } TaskContext; -typedef struct { +typedef struct +{ int poll_count; int max_polls; } YieldableContext; @@ -59,8 +60,7 @@ void load_data_task(void* user_data) { void process_data_task(void* user_data) { TaskContext* ctx = (TaskContext*)user_data; - ENTROPY_LOG_INFO_CAT_F("ProcessData", "Task %d: Processing %s...", - ctx->task_id, ctx->task_name); + ENTROPY_LOG_INFO_CAT_F("ProcessData", "Task %d: Processing %s...", ctx->task_id, ctx->task_name); // Simulate work for (volatile int i = 0; i < 2000000; i++); ENTROPY_LOG_INFO_CAT_F("ProcessData", "Task %d: Processing complete", ctx->task_id); @@ -86,8 +86,7 @@ EntropyWorkResult polling_task(void* user_data) { 
YieldableContext* ctx = (YieldableContext*)user_data; ctx->poll_count++; - ENTROPY_LOG_DEBUG_CAT_F("Poller", "Poll attempt %d/%d", - ctx->poll_count, ctx->max_polls); + ENTROPY_LOG_DEBUG_CAT_F("Poller", "Poll attempt %d/%d", ctx->poll_count, ctx->max_polls); if (ctx->poll_count < ctx->max_polls) { // Not ready yet - yield and try again later @@ -95,8 +94,7 @@ EntropyWorkResult polling_task(void* user_data) { } // Condition met - complete the task - ENTROPY_LOG_INFO_CAT_F("Poller", "Polling complete after %d attempts", - ctx->poll_count); + ENTROPY_LOG_INFO_CAT_F("Poller", "Polling complete after %d attempts", ctx->poll_count); return ENTROPY_WORK_COMPLETE; } @@ -106,8 +104,7 @@ EntropyWorkResult polling_task(void* user_data) { void check_status(const char* operation, EntropyStatus status) { if (status != ENTROPY_OK) { - ENTROPY_LOG_ERROR_CAT_F("Error", "%s failed: %s", - operation, entropy_status_to_string(status)); + ENTROPY_LOG_ERROR_CAT_F("Error", "%s failed: %s", operation, entropy_status_to_string(status)); exit(1); } } @@ -118,14 +115,9 @@ void print_stats(entropy_WorkGraph graph) { entropy_work_graph_get_stats(graph, &stats, &status); if (status == ENTROPY_OK) { - ENTROPY_LOG_INFO_CAT_F("Stats", - "Total: %u | Completed: %u | Failed: %u | Pending: %u | Executing: %u", - stats.total_nodes, - stats.completed_nodes, - stats.failed_nodes, - stats.pending_nodes, - stats.executing_nodes - ); + ENTROPY_LOG_INFO_CAT_F("Stats", "Total: %u | Completed: %u | Failed: %u | Pending: %u | Executing: %u", + stats.total_nodes, stats.completed_nodes, stats.failed_nodes, stats.pending_nodes, + stats.executing_nodes); } } @@ -145,9 +137,7 @@ int main(void) { // ======================================================================== ENTROPY_LOG_INFO_CAT_F("Setup", "Creating work contract group..."); - entropy_WorkContractGroup group = entropy_work_contract_group_create( - 2048, "WorkGraphExample", &status - ); + entropy_WorkContractGroup group = 
entropy_work_contract_group_create(2048, "WorkGraphExample", &status); check_status("Create work contract group", status); // ======================================================================== @@ -177,9 +167,7 @@ int main(void) { entropy_work_graph_config_init(&graph_config); graph_config.expected_node_count = 10; - entropy_WorkGraph graph = entropy_work_graph_create_with_config( - group, &graph_config, &status - ); + entropy_WorkGraph graph = entropy_work_graph_create_with_config(group, &graph_config, &status); check_status("Create work graph", status); // ======================================================================== @@ -193,10 +181,8 @@ int main(void) { load_ctx->task_id = 1; load_ctx->task_name = "loader"; - entropy_NodeHandle load_node = entropy_work_graph_add_node( - graph, load_data_task, load_ctx, "LoadData", - ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_NodeHandle load_node = + entropy_work_graph_add_node(graph, load_data_task, load_ctx, "LoadData", ENTROPY_EXEC_ANY_THREAD, &status); check_status("Add load node", status); // Fan-out: Three parallel processing tasks @@ -212,22 +198,16 @@ int main(void) { process_ctx3->task_id = 4; process_ctx3->task_name = "chunk-3"; - entropy_NodeHandle process1 = entropy_work_graph_add_node( - graph, process_data_task, process_ctx1, "Process-1", - ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_NodeHandle process1 = entropy_work_graph_add_node(graph, process_data_task, process_ctx1, "Process-1", + ENTROPY_EXEC_ANY_THREAD, &status); check_status("Add process1 node", status); - entropy_NodeHandle process2 = entropy_work_graph_add_node( - graph, process_data_task, process_ctx2, "Process-2", - ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_NodeHandle process2 = entropy_work_graph_add_node(graph, process_data_task, process_ctx2, "Process-2", + ENTROPY_EXEC_ANY_THREAD, &status); check_status("Add process2 node", status); - entropy_NodeHandle process3 = entropy_work_graph_add_node( - graph, 
process_data_task, process_ctx3, "Process-3", - ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_NodeHandle process3 = entropy_work_graph_add_node(graph, process_data_task, process_ctx3, "Process-3", + ENTROPY_EXEC_ANY_THREAD, &status); check_status("Add process3 node", status); // Yieldable polling task (runs in parallel with processing) @@ -236,8 +216,7 @@ int main(void) { yield_ctx->max_polls = 5; entropy_NodeHandle poller = entropy_work_graph_add_yieldable_node( - graph, polling_task, yield_ctx, "Poller", - ENTROPY_EXEC_ANY_THREAD, 10, &status // Max 10 reschedules + graph, polling_task, yield_ctx, "Poller", ENTROPY_EXEC_ANY_THREAD, 10, &status // Max 10 reschedules ); check_status("Add yieldable node", status); @@ -246,10 +225,8 @@ int main(void) { merge_ctx->task_id = 5; merge_ctx->task_name = "merger"; - entropy_NodeHandle merge = entropy_work_graph_add_node( - graph, merge_results_task, merge_ctx, "MergeResults", - ENTROPY_EXEC_ANY_THREAD, &status - ); + entropy_NodeHandle merge = entropy_work_graph_add_node(graph, merge_results_task, merge_ctx, "MergeResults", + ENTROPY_EXEC_ANY_THREAD, &status); check_status("Add merge node", status); // Main thread UI update (depends on merge completing) @@ -258,8 +235,7 @@ int main(void) { ui_ctx->task_name = "ui-updater"; entropy_NodeHandle ui_update = entropy_work_graph_add_node( - graph, update_ui_task, ui_ctx, "UpdateUI", - ENTROPY_EXEC_MAIN_THREAD, &status // Must run on main thread + graph, update_ui_task, ui_ctx, "UpdateUI", ENTROPY_EXEC_MAIN_THREAD, &status // Must run on main thread ); check_status("Add UI node", status); @@ -322,13 +298,12 @@ int main(void) { int iteration = 0; while (!entropy_work_graph_is_complete(graph)) { // Execute main thread work (UI updates, etc.) 
- size_t executed = entropy_work_contract_group_execute_main_thread_work( - group, 5, &status // Process up to 5 main thread tasks - ); + size_t executed = + entropy_work_contract_group_execute_main_thread_work(group, 5, &status // Process up to 5 main thread tasks + ); if (executed > 0) { - ENTROPY_LOG_DEBUG_CAT_F("Monitor", - "Executed %zu main thread tasks", executed); + ENTROPY_LOG_DEBUG_CAT_F("Monitor", "Executed %zu main thread tasks", executed); } // Print stats periodically diff --git a/Examples/WorkGraphYieldableExample.cpp b/Examples/WorkGraphYieldableExample.cpp index e6f3595..052ab7c 100644 --- a/Examples/WorkGraphYieldableExample.cpp +++ b/Examples/WorkGraphYieldableExample.cpp @@ -1,9 +1,10 @@ #define NOMINMAX #include + #include -#include #include #include +#include using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Logging; @@ -16,167 +17,186 @@ int main() { config.threadCount = 4; WorkService service(config); service.start(); - + WorkContractGroup group(1000); service.addWorkContractGroup(&group); ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Group added to service: {}", group.debugString())); - + // Example 1: Basic work graph with dependencies and main thread work { ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n=== Example 1: Basic Work Graph with Dependencies ==="); WorkGraph graph(&group); - + // Create nodes - auto task1 = graph.addNode([]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Task 1: Background work"); - std::this_thread::sleep_for(100ms); - }, "task1"); - - auto task2 = graph.addNode([]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Task 2: More background work"); - std::this_thread::sleep_for(100ms); - }, "task2"); - - auto mainThreadTask = graph.addNode([]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Main Thread Task: UI Update"); - std::this_thread::sleep_for(50ms); - }, "main-thread-task", nullptr, ExecutionType::MainThread); - - auto finalTask = graph.addNode([]() { - 
ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Final Task: Cleanup"); - }, "final"); - + auto task1 = graph.addNode( + []() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Task 1: Background work"); + std::this_thread::sleep_for(100ms); + }, + "task1"); + + auto task2 = graph.addNode( + []() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Task 2: More background work"); + std::this_thread::sleep_for(100ms); + }, + "task2"); + + auto mainThreadTask = graph.addNode( + []() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Main Thread Task: UI Update"); + std::this_thread::sleep_for(50ms); + }, + "main-thread-task", nullptr, ExecutionType::MainThread); + + auto finalTask = + graph.addNode([]() { ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Final Task: Cleanup"); }, "final"); + // Set dependencies: task1 -> task2 -> mainThreadTask -> finalTask graph.addDependency(task1, task2); graph.addDependency(task2, mainThreadTask); graph.addDependency(mainThreadTask, finalTask); - + // Execute graph.execute(); - + // Pump main thread work while (!graph.isComplete()) { group.executeMainThreadWork(10); std::this_thread::sleep_for(10ms); } - + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Graph 1 complete"); } - + // Example 2: Yieldable node that waits for atomic value { ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n=== Example 2: Yieldable Node Waiting for Atomic ==="); WorkGraph graph(&group); - + std::atomic ready{false}; - + // Producer sets the atomic after 500ms - auto producer = graph.addNode([&ready]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Producer: Working..."); - std::this_thread::sleep_for(1000ms); - ready = true; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Producer: Data ready!"); - }, "producer"); - + auto producer = graph.addNode( + [&ready]() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Producer: Working..."); + std::this_thread::sleep_for(1000ms); + ready = true; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Producer: Data ready!"); + }, + "producer"); + // Consumer yields until 
atomic is true - auto consumer = graph.addYieldableNode([&ready]() -> WorkResultContext { - static int attempts = 0; - attempts++; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Consumer: Attempt {} - checking...", attempts)); + auto consumer = graph.addYieldableNode( + [&ready]() -> WorkResultContext { + static int attempts = 0; + attempts++; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Consumer: Attempt {} - checking...", attempts)); - if (!ready.load()) { - std::this_thread::sleep_for(100ms); - return WorkResultContext::yield(); - } + if (!ready.load()) { + std::this_thread::sleep_for(100ms); + return WorkResultContext::yield(); + } + + ENTROPY_LOG_INFO_CAT("WorkGraphExample", + std::format("Consumer: Got data after {} attempts!", attempts)); + return WorkResultContext::complete(); + }, + "consumer", nullptr, ExecutionType::AnyThread, 20); // Max 20 attempts - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Consumer: Got data after {} attempts!", attempts)); - return WorkResultContext::complete(); - }, "consumer", nullptr, ExecutionType::AnyThread, 20); // Max 20 attempts - // Execute (no dependency - they run in parallel) graph.execute(); graph.wait(); - + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Graph 2 complete"); } - + // Example 3: Suspend/Resume functionality { ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n=== Example 3: Suspend and Resume Graph ==="); WorkGraph graph(&group); - + std::atomic counter{0}; - + // Create several nodes that increment counter - auto node1 = graph.addNode([&counter]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 1: Working..."); - std::this_thread::sleep_for(200ms); - counter++; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Node 1: Done (counter={})", counter.load())); - }, "node1"); - - auto node2 = graph.addNode([&counter]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 2: Working..."); - std::this_thread::sleep_for(200ms); - counter++; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", 
std::format("Node 2: Done (counter={})", counter.load())); - }, "node2"); - + auto node1 = graph.addNode( + [&counter]() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 1: Working..."); + std::this_thread::sleep_for(200ms); + counter++; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Node 1: Done (counter={})", counter.load())); + }, + "node1"); + + auto node2 = graph.addNode( + [&counter]() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 2: Working..."); + std::this_thread::sleep_for(200ms); + counter++; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Node 2: Done (counter={})", counter.load())); + }, + "node2"); + // Yieldable node that increments counter multiple times - auto yieldNode = graph.addYieldableNode([&counter]() -> WorkResultContext { - static int iterations = 0; - iterations++; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Yield Node: Iteration {}", iterations)); - counter++; - std::this_thread::sleep_for(100ms); - - if (iterations < 5) { - return WorkResultContext::yield(); - } - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Yield Node: Complete (counter={})", counter.load())); - return WorkResultContext::complete(); - }, "yield-node"); - - auto node3 = graph.addNode([&counter]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 3: Working..."); - std::this_thread::sleep_for(200ms); - counter++; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Node 3: Done (counter={})", counter.load())); - }, "node3"); - + auto yieldNode = graph.addYieldableNode( + [&counter]() -> WorkResultContext { + static int iterations = 0; + iterations++; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Yield Node: Iteration {}", iterations)); + counter++; + std::this_thread::sleep_for(100ms); + + if (iterations < 5) { + return WorkResultContext::yield(); + } + ENTROPY_LOG_INFO_CAT("WorkGraphExample", + std::format("Yield Node: Complete (counter={})", counter.load())); + return WorkResultContext::complete(); + }, + 
"yield-node"); + + auto node3 = graph.addNode( + [&counter]() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Node 3: Working..."); + std::this_thread::sleep_for(200ms); + counter++; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Node 3: Done (counter={})", counter.load())); + }, + "node3"); + // Set up dependencies: node1 -> node2 -> yieldNode -> node3 graph.addDependency(node1, node2); graph.addDependency(node2, yieldNode); graph.addDependency(yieldNode, node3); - + // Start execution graph.execute(); ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Graph started"); - + // Let it run for a bit std::this_thread::sleep_for(300ms); - + // Suspend the graph ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n>>> SUSPENDING GRAPH <<<"); graph.suspend(); ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Graph suspended (counter={})", counter.load())); - + // Wait while suspended - nothing new should schedule ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Waiting 1 second while suspended..."); std::this_thread::sleep_for(1000ms); ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Counter after suspension wait: {}", counter.load())); - + // Resume the graph ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n>>> RESUMING GRAPH <<<"); graph.resume(); ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Graph resumed"); - + // Wait for completion auto result = graph.wait(); ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Graph 3 complete (final counter={})", counter.load())); } - + // Example 4: Timed Yield - Sleep until specific time (NEW!) 
{ ENTROPY_LOG_INFO_CAT("WorkGraphExample", "\n=== Example 4: Timed Yield - Zero-CPU Waiting ==="); @@ -186,42 +206,46 @@ int main() { std::atomic dataReady{false}; // Simulated async operation that completes after 500ms - auto dataProvider = graph.addNode([&dataReady]() { - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Data Provider: Starting async operation..."); - std::this_thread::sleep_for(500ms); - dataReady.store(true); - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Data Provider: Data is ready!"); - }, "data-provider"); + auto dataProvider = graph.addNode( + [&dataReady]() { + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Data Provider: Starting async operation..."); + std::this_thread::sleep_for(500ms); + dataReady.store(true); + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Data Provider: Data is ready!"); + }, + "data-provider"); // Poller using timed yields - checks every 100ms without busy-waiting - auto poller = graph.addYieldableNode([&pollCount, &dataReady]() -> WorkResultContext { - pollCount++; - auto now = std::chrono::steady_clock::now(); - ENTROPY_LOG_INFO_CAT("WorkGraphExample", - std::format("Poller: Check #{} - data ready: {}", pollCount.load(), dataReady.load())); - - if (!dataReady.load()) { - // NOT READY: Yield until 100ms from now (NO CPU USAGE!) 
- auto wakeTime = now + 100ms; - ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Poller: Sleeping for 100ms..."); - return WorkResultContext::yieldUntil(wakeTime); - } - - // READY: Process and complete - ENTROPY_LOG_INFO_CAT("WorkGraphExample", - std::format("Poller: Data ready after {} polls!", pollCount.load())); - return WorkResultContext::complete(); - }, "poller"); + auto poller = graph.addYieldableNode( + [&pollCount, &dataReady]() -> WorkResultContext { + pollCount++; + auto now = std::chrono::steady_clock::now(); + ENTROPY_LOG_INFO_CAT("WorkGraphExample", std::format("Poller: Check #{} - data ready: {}", + pollCount.load(), dataReady.load())); + + if (!dataReady.load()) { + // NOT READY: Yield until 100ms from now (NO CPU USAGE!) + auto wakeTime = now + 100ms; + ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Poller: Sleeping for 100ms..."); + return WorkResultContext::yieldUntil(wakeTime); + } + + // READY: Process and complete + ENTROPY_LOG_INFO_CAT("WorkGraphExample", + std::format("Poller: Data ready after {} polls!", pollCount.load())); + return WorkResultContext::complete(); + }, + "poller"); // Execute (nodes run in parallel) graph.execute(); graph.wait(); ENTROPY_LOG_INFO_CAT("WorkGraphExample", - std::format("Graph 4 complete - Poller checked {} times (expected ~5)", pollCount.load())); + std::format("Graph 4 complete - Poller checked {} times (expected ~5)", pollCount.load())); ENTROPY_LOG_INFO_CAT("WorkGraphExample", "Note: Zero CPU usage while waiting - timer sleeps passively!"); } service.stop(); return 0; -} \ No newline at end of file +} diff --git a/Tests/Concurrency/MainThreadWorkTests.cpp b/Tests/Concurrency/MainThreadWorkTests.cpp index 784b257..9203374 100644 --- a/Tests/Concurrency/MainThreadWorkTests.cpp +++ b/Tests/Concurrency/MainThreadWorkTests.cpp @@ -1,5 +1,7 @@ #include + #include + #include "Concurrency/WorkContractGroup.h" #include "Concurrency/WorkGraphTypes.h" @@ -12,7 +14,8 @@ TEST(MainThreadWork, ScheduleAndDrain_MainThreadTasks) { 
const int N = 7; for (int i = 0; i < N; ++i) { - auto h = group.createContract([&ran]() noexcept { ran.fetch_add(1, std::memory_order_relaxed); }, ExecutionType::MainThread); + auto h = group.createContract([&ran]() noexcept { ran.fetch_add(1, std::memory_order_relaxed); }, + ExecutionType::MainThread); auto res = h.schedule(); ASSERT_TRUE(res == ScheduleResult::Scheduled || res == ScheduleResult::AlreadyScheduled); } diff --git a/Tests/Concurrency/WorkContractGroupAccountingTests.cpp b/Tests/Concurrency/WorkContractGroupAccountingTests.cpp index c9de13f..098f397 100644 --- a/Tests/Concurrency/WorkContractGroupAccountingTests.cpp +++ b/Tests/Concurrency/WorkContractGroupAccountingTests.cpp @@ -1,5 +1,7 @@ #include + #include + #include "Concurrency/WorkContractGroup.h" using namespace EntropyEngine::Core::Concurrency; diff --git a/Tests/Core/TimerTests.cpp b/Tests/Core/TimerTests.cpp index fe9d680..8b0c615 100644 --- a/Tests/Core/TimerTests.cpp +++ b/Tests/Core/TimerTests.cpp @@ -8,11 +8,13 @@ */ #include + +#include #include #include -#include -#include "Core/TimerService.h" + #include "Concurrency/WorkService.h" +#include "Core/TimerService.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::Concurrency; @@ -24,10 +26,11 @@ using namespace std::chrono_literals; #ifdef _WIN32 constexpr auto kWindowsDrainTime = 200ms; // Windows: slower callback completion #else -constexpr auto kUnixDrainTime = 50ms; // Unix (macOS, Linux): faster callback completion +constexpr auto kUnixDrainTime = 50ms; // Unix (macOS, Linux): faster callback completion #endif -class TimerServiceTest : public ::testing::Test { +class TimerServiceTest : public ::testing::Test +{ protected: void SetUp() override { // Create WorkService @@ -146,8 +149,7 @@ TEST_F(TimerServiceTest, OneShotTimer_Fires) { std::atomic fired{false}; auto timer = timerService->scheduleTimer( - 50ms, - [&fired]() { fired.store(true, std::memory_order_release); }, + 50ms, [&fired]() { 
fired.store(true, std::memory_order_release); }, false // One-shot ); @@ -156,8 +158,7 @@ TEST_F(TimerServiceTest, OneShotTimer_Fires) { // Wait for timer to fire - automatic pumping via main thread contract auto start = std::chrono::steady_clock::now(); - while (!fired.load(std::memory_order_acquire) && - std::chrono::steady_clock::now() - start < 500ms) { + while (!fired.load(std::memory_order_acquire) && std::chrono::steady_clock::now() - start < 500ms) { // Must pump main thread work for automatic timer pumping workService->executeMainThreadWork(10); std::this_thread::sleep_for(10ms); @@ -180,9 +181,8 @@ TEST_F(TimerServiceTest, OneShotTimer_DoesNotRepeat) { auto count = std::make_shared>(0); auto timer = timerService->scheduleTimer( - 50ms, - [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value - false // One-shot + 50ms, [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value + false // One-shot ); // Wait longer than one interval - automatic pumping via main thread work @@ -211,9 +211,8 @@ TEST_F(TimerServiceTest, RepeatingTimer_FiresMultipleTimes) { auto count = std::make_shared>(0); auto timer = timerService->scheduleTimer( - 50ms, - [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value - true // Repeating + 50ms, [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value + true // Repeating ); EXPECT_TRUE(timer.isValid()); @@ -249,9 +248,8 @@ TEST_F(TimerServiceTest, TimerCancellation_PreventsExecution) { auto fired = std::make_shared>(false); auto timer = timerService->scheduleTimer( - 100ms, - [fired]() { fired->store(true, std::memory_order_release); }, // Capture by value - false // One-shot + 100ms, [fired]() { fired->store(true, std::memory_order_release); }, // Capture by value + false // One-shot ); EXPECT_TRUE(timer.isValid()); @@ -275,9 +273,8 @@ TEST_F(TimerServiceTest, RepeatingTimer_CancellationStopsFiring) { auto count = 
std::make_shared>(0); auto timer = timerService->scheduleTimer( - 50ms, - [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value - true // Repeating + 50ms, [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value + true // Repeating ); // Let it fire a few times - automatic pumping via main thread work @@ -311,14 +308,12 @@ TEST_F(TimerServiceTest, MultipleTimers_ExecuteIndependently) { std::atomic count2{0}; auto timer1 = timerService->scheduleTimer( - 50ms, - [&count1]() { count1.fetch_add(1, std::memory_order_relaxed); }, + 50ms, [&count1]() { count1.fetch_add(1, std::memory_order_relaxed); }, true // Repeating ); auto timer2 = timerService->scheduleTimer( - 75ms, - [&count2]() { count2.fetch_add(1, std::memory_order_relaxed); }, + 75ms, [&count2]() { count2.fetch_add(1, std::memory_order_relaxed); }, true // Repeating ); @@ -359,13 +354,11 @@ TEST_F(TimerServiceTest, MainThreadTimer_ExecutesOnMainThread) { fired.store(true, std::memory_order_release); }, false, // One-shot - ExecutionType::MainThread - ); + ExecutionType::MainThread); // Pump main thread work for automatic timer pumping auto start = std::chrono::steady_clock::now(); - while (!fired.load(std::memory_order_acquire) && - std::chrono::steady_clock::now() - start < 500ms) { + while (!fired.load(std::memory_order_acquire) && std::chrono::steady_clock::now() - start < 500ms) { workService->executeMainThreadWork(10); std::this_thread::sleep_for(10ms); } @@ -388,9 +381,8 @@ TEST_F(TimerServiceTest, TimerMove_TransfersOwnership) { auto count = std::make_shared>(0); auto timer1 = timerService->scheduleTimer( - 50ms, - [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value - true // Repeating + 50ms, [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value + true // Repeating ); EXPECT_TRUE(timer1.isValid()); @@ -431,9 +423,8 @@ TEST_F(TimerServiceTest, TimerDestruction_CancelsTimer) { { auto timer 
= timerService->scheduleTimer( - 50ms, - [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value - true // Repeating + 50ms, [count]() { count->fetch_add(1, std::memory_order_relaxed); }, // Capture by value + true // Repeating ); // Let it fire once or twice diff --git a/Tests/MinimalGTestNoMain.cpp b/Tests/MinimalGTestNoMain.cpp index e21a3d0..09c6843 100644 --- a/Tests/MinimalGTestNoMain.cpp +++ b/Tests/MinimalGTestNoMain.cpp @@ -1,4 +1,5 @@ #include + #include TEST(Minimal, Test) { diff --git a/Tests/TestHelpers/VFSTestHelpers.cpp b/Tests/TestHelpers/VFSTestHelpers.cpp index 2980f5a..dbc494c 100644 --- a/Tests/TestHelpers/VFSTestHelpers.cpp +++ b/Tests/TestHelpers/VFSTestHelpers.cpp @@ -1,7 +1,9 @@ #include "VFSTestHelpers.h" + #include -namespace entropy::test_helpers { +namespace entropy::test_helpers +{ // Definitions are in the header for these simple RAII helpers. This translation unit // ensures the helpers are linked into each test executable (as CMake adds this file). 
} diff --git a/Tests/TestHelpers/VFSTestHelpers.h b/Tests/TestHelpers/VFSTestHelpers.h index 43133ff..9d35312 100644 --- a/Tests/TestHelpers/VFSTestHelpers.h +++ b/Tests/TestHelpers/VFSTestHelpers.h @@ -1,17 +1,19 @@ #pragma once +#include #include -#include #include -#include #include +#include #include "EntropyCore.h" #include "VirtualFileSystem/VirtualFileSystem.h" -namespace entropy::test_helpers { +namespace entropy::test_helpers +{ // RAII temporary directory that gets cleaned up on destruction -class ScopedTempDir { +class ScopedTempDir +{ public: ScopedTempDir() { namespace fs = std::filesystem; @@ -31,23 +33,28 @@ class ScopedTempDir { ~ScopedTempDir() { namespace fs = std::filesystem; std::error_code ec; - fs::remove_all(_path, ec); // best-effort cleanup + fs::remove_all(_path, ec); // best-effort cleanup } - const std::filesystem::path& path() const noexcept { return _path; } - std::filesystem::path join(const std::string& name) const { return _path / name; } + const std::filesystem::path& path() const noexcept { + return _path; + } + std::filesystem::path join(const std::string& name) const { + return _path / name; + } private: std::filesystem::path _path; }; // RAII environment with a running WorkService, a WorkContractGroup and a VFS instance -class ScopedWorkEnv { +class ScopedWorkEnv +{ public: ScopedWorkEnv() - : _service(EntropyEngine::Core::Concurrency::WorkService::Config{}) - , _group(2048, "TestVFSGroup") - , _vfs(&_group, EntropyEngine::Core::IO::VirtualFileSystem::Config{}) { + : _service(EntropyEngine::Core::Concurrency::WorkService::Config{}), + _group(2048, "TestVFSGroup"), + _vfs(&_group, EntropyEngine::Core::IO::VirtualFileSystem::Config{}) { _service.start(); _service.addWorkContractGroup(&_group); } @@ -58,8 +65,12 @@ class ScopedWorkEnv { _service.stop(); } - EntropyEngine::Core::IO::VirtualFileSystem& vfs() noexcept { return _vfs; } - EntropyEngine::Core::Concurrency::WorkContractGroup& group() noexcept { return _group; } + 
EntropyEngine::Core::IO::VirtualFileSystem& vfs() noexcept { + return _vfs; + } + EntropyEngine::Core::Concurrency::WorkContractGroup& group() noexcept { + return _group; + } private: EntropyEngine::Core::Concurrency::WorkService _service; @@ -67,4 +78,4 @@ class ScopedWorkEnv { EntropyEngine::Core::IO::VirtualFileSystem _vfs; }; -} // namespace entropy::test_helpers +} // namespace entropy::test_helpers diff --git a/Tests/TestWorkServiceSingleton.h b/Tests/TestWorkServiceSingleton.h index 728e677..1e0ca8f 100644 --- a/Tests/TestWorkServiceSingleton.h +++ b/Tests/TestWorkServiceSingleton.h @@ -8,13 +8,17 @@ #pragma once -#include "Concurrency/WorkService.h" #include #include -namespace EntropyEngine { -namespace Core { -namespace Testing { +#include "Concurrency/WorkService.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Testing +{ /** * @brief Singleton wrapper for WorkService in tests @@ -22,7 +26,8 @@ namespace Testing { * WorkService should only have one instance active at a time due to * static thread_local variables. This wrapper ensures that constraint. 
*/ -class TestWorkServiceSingleton { +class TestWorkServiceSingleton +{ private: static std::unique_ptr instance; static std::mutex mutex; @@ -54,6 +59,6 @@ class TestWorkServiceSingleton { inline std::unique_ptr TestWorkServiceSingleton::instance = nullptr; inline std::mutex TestWorkServiceSingleton::mutex; -} // namespace Testing -} // namespace Core -} // namespace EntropyEngine +} // namespace Testing +} // namespace Core +} // namespace EntropyEngine diff --git a/Tests/VirtualFileSystem/VFSErrorMappingTests.cpp b/Tests/VirtualFileSystem/VFSErrorMappingTests.cpp index b958174..80907b2 100644 --- a/Tests/VirtualFileSystem/VFSErrorMappingTests.cpp +++ b/Tests/VirtualFileSystem/VFSErrorMappingTests.cpp @@ -1,12 +1,14 @@ #include + #include -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" + #include "VFSTestHelpers.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" using namespace EntropyEngine::Core::IO; -using entropy::test_helpers::ScopedWorkEnv; using entropy::test_helpers::ScopedTempDir; +using entropy::test_helpers::ScopedWorkEnv; TEST(VFSErrorMapping, ReadNonexistentFile_ReturnsFileNotFound) { ScopedWorkEnv env; diff --git a/Tests/VirtualFileSystem/VFSSimpleTest.cpp b/Tests/VirtualFileSystem/VFSSimpleTest.cpp index 1337db6..a49e84f 100644 --- a/Tests/VirtualFileSystem/VFSSimpleTest.cpp +++ b/Tests/VirtualFileSystem/VFSSimpleTest.cpp @@ -1,13 +1,14 @@ +#include + +#include "../TestHelpers/VFSTestHelpers.h" #include "EntropyCore.h" -#include "VirtualFileSystem/VirtualFileSystem.h" #include "VirtualFileSystem/FileHandle.h" -#include "../TestHelpers/VFSTestHelpers.h" -#include +#include "VirtualFileSystem/VirtualFileSystem.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::IO; -using entropy::test_helpers::ScopedWorkEnv; using entropy::test_helpers::ScopedTempDir; +using entropy::test_helpers::ScopedWorkEnv; int main() { std::cout << "Starting VFS 
simple test...\n"; diff --git a/Tests/VirtualFileSystem/VFSTests.cpp b/Tests/VirtualFileSystem/VFSTests.cpp index 6628f07..e20f6a7 100644 --- a/Tests/VirtualFileSystem/VFSTests.cpp +++ b/Tests/VirtualFileSystem/VFSTests.cpp @@ -1,16 +1,18 @@ #include + #include #include + #include "EntropyCore.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" -#include "VirtualFileSystem/DirectoryHandle.h" #include "VFSTestHelpers.h" +#include "VirtualFileSystem/DirectoryHandle.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::IO; -using entropy::test_helpers::ScopedWorkEnv; using entropy::test_helpers::ScopedTempDir; +using entropy::test_helpers::ScopedWorkEnv; TEST(VFS, WriteReadDelete_InTempDir) { ScopedWorkEnv env; @@ -57,6 +59,6 @@ TEST(VFS, ListTempDir_ShowsCreatedFile) { ASSERT_EQ(list.status(), FileOpStatus::Complete) << "list failed: " << list.errorInfo().message; const auto& entries = list.directoryEntries(); - const bool found = std::any_of(entries.begin(), entries.end(), [](const auto& e){ return e.name == "file.txt"; }); + const bool found = std::any_of(entries.begin(), entries.end(), [](const auto& e) { return e.name == "file.txt"; }); EXPECT_TRUE(found) << "Expected to find file.txt in temp dir listing"; } diff --git a/Tests/VirtualFileSystem/VFSTextFidelityTests.cpp b/Tests/VirtualFileSystem/VFSTextFidelityTests.cpp index 61fbdf6..94a6874 100644 --- a/Tests/VirtualFileSystem/VFSTextFidelityTests.cpp +++ b/Tests/VirtualFileSystem/VFSTextFidelityTests.cpp @@ -1,13 +1,15 @@ #include + #include -#include "VirtualFileSystem/VirtualFileSystem.h" + +#include "VFSTestHelpers.h" #include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" #include "VirtualFileSystem/WriteBatch.h" -#include "VFSTestHelpers.h" using namespace EntropyEngine::Core::IO; -using 
entropy::test_helpers::ScopedWorkEnv; using entropy::test_helpers::ScopedTempDir; +using entropy::test_helpers::ScopedWorkEnv; static std::string readLineText(const FileOperationHandle& h) { auto bytes = h.contentsBytes(); @@ -28,9 +30,15 @@ TEST(VFSTextFidelity, AppendAndReadLines_PreservesOrder) { ASSERT_EQ(c.status(), FileOpStatus::Complete) << c.errorInfo().message; auto fh = vfs.createFileHandle(path); - auto l0 = fh.readLine(0); l0.wait(); ASSERT_EQ(l0.status(), FileOpStatus::Complete); - auto l1 = fh.readLine(1); l1.wait(); ASSERT_EQ(l1.status(), FileOpStatus::Complete); - auto l2 = fh.readLine(2); l2.wait(); ASSERT_EQ(l2.status(), FileOpStatus::Complete); + auto l0 = fh.readLine(0); + l0.wait(); + ASSERT_EQ(l0.status(), FileOpStatus::Complete); + auto l1 = fh.readLine(1); + l1.wait(); + ASSERT_EQ(l1.status(), FileOpStatus::Complete); + auto l2 = fh.readLine(2); + l2.wait(); + ASSERT_EQ(l2.status(), FileOpStatus::Complete); EXPECT_EQ(readLineText(l0), "alpha"); EXPECT_EQ(readLineText(l1), "beta"); @@ -56,7 +64,8 @@ TEST(VFSTextFidelity, WriteLine_BeyondEOF_ExtendsAndWrites) { ASSERT_EQ(wl.status(), FileOpStatus::Complete) << wl.errorInfo().message; // The written line should be readable exactly - auto r = fh.readLine(5); r.wait(); + auto r = fh.readLine(5); + r.wait(); ASSERT_EQ(r.status(), FileOpStatus::Complete); EXPECT_EQ(readLineText(r), std::string("delta")); } diff --git a/cmake/EntropyLinting.cmake b/cmake/EntropyLinting.cmake new file mode 100644 index 0000000..060898a --- /dev/null +++ b/cmake/EntropyLinting.cmake @@ -0,0 +1,27 @@ +# Enable Linting Control +option(ENTROPY_ENABLE_LINTING "Enable clang-tidy linting" ON) + +# Function to enable linting on a target +function(entropy_enable_linting target_name) + if(ENTROPY_ENABLE_LINTING) + find_program(CLANG_TIDY_PATH NAMES clang-tidy PATHS + /opt/homebrew/opt/llvm/bin + /usr/local/opt/llvm/bin + ) + if(CLANG_TIDY_PATH) + message(STATUS "Linting enabled for ${target_name}") + 
set_target_properties(${target_name} PROPERTIES + CXX_CLANG_TIDY "${CLANG_TIDY_PATH}" + ) + + # Add warning flags based on compiler + if(MSVC) + target_compile_options(${target_name} PRIVATE /W4 /analyze) + else() + target_compile_options(${target_name} PRIVATE -Wall -Wextra -Wpedantic) + endif() + else() + message(WARNING "clang-tidy not found, linting disabled for ${target_name}") + endif() + endif() +endfunction() diff --git a/include/entropy/entropy_concurrency_types.h b/include/entropy/entropy_concurrency_types.h index 880610f..b45a862 100644 --- a/include/entropy/entropy_concurrency_types.h +++ b/include/entropy/entropy_concurrency_types.h @@ -33,14 +33,14 @@ extern "C" { * * These extend the base EntropyStatus enum with concurrency-specific errors. */ -#define ENTROPY_ERR_ALREADY_SCHEDULED ((EntropyStatus)100) ///< Contract is already scheduled -#define ENTROPY_ERR_EXECUTING ((EntropyStatus)101) ///< Contract is currently executing -#define ENTROPY_ERR_GROUP_FULL ((EntropyStatus)102) ///< Work group has reached capacity -#define ENTROPY_ERR_NOT_SCHEDULED ((EntropyStatus)103) ///< Contract is not scheduled -#define ENTROPY_ERR_ALREADY_RUNNING ((EntropyStatus)104) ///< Service is already running -#define ENTROPY_ERR_NOT_RUNNING ((EntropyStatus)105) ///< Service is not running -#define ENTROPY_ERR_GROUP_EXISTS ((EntropyStatus)106) ///< Group is already registered -#define ENTROPY_ERR_GROUP_NOT_FOUND ((EntropyStatus)107) ///< Group was not found +#define ENTROPY_ERR_ALREADY_SCHEDULED ((EntropyStatus)100) ///< Contract is already scheduled +#define ENTROPY_ERR_EXECUTING ((EntropyStatus)101) ///< Contract is currently executing +#define ENTROPY_ERR_GROUP_FULL ((EntropyStatus)102) ///< Work group has reached capacity +#define ENTROPY_ERR_NOT_SCHEDULED ((EntropyStatus)103) ///< Contract is not scheduled +#define ENTROPY_ERR_ALREADY_RUNNING ((EntropyStatus)104) ///< Service is already running +#define ENTROPY_ERR_NOT_RUNNING ((EntropyStatus)105) ///< Service is not 
running +#define ENTROPY_ERR_GROUP_EXISTS ((EntropyStatus)106) ///< Group is already registered +#define ENTROPY_ERR_GROUP_NOT_FOUND ((EntropyStatus)107) ///< Group was not found // ============================================================================ // Contract State @@ -52,7 +52,8 @@ extern "C" { * These states track a contract's progress from allocation through completion. * State transitions are atomic and thread-safe. */ -typedef enum EntropyContractState { +typedef enum EntropyContractState +{ ENTROPY_CONTRACT_FREE = 0, ///< Contract slot is available for allocation ENTROPY_CONTRACT_ALLOCATED = 1, ///< Contract has been allocated but not scheduled ENTROPY_CONTRACT_SCHEDULED = 2, ///< Contract is scheduled and ready for execution @@ -69,12 +70,13 @@ typedef enum EntropyContractState { * * Indicates the outcome of attempting to change a contract's scheduling state. */ -typedef enum EntropyScheduleResult { - ENTROPY_SCHEDULE_SCHEDULED = 0, ///< Contract is now scheduled (successful schedule operation) - ENTROPY_SCHEDULE_ALREADY_SCHEDULED = 1,///< Contract was already scheduled (schedule operation failed) - ENTROPY_SCHEDULE_NOT_SCHEDULED = 2, ///< Contract is not scheduled (successful unschedule operation) - ENTROPY_SCHEDULE_EXECUTING = 3, ///< Cannot modify - currently executing - ENTROPY_SCHEDULE_INVALID = 4 ///< Invalid handle provided +typedef enum EntropyScheduleResult +{ + ENTROPY_SCHEDULE_SCHEDULED = 0, ///< Contract is now scheduled (successful schedule operation) + ENTROPY_SCHEDULE_ALREADY_SCHEDULED = 1, ///< Contract was already scheduled (schedule operation failed) + ENTROPY_SCHEDULE_NOT_SCHEDULED = 2, ///< Contract is not scheduled (successful unschedule operation) + ENTROPY_SCHEDULE_EXECUTING = 3, ///< Cannot modify - currently executing + ENTROPY_SCHEDULE_INVALID = 4 ///< Invalid handle provided } EntropyScheduleResult; // ============================================================================ @@ -87,9 +89,10 @@ typedef enum 
EntropyScheduleResult { * Determines where a contract is allowed to execute. Use MainThread for * work that must run on the main thread (UI updates, rendering setup, etc.). */ -typedef enum EntropyExecutionType { - ENTROPY_EXEC_ANY_THREAD = 0, ///< Runs on any worker thread from the pool - ENTROPY_EXEC_MAIN_THREAD = 1 ///< Must run on the main/UI thread +typedef enum EntropyExecutionType +{ + ENTROPY_EXEC_ANY_THREAD = 0, ///< Runs on any worker thread from the pool + ENTROPY_EXEC_MAIN_THREAD = 1 ///< Must run on the main/UI thread } EntropyExecutionType; // ============================================================================ @@ -159,10 +162,11 @@ typedef void (*EntropyWorkCallback)(void* user_data); * * Controls thread pool size and scheduling behavior. */ -typedef struct EntropyWorkServiceConfig { - uint32_t thread_count; ///< Worker thread count (0 = use all CPU cores) - size_t max_soft_failure_count; ///< Number of selection failures before sleeping - size_t failure_sleep_time_ns; ///< Sleep duration in nanoseconds when no work found +typedef struct EntropyWorkServiceConfig +{ + uint32_t thread_count; ///< Worker thread count (0 = use all CPU cores) + size_t max_soft_failure_count; ///< Number of selection failures before sleeping + size_t failure_sleep_time_ns; ///< Sleep duration in nanoseconds when no work found } EntropyWorkServiceConfig; /** @@ -170,10 +174,11 @@ typedef struct EntropyWorkServiceConfig { * * Provides detailed statistics about main thread work execution. 
*/ -typedef struct EntropyMainThreadWorkResult { - size_t contracts_executed; ///< Number of contracts actually executed - size_t groups_with_work; ///< Number of groups that had work available - EntropyBool more_work_available; ///< Whether there's more work that could be executed +typedef struct EntropyMainThreadWorkResult +{ + size_t contracts_executed; ///< Number of contracts actually executed + size_t groups_with_work; ///< Number of groups that had work available + EntropyBool more_work_available; ///< Whether there's more work that could be executed } EntropyMainThreadWorkResult; // ============================================================================ diff --git a/include/entropy/entropy_directory_handle.h b/include/entropy/entropy_directory_handle.h index 156c33e..635f0cd 100644 --- a/include/entropy/entropy_directory_handle.h +++ b/include/entropy/entropy_directory_handle.h @@ -9,8 +9,8 @@ * wait() on the returned FileOperationHandle to block until completion. */ -#include "entropy/entropy_vfs_types.h" #include "entropy/entropy_file_operation_handle.h" +#include "entropy/entropy_vfs_types.h" #ifdef __cplusplus extern "C" { @@ -32,10 +32,8 @@ extern "C" { * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_directory_handle_destroy() */ -ENTROPY_API entropy_DirectoryHandle entropy_directory_handle_clone( - entropy_DirectoryHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_DirectoryHandle entropy_directory_handle_clone(entropy_DirectoryHandle handle, + EntropyStatus* status); /** * @brief Destroy a directory handle @@ -75,11 +73,9 @@ ENTROPY_API void entropy_directory_handle_destroy(entropy_DirectoryHandle handle * entropy_file_operation_handle_destroy(op); * @endcode */ -ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_create( - entropy_DirectoryHandle handle, - EntropyBool create_parents, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle 
entropy_directory_handle_create(entropy_DirectoryHandle handle, + EntropyBool create_parents, + EntropyStatus* status); /** * @brief Remove the directory at this path @@ -95,11 +91,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_create( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_remove( - entropy_DirectoryHandle handle, - EntropyBool recursive, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_remove(entropy_DirectoryHandle handle, + EntropyBool recursive, EntropyStatus* status); /** * @brief List the contents of this directory @@ -131,11 +124,9 @@ ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_remove( * entropy_file_operation_handle_destroy(op); * @endcode */ -ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_list( - entropy_DirectoryHandle handle, - const EntropyListDirectoryOptions* options, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_list(entropy_DirectoryHandle handle, + const EntropyListDirectoryOptions* options, + EntropyStatus* status); /** * @brief Retrieve metadata for this directory @@ -149,10 +140,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_list( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_get_metadata( - entropy_DirectoryHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_get_metadata(entropy_DirectoryHandle handle, + EntropyStatus* status); /* ============================================================================ * Metadata Access @@ -170,11 +159,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_directory_handle_get_metadata( * 
@threadsafety Thread-safe * @ownership Returns borrowed pointer - do NOT free */ -ENTROPY_API const char* entropy_directory_handle_normalized_key( - entropy_DirectoryHandle handle, - EntropyStatus* status -); +ENTROPY_API const char* entropy_directory_handle_normalized_key(entropy_DirectoryHandle handle, EntropyStatus* status); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/include/entropy/entropy_file_handle.h b/include/entropy/entropy_file_handle.h index 866346c..e157ec8 100644 --- a/include/entropy/entropy_file_handle.h +++ b/include/entropy/entropy_file_handle.h @@ -9,8 +9,8 @@ * the returned FileOperationHandle to block until completion. */ -#include "entropy/entropy_vfs_types.h" #include "entropy/entropy_file_operation_handle.h" +#include "entropy/entropy_vfs_types.h" #ifdef __cplusplus extern "C" { @@ -32,10 +32,7 @@ extern "C" { * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_handle_destroy() */ -ENTROPY_API entropy_FileHandle entropy_file_handle_clone( - entropy_FileHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileHandle entropy_file_handle_clone(entropy_FileHandle handle, EntropyStatus* status); /** * @brief Destroy a file handle @@ -73,10 +70,7 @@ ENTROPY_API void entropy_file_handle_destroy(entropy_FileHandle handle); * entropy_file_operation_handle_destroy(op); * @endcode */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_all( - entropy_FileHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_all(entropy_FileHandle handle, EntropyStatus* status); /** * @brief Read a byte range from the file @@ -89,12 +83,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_all( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_range( - entropy_FileHandle handle, - 
uint64_t offset, - size_t length, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_range(entropy_FileHandle handle, uint64_t offset, + size_t length, EntropyStatus* status); /** * @brief Read a line by index (0-based) @@ -109,11 +99,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_range( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_line( - entropy_FileHandle handle, - size_t line_number, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_line(entropy_FileHandle handle, size_t line_number, + EntropyStatus* status); /* ============================================================================ * Write Operations @@ -131,11 +118,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_read_line( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_text( - entropy_FileHandle handle, - const char* text, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_text(entropy_FileHandle handle, const char* text, + EntropyStatus* status); /** * @brief Write full text to the file with options @@ -149,11 +133,7 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_text( * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_text_with_options( - entropy_FileHandle handle, - const char* text, - const EntropyWriteOptions* options, - EntropyStatus* status -); + entropy_FileHandle handle, const char* text, const EntropyWriteOptions* options, EntropyStatus* status); /** * @brief Write raw bytes to the file @@ -168,12 +148,9 @@ 
ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_text_with_ * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_bytes( - entropy_FileHandle handle, - const uint8_t* bytes, - size_t length, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_bytes(entropy_FileHandle handle, + const uint8_t* bytes, size_t length, + EntropyStatus* status); /** * @brief Write raw bytes to the file with options @@ -187,13 +164,9 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_bytes( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_bytes_with_options( - entropy_FileHandle handle, - const uint8_t* bytes, - size_t length, - const EntropyWriteOptions* options, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle +entropy_file_handle_write_all_bytes_with_options(entropy_FileHandle handle, const uint8_t* bytes, size_t length, + const EntropyWriteOptions* options, EntropyStatus* status); /** * @brief Write bytes starting at a specific offset @@ -207,13 +180,9 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_all_bytes_with * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_range( - entropy_FileHandle handle, - uint64_t offset, - const uint8_t* bytes, - size_t length, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_range(entropy_FileHandle handle, uint64_t offset, + const uint8_t* bytes, size_t length, + EntropyStatus* status); /** * @brief Replace a single line by index (0-based) @@ -229,12 +198,8 @@ ENTROPY_API 
entropy_FileOperationHandle entropy_file_handle_write_range( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_line( - entropy_FileHandle handle, - size_t line_number, - const char* line, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_line(entropy_FileHandle handle, size_t line_number, + const char* line, EntropyStatus* status); /* ============================================================================ * File Management @@ -249,10 +214,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_write_line( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_create_empty( - entropy_FileHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_create_empty(entropy_FileHandle handle, + EntropyStatus* status); /** * @brief Delete the file if it exists (idempotent) @@ -263,10 +226,7 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_create_empty( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_handle_remove( - entropy_FileHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_handle_remove(entropy_FileHandle handle, EntropyStatus* status); /* ============================================================================ * Metadata Access @@ -284,11 +244,8 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_handle_remove( * @threadsafety Thread-safe * @ownership Returns borrowed pointer - do NOT free */ -ENTROPY_API const char* entropy_file_handle_normalized_key( - entropy_FileHandle handle, - EntropyStatus* status -); +ENTROPY_API 
const char* entropy_file_handle_normalized_key(entropy_FileHandle handle, EntropyStatus* status); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/include/entropy/entropy_file_operation_handle.h b/include/entropy/entropy_file_operation_handle.h index 5f58c26..21b7a6d 100644 --- a/include/entropy/entropy_file_operation_handle.h +++ b/include/entropy/entropy_file_operation_handle.h @@ -30,10 +30,8 @@ extern "C" { * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_file_operation_handle_clone( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_file_operation_handle_clone(entropy_FileOperationHandle handle, + EntropyStatus* status); /** * @brief Destroy a file operation handle @@ -44,9 +42,7 @@ ENTROPY_API entropy_FileOperationHandle entropy_file_operation_handle_clone( * @param handle Handle to destroy (can be NULL) * @threadsafety Thread-safe */ -ENTROPY_API void entropy_file_operation_handle_destroy( - entropy_FileOperationHandle handle -); +ENTROPY_API void entropy_file_operation_handle_destroy(entropy_FileOperationHandle handle); /* ============================================================================ * Synchronization @@ -62,10 +58,7 @@ ENTROPY_API void entropy_file_operation_handle_destroy( * @param status Error reporting (required) * @threadsafety Thread-safe */ -ENTROPY_API void entropy_file_operation_handle_wait( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API void entropy_file_operation_handle_wait(entropy_FileOperationHandle handle, EntropyStatus* status); /** * @brief Get current operation status @@ -77,10 +70,8 @@ ENTROPY_API void entropy_file_operation_handle_wait( * @return Current status * @threadsafety Thread-safe */ -ENTROPY_API EntropyFileOpStatus entropy_file_operation_handle_status( - 
entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyFileOpStatus entropy_file_operation_handle_status(entropy_FileOperationHandle handle, + EntropyStatus* status); /* ============================================================================ * Read Results - only valid after wait() @@ -107,11 +98,8 @@ ENTROPY_API EntropyFileOpStatus entropy_file_operation_handle_status( * } * @endcode */ -ENTROPY_API const uint8_t* entropy_file_operation_handle_contents_bytes( - entropy_FileOperationHandle handle, - size_t* out_size, - EntropyStatus* status -); +ENTROPY_API const uint8_t* entropy_file_operation_handle_contents_bytes(entropy_FileOperationHandle handle, + size_t* out_size, EntropyStatus* status); /** * @brief Get read result as text @@ -125,10 +113,8 @@ ENTROPY_API const uint8_t* entropy_file_operation_handle_contents_bytes( * @threadsafety NOT thread-safe - do not call concurrently on same handle * @ownership Returns borrowed pointer - do NOT free */ -ENTROPY_API const char* entropy_file_operation_handle_contents_text( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API const char* entropy_file_operation_handle_contents_text(entropy_FileOperationHandle handle, + EntropyStatus* status); /* ============================================================================ * Write Results - only valid after wait() @@ -144,10 +130,8 @@ ENTROPY_API const char* entropy_file_operation_handle_contents_text( * @return Bytes written (0 if not a write operation) * @threadsafety Thread-safe */ -ENTROPY_API uint64_t entropy_file_operation_handle_bytes_written( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API uint64_t entropy_file_operation_handle_bytes_written(entropy_FileOperationHandle handle, + EntropyStatus* status); /* ============================================================================ * Metadata Results - only valid after wait() @@ -165,10 +149,8 @@ ENTROPY_API uint64_t 
entropy_file_operation_handle_bytes_written( * @threadsafety NOT thread-safe - do not call concurrently on same handle * @ownership Returns borrowed pointer - do NOT free */ -ENTROPY_API const EntropyFileMetadata* entropy_file_operation_handle_metadata( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API const EntropyFileMetadata* entropy_file_operation_handle_metadata(entropy_FileOperationHandle handle, + EntropyStatus* status); /* ============================================================================ * Directory Listing Results - only valid after wait() @@ -197,10 +179,7 @@ ENTROPY_API const EntropyFileMetadata* entropy_file_operation_handle_metadata( * @endcode */ ENTROPY_API const EntropyDirectoryEntry* entropy_file_operation_handle_directory_entries( - entropy_FileOperationHandle handle, - size_t* out_count, - EntropyStatus* status -); + entropy_FileOperationHandle handle, size_t* out_count, EntropyStatus* status); /* ============================================================================ * Error Information - only valid after wait() when status is Failed @@ -226,11 +205,9 @@ ENTROPY_API const EntropyDirectoryEntry* entropy_file_operation_handle_directory * } * @endcode */ -ENTROPY_API const EntropyFileErrorInfo* entropy_file_operation_handle_error_info( - entropy_FileOperationHandle handle, - EntropyStatus* status -); +ENTROPY_API const EntropyFileErrorInfo* entropy_file_operation_handle_error_info(entropy_FileOperationHandle handle, + EntropyStatus* status); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/include/entropy/entropy_vfs_types.h b/include/entropy/entropy_vfs_types.h index 4e5375e..f9460db 100644 --- a/include/entropy/entropy_vfs_types.h +++ b/include/entropy/entropy_vfs_types.h @@ -8,9 +8,10 @@ * It follows the hourglass pattern: stable C89 ABI with internal C++ implementation. 
*/ -#include "Core/entropy_c_api.h" -#include #include +#include + +#include "Core/entropy_c_api.h" #ifdef __cplusplus extern "C" { @@ -20,14 +21,14 @@ extern "C" { * Error Codes - VFS-specific extensions to EntropyStatus * ============================================================================ */ -#define ENTROPY_ERR_VFS_FILE_NOT_FOUND 100 -#define ENTROPY_ERR_VFS_ACCESS_DENIED 101 -#define ENTROPY_ERR_VFS_DISK_FULL 102 -#define ENTROPY_ERR_VFS_INVALID_PATH 103 -#define ENTROPY_ERR_VFS_IO_ERROR 104 -#define ENTROPY_ERR_VFS_NETWORK_ERROR 105 -#define ENTROPY_ERR_VFS_TIMEOUT 106 -#define ENTROPY_ERR_VFS_CONFLICT 107 +#define ENTROPY_ERR_VFS_FILE_NOT_FOUND 100 +#define ENTROPY_ERR_VFS_ACCESS_DENIED 101 +#define ENTROPY_ERR_VFS_DISK_FULL 102 +#define ENTROPY_ERR_VFS_INVALID_PATH 103 +#define ENTROPY_ERR_VFS_IO_ERROR 104 +#define ENTROPY_ERR_VFS_NETWORK_ERROR 105 +#define ENTROPY_ERR_VFS_TIMEOUT 106 +#define ENTROPY_ERR_VFS_CONFLICT 107 /* ============================================================================ * Enumerations @@ -36,56 +37,61 @@ extern "C" { /** * @brief Status of a file operation */ -typedef enum EntropyFileOpStatus { - ENTROPY_FILE_OP_PENDING = 0, /**< Operation scheduled but not started */ - ENTROPY_FILE_OP_RUNNING = 1, /**< Operation in progress */ - ENTROPY_FILE_OP_PARTIAL = 2, /**< Operation partially completed (e.g., EOF on read) */ - ENTROPY_FILE_OP_COMPLETE = 3, /**< Operation completed successfully */ - ENTROPY_FILE_OP_FAILED = 4 /**< Operation failed (check error info) */ +typedef enum EntropyFileOpStatus +{ + ENTROPY_FILE_OP_PENDING = 0, /**< Operation scheduled but not started */ + ENTROPY_FILE_OP_RUNNING = 1, /**< Operation in progress */ + ENTROPY_FILE_OP_PARTIAL = 2, /**< Operation partially completed (e.g., EOF on read) */ + ENTROPY_FILE_OP_COMPLETE = 3, /**< Operation completed successfully */ + ENTROPY_FILE_OP_FAILED = 4 /**< Operation failed (check error info) */ } EntropyFileOpStatus; /** * @brief File operation error 
taxonomy */ -typedef enum EntropyFileError { - ENTROPY_FILE_ERROR_NONE = 0, /**< No error */ - ENTROPY_FILE_ERROR_FILE_NOT_FOUND, /**< Path does not exist when required */ - ENTROPY_FILE_ERROR_ACCESS_DENIED, /**< Permission denied */ - ENTROPY_FILE_ERROR_DISK_FULL, /**< No space left on device */ - ENTROPY_FILE_ERROR_INVALID_PATH, /**< Malformed path or parent missing */ - ENTROPY_FILE_ERROR_IO_ERROR, /**< Other I/O failure */ - ENTROPY_FILE_ERROR_NETWORK_ERROR, /**< Remote backend transport failure */ - ENTROPY_FILE_ERROR_TIMEOUT, /**< Operation timed out */ - ENTROPY_FILE_ERROR_CONFLICT, /**< Contention detected */ - ENTROPY_FILE_ERROR_UNKNOWN /**< Unknown error */ +typedef enum EntropyFileError +{ + ENTROPY_FILE_ERROR_NONE = 0, /**< No error */ + ENTROPY_FILE_ERROR_FILE_NOT_FOUND, /**< Path does not exist when required */ + ENTROPY_FILE_ERROR_ACCESS_DENIED, /**< Permission denied */ + ENTROPY_FILE_ERROR_DISK_FULL, /**< No space left on device */ + ENTROPY_FILE_ERROR_INVALID_PATH, /**< Malformed path or parent missing */ + ENTROPY_FILE_ERROR_IO_ERROR, /**< Other I/O failure */ + ENTROPY_FILE_ERROR_NETWORK_ERROR, /**< Remote backend transport failure */ + ENTROPY_FILE_ERROR_TIMEOUT, /**< Operation timed out */ + ENTROPY_FILE_ERROR_CONFLICT, /**< Contention detected */ + ENTROPY_FILE_ERROR_UNKNOWN /**< Unknown error */ } EntropyFileError; /** * @brief Stream access mode */ -typedef enum EntropyStreamMode { - ENTROPY_STREAM_MODE_READ = 0, /**< Read-only */ - ENTROPY_STREAM_MODE_WRITE = 1, /**< Write-only */ - ENTROPY_STREAM_MODE_READ_WRITE = 2 /**< Read-write */ +typedef enum EntropyStreamMode +{ + ENTROPY_STREAM_MODE_READ = 0, /**< Read-only */ + ENTROPY_STREAM_MODE_WRITE = 1, /**< Write-only */ + ENTROPY_STREAM_MODE_READ_WRITE = 2 /**< Read-write */ } EntropyStreamMode; /** * @brief Directory listing sort order */ -typedef enum EntropySortOrder { - ENTROPY_SORT_NONE = 0, /**< No sorting */ - ENTROPY_SORT_BY_NAME = 1, /**< Sort by name */ - ENTROPY_SORT_BY_SIZE = 
2, /**< Sort by file size */ - ENTROPY_SORT_BY_MODIFIED_TIME = 3 /**< Sort by modification time */ +typedef enum EntropySortOrder +{ + ENTROPY_SORT_NONE = 0, /**< No sorting */ + ENTROPY_SORT_BY_NAME = 1, /**< Sort by name */ + ENTROPY_SORT_BY_SIZE = 2, /**< Sort by file size */ + ENTROPY_SORT_BY_MODIFIED_TIME = 3 /**< Sort by modification time */ } EntropySortOrder; /** * @brief Stream seek direction */ -typedef enum EntropySeekDir { - ENTROPY_SEEK_BEGIN = 0, /**< Seek from beginning */ - ENTROPY_SEEK_CURRENT = 1, /**< Seek from current position */ - ENTROPY_SEEK_END = 2 /**< Seek from end */ +typedef enum EntropySeekDir +{ + ENTROPY_SEEK_BEGIN = 0, /**< Seek from beginning */ + ENTROPY_SEEK_CURRENT = 1, /**< Seek from current position */ + ENTROPY_SEEK_END = 2 /**< Seek from end */ } EntropySeekDir; /* ============================================================================ @@ -117,7 +123,8 @@ typedef struct entropy_WriteBatch_t* entropy_WriteBatch; /** * @brief Configuration for VirtualFileSystem */ -typedef struct EntropyVFSConfig { +typedef struct EntropyVFSConfig +{ /** Serialize writes per path (advisory locking) */ EntropyBool serialize_writes_per_path; @@ -146,7 +153,8 @@ typedef struct EntropyVFSConfig { /** * @brief Options for file read operations */ -typedef struct EntropyReadOptions { +typedef struct EntropyReadOptions +{ /** Starting byte offset */ uint64_t offset; @@ -160,7 +168,8 @@ typedef struct EntropyReadOptions { /** * @brief Options for file write operations */ -typedef struct EntropyWriteOptions { +typedef struct EntropyWriteOptions +{ /** Starting byte offset (ignored if append=true) */ uint64_t offset; @@ -195,7 +204,8 @@ typedef struct EntropyWriteOptions { /** * @brief Options for stream operations */ -typedef struct EntropyStreamOptions { +typedef struct EntropyStreamOptions +{ /** Access mode */ EntropyStreamMode mode; @@ -209,7 +219,8 @@ typedef struct EntropyStreamOptions { /** * @brief Options for directory listing */ -typedef 
struct EntropyListDirectoryOptions { +typedef struct EntropyListDirectoryOptions +{ /** Recurse into subdirectories */ EntropyBool recursive; @@ -239,7 +250,8 @@ typedef struct EntropyListDirectoryOptions { /** * @brief Error information for failed operations */ -typedef struct EntropyFileErrorInfo { +typedef struct EntropyFileErrorInfo +{ /** Error code */ EntropyFileError code; @@ -259,7 +271,8 @@ typedef struct EntropyFileErrorInfo { /** * @brief File metadata */ -typedef struct EntropyFileMetadata { +typedef struct EntropyFileMetadata +{ /** Full path */ const char* path; @@ -297,7 +310,8 @@ typedef struct EntropyFileMetadata { /** * @brief Directory entry with metadata */ -typedef struct EntropyDirectoryEntry { +typedef struct EntropyDirectoryEntry +{ /** Filename only (borrowed pointer) */ const char* name; @@ -317,7 +331,8 @@ typedef struct EntropyDirectoryEntry { /** * @brief I/O operation result */ -typedef struct EntropyIoResult { +typedef struct EntropyIoResult +{ /** Bytes transferred */ size_t bytes_transferred; @@ -382,5 +397,5 @@ ENTROPY_API const char* entropy_file_op_status_to_string(EntropyFileOpStatus sta ENTROPY_API const char* entropy_file_error_to_string(EntropyFileError error); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/include/entropy/entropy_virtual_file_system.h b/include/entropy/entropy_virtual_file_system.h index 8d0df35..ae7e4e3 100644 --- a/include/entropy/entropy_virtual_file_system.h +++ b/include/entropy/entropy_virtual_file_system.h @@ -40,10 +40,7 @@ extern "C" { * entropy_work_contract_group_destroy(group); * @endcode */ -ENTROPY_API entropy_VirtualFileSystem entropy_vfs_create( - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API entropy_VirtualFileSystem entropy_vfs_create(entropy_WorkContractGroup group, EntropyStatus* status); /** * @brief Create a VirtualFileSystem with custom configuration @@ -55,11 +52,9 @@ ENTROPY_API entropy_VirtualFileSystem entropy_vfs_create( * 
@threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_vfs_destroy() */ -ENTROPY_API entropy_VirtualFileSystem entropy_vfs_create_with_config( - entropy_WorkContractGroup group, - const EntropyVFSConfig* config, - EntropyStatus* status -); +ENTROPY_API entropy_VirtualFileSystem entropy_vfs_create_with_config(entropy_WorkContractGroup group, + const EntropyVFSConfig* config, + EntropyStatus* status); /** * @brief Destroy a VirtualFileSystem and release resources @@ -94,11 +89,8 @@ ENTROPY_API void entropy_vfs_destroy(entropy_VirtualFileSystem vfs); * entropy_file_handle_destroy(fh); * @endcode */ -ENTROPY_API entropy_FileHandle entropy_vfs_create_file_handle( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -); +ENTROPY_API entropy_FileHandle entropy_vfs_create_file_handle(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status); /** * @brief Create a directory handle for the given path @@ -112,11 +104,8 @@ ENTROPY_API entropy_FileHandle entropy_vfs_create_file_handle( * @threadsafety Thread-safe * @ownership Returns owned pointer - must call entropy_directory_handle_destroy() */ -ENTROPY_API entropy_DirectoryHandle entropy_vfs_create_directory_handle( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -); +ENTROPY_API entropy_DirectoryHandle entropy_vfs_create_directory_handle(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status); /** * @brief Create a write batch for atomic multi-line file editing @@ -140,12 +129,9 @@ ENTROPY_API entropy_DirectoryHandle entropy_vfs_create_directory_handle( * entropy_write_batch_destroy(batch); * @endcode */ -ENTROPY_API entropy_WriteBatch entropy_vfs_create_write_batch( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -); +ENTROPY_API entropy_WriteBatch entropy_vfs_create_write_batch(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status); #ifdef __cplusplus -} // extern 
"C" +} // extern "C" #endif diff --git a/include/entropy/entropy_work_contract_group.h b/include/entropy/entropy_work_contract_group.h index ede6140..aa5a441 100644 --- a/include/entropy/entropy_work_contract_group.h +++ b/include/entropy/entropy_work_contract_group.h @@ -68,11 +68,8 @@ extern "C" { * } * @endcode */ -ENTROPY_API entropy_WorkContractGroup entropy_work_contract_group_create( - size_t capacity, - const char* name, - EntropyStatus* status -); +ENTROPY_API entropy_WorkContractGroup entropy_work_contract_group_create(size_t capacity, const char* name, + EntropyStatus* status); /** * @brief Destroys a work contract group and frees resources @@ -99,9 +96,7 @@ ENTROPY_API entropy_WorkContractGroup entropy_work_contract_group_create( * entropy_work_contract_group_destroy(group); * @endcode */ -ENTROPY_API void entropy_work_contract_group_destroy( - entropy_WorkContractGroup group -); +ENTROPY_API void entropy_work_contract_group_destroy(entropy_WorkContractGroup group); // ============================================================================ // Contract Creation @@ -149,13 +144,11 @@ ENTROPY_API void entropy_work_contract_group_destroy( * } * @endcode */ -ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_create_contract( - entropy_WorkContractGroup group, - EntropyWorkCallback callback, - void* user_data, - EntropyExecutionType execution_type, - EntropyStatus* status -); +ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_create_contract(entropy_WorkContractGroup group, + EntropyWorkCallback callback, + void* user_data, + EntropyExecutionType execution_type, + EntropyStatus* status); // ============================================================================ // Synchronization @@ -189,10 +182,7 @@ ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_create_contra * printf("All work finished!\n"); * @endcode */ -ENTROPY_API void entropy_work_contract_group_wait( - entropy_WorkContractGroup group, - 
EntropyStatus* status -); +ENTROPY_API void entropy_work_contract_group_wait(entropy_WorkContractGroup group, EntropyStatus* status); /** * @brief Stops the group from accepting new work selections @@ -214,10 +204,7 @@ ENTROPY_API void entropy_work_contract_group_wait( * entropy_work_contract_group_destroy(group); * @endcode */ -ENTROPY_API void entropy_work_contract_group_stop( - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API void entropy_work_contract_group_stop(entropy_WorkContractGroup group, EntropyStatus* status); /** * @brief Resumes the group to allow new work selections @@ -238,10 +225,7 @@ ENTROPY_API void entropy_work_contract_group_stop( * entropy_work_contract_group_resume(group, &status); * @endcode */ -ENTROPY_API void entropy_work_contract_group_resume( - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API void entropy_work_contract_group_resume(entropy_WorkContractGroup group, EntropyStatus* status); /** * @brief Checks if the group is in the process of stopping @@ -257,9 +241,7 @@ ENTROPY_API void entropy_work_contract_group_resume( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_contract_group_is_stopping( - entropy_WorkContractGroup group -); +ENTROPY_API EntropyBool entropy_work_contract_group_is_stopping(entropy_WorkContractGroup group); // ============================================================================ // Statistics and Monitoring @@ -273,9 +255,7 @@ ENTROPY_API EntropyBool entropy_work_contract_group_is_stopping( * * @threadsafety Thread-safe */ -ENTROPY_API size_t entropy_work_contract_group_capacity( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_capacity(entropy_WorkContractGroup group); /** * @brief Gets the number of currently allocated contracts @@ -294,9 +274,7 @@ ENTROPY_API size_t entropy_work_contract_group_capacity( * used, capacity, (used * 100.0) / capacity); * @endcode */ -ENTROPY_API size_t 
entropy_work_contract_group_active_count( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_active_count(entropy_WorkContractGroup group); /** * @brief Gets the number of contracts currently scheduled for execution @@ -316,9 +294,7 @@ ENTROPY_API size_t entropy_work_contract_group_active_count( * } * @endcode */ -ENTROPY_API size_t entropy_work_contract_group_scheduled_count( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_scheduled_count(entropy_WorkContractGroup group); /** * @brief Gets the number of contracts currently executing @@ -330,9 +306,7 @@ ENTROPY_API size_t entropy_work_contract_group_scheduled_count( * * @threadsafety Thread-safe */ -ENTROPY_API size_t entropy_work_contract_group_executing_count( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_executing_count(entropy_WorkContractGroup group); /** * @brief Gets the number of main thread contracts currently scheduled @@ -342,9 +316,7 @@ ENTROPY_API size_t entropy_work_contract_group_executing_count( * * @threadsafety Thread-safe */ -ENTROPY_API size_t entropy_work_contract_group_main_thread_scheduled_count( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_main_thread_scheduled_count(entropy_WorkContractGroup group); /** * @brief Gets the number of main thread contracts currently executing @@ -354,9 +326,7 @@ ENTROPY_API size_t entropy_work_contract_group_main_thread_scheduled_count( * * @threadsafety Thread-safe */ -ENTROPY_API size_t entropy_work_contract_group_main_thread_executing_count( - entropy_WorkContractGroup group -); +ENTROPY_API size_t entropy_work_contract_group_main_thread_executing_count(entropy_WorkContractGroup group); /** * @brief Checks if there are any main thread contracts ready to execute @@ -375,9 +345,7 @@ ENTROPY_API size_t entropy_work_contract_group_main_thread_executing_count( * } * @endcode */ -ENTROPY_API EntropyBool 
entropy_work_contract_group_has_main_thread_work( - entropy_WorkContractGroup group -); +ENTROPY_API EntropyBool entropy_work_contract_group_has_main_thread_work(entropy_WorkContractGroup group); // ============================================================================ // Main Thread Execution @@ -407,10 +375,8 @@ ENTROPY_API EntropyBool entropy_work_contract_group_has_main_thread_work( * } * @endcode */ -ENTROPY_API size_t entropy_work_contract_group_execute_all_main_thread_work( - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API size_t entropy_work_contract_group_execute_all_main_thread_work(entropy_WorkContractGroup group, + EntropyStatus* status); /** * @brief Executes main thread targeted work contracts with a limit @@ -436,11 +402,8 @@ ENTROPY_API size_t entropy_work_contract_group_execute_all_main_thread_work( * } * @endcode */ -ENTROPY_API size_t entropy_work_contract_group_execute_main_thread_work( - entropy_WorkContractGroup group, - size_t max_contracts, - EntropyStatus* status -); +ENTROPY_API size_t entropy_work_contract_group_execute_main_thread_work(entropy_WorkContractGroup group, + size_t max_contracts, EntropyStatus* status); // ============================================================================ // Advanced Execution Control (for custom executors) @@ -478,11 +441,9 @@ ENTROPY_API size_t entropy_work_contract_group_execute_main_thread_work( * } * @endcode */ -ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_select_for_execution( - entropy_WorkContractGroup group, - uint64_t* bias, - EntropyStatus* status -); +ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_select_for_execution(entropy_WorkContractGroup group, + uint64_t* bias, + EntropyStatus* status); /** * @brief Executes the work function of a contract @@ -496,11 +457,8 @@ ENTROPY_API entropy_WorkContractHandle entropy_work_contract_group_select_for_ex * * @threadsafety Thread-safe, but each handle can only be executed 
once */ -ENTROPY_API void entropy_work_contract_group_execute_contract( - entropy_WorkContractGroup group, - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API void entropy_work_contract_group_execute_contract(entropy_WorkContractGroup group, + entropy_WorkContractHandle handle, EntropyStatus* status); /** * @brief Completes execution and cleans up a contract @@ -514,11 +472,9 @@ ENTROPY_API void entropy_work_contract_group_execute_contract( * * @threadsafety Thread-safe */ -ENTROPY_API void entropy_work_contract_group_complete_execution( - entropy_WorkContractGroup group, - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API void entropy_work_contract_group_complete_execution(entropy_WorkContractGroup group, + entropy_WorkContractHandle handle, + EntropyStatus* status); #ifdef __cplusplus } diff --git a/include/entropy/entropy_work_contract_handle.h b/include/entropy/entropy_work_contract_handle.h index 4d503b8..13052b2 100644 --- a/include/entropy/entropy_work_contract_handle.h +++ b/include/entropy/entropy_work_contract_handle.h @@ -56,10 +56,8 @@ extern "C" { * } * @endcode */ -ENTROPY_API EntropyScheduleResult entropy_work_contract_schedule( - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyScheduleResult entropy_work_contract_schedule(entropy_WorkContractHandle handle, + EntropyStatus* status); /** * @brief Attempts to remove a contract from the ready set @@ -84,10 +82,8 @@ ENTROPY_API EntropyScheduleResult entropy_work_contract_schedule( * } * @endcode */ -ENTROPY_API EntropyScheduleResult entropy_work_contract_unschedule( - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyScheduleResult entropy_work_contract_unschedule(entropy_WorkContractHandle handle, + EntropyStatus* status); /** * @brief Checks whether a handle still refers to a live contract @@ -108,9 +104,7 @@ ENTROPY_API EntropyScheduleResult entropy_work_contract_unschedule( * } * 
@endcode */ -ENTROPY_API EntropyBool entropy_work_contract_is_valid( - entropy_WorkContractHandle handle -); +ENTROPY_API EntropyBool entropy_work_contract_is_valid(entropy_WorkContractHandle handle); /** * @brief Immediately frees a contract's slot @@ -131,9 +125,7 @@ ENTROPY_API EntropyBool entropy_work_contract_is_valid( * handle = NULL; // Good practice * @endcode */ -ENTROPY_API void entropy_work_contract_release( - entropy_WorkContractHandle handle -); +ENTROPY_API void entropy_work_contract_release(entropy_WorkContractHandle handle); /** * @brief Destroy a work contract handle @@ -155,9 +147,7 @@ ENTROPY_API void entropy_work_contract_release( * handle = NULL; // Good practice * @endcode */ -ENTROPY_API void entropy_work_contract_handle_destroy( - entropy_WorkContractHandle handle -); +ENTROPY_API void entropy_work_contract_handle_destroy(entropy_WorkContractHandle handle); /** * @brief Reports whether the contract is currently scheduled @@ -175,10 +165,7 @@ ENTROPY_API void entropy_work_contract_handle_destroy( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_contract_is_scheduled( - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyBool entropy_work_contract_is_scheduled(entropy_WorkContractHandle handle, EntropyStatus* status); /** * @brief Reports whether the contract is currently executing @@ -196,10 +183,7 @@ ENTROPY_API EntropyBool entropy_work_contract_is_scheduled( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_contract_is_executing( - entropy_WorkContractHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyBool entropy_work_contract_is_executing(entropy_WorkContractHandle handle, EntropyStatus* status); /** * @brief Gets the current state of a contract @@ -221,10 +205,8 @@ ENTROPY_API EntropyBool entropy_work_contract_is_executing( * } * @endcode */ -ENTROPY_API EntropyContractState entropy_work_contract_get_state( - entropy_WorkContractHandle handle, - EntropyStatus* status -); 
+ENTROPY_API EntropyContractState entropy_work_contract_get_state(entropy_WorkContractHandle handle, + EntropyStatus* status); #ifdef __cplusplus } diff --git a/include/entropy/entropy_work_graph.h b/include/entropy/entropy_work_graph.h index 855681c..aec3bdb 100644 --- a/include/entropy/entropy_work_graph.h +++ b/include/entropy/entropy_work_graph.h @@ -53,15 +53,16 @@ extern "C" { * These states track a node's progress from creation through completion. * State transitions are atomic and thread-safe. */ -typedef enum EntropyNodeState { - ENTROPY_NODE_PENDING = 0, ///< Waiting for dependencies - can't run yet - ENTROPY_NODE_READY = 1, ///< All dependencies satisfied, waiting for thread +typedef enum EntropyNodeState +{ + ENTROPY_NODE_PENDING = 0, ///< Waiting for dependencies - can't run yet + ENTROPY_NODE_READY = 1, ///< All dependencies satisfied, waiting for thread ENTROPY_NODE_SCHEDULED = 2, ///< Submitted to WorkContractGroup, in queue ENTROPY_NODE_EXECUTING = 3, ///< Currently running on a worker thread ENTROPY_NODE_COMPLETED = 4, ///< Finished successfully - triggered children - ENTROPY_NODE_FAILED = 5, ///< Exception thrown - children will be cancelled + ENTROPY_NODE_FAILED = 5, ///< Exception thrown - children will be cancelled ENTROPY_NODE_CANCELLED = 6, ///< Skipped due to parent failure - never ran - ENTROPY_NODE_YIELDED = 7 ///< Suspended execution, will be rescheduled + ENTROPY_NODE_YIELDED = 7 ///< Suspended execution, will be rescheduled } EntropyNodeState; /** @@ -70,9 +71,10 @@ typedef enum EntropyNodeState { * Allows work functions to control their execution flow. Complete means * the work is done, Yield means suspend and reschedule later. 
*/ -typedef enum EntropyWorkResult { - ENTROPY_WORK_COMPLETE = 0, ///< Work is done, proceed to completion - ENTROPY_WORK_YIELD = 1 ///< Suspend and reschedule for later execution +typedef enum EntropyWorkResult +{ + ENTROPY_WORK_COMPLETE = 0, ///< Work is done, proceed to completion + ENTROPY_WORK_YIELD = 1 ///< Suspend and reschedule for later execution } EntropyWorkResult; // ============================================================================ @@ -137,15 +139,16 @@ typedef EntropyWorkResult (*EntropyYieldableWorkCallback)(void* user_data); * * Controls optional features and tuning parameters. */ -typedef struct EntropyWorkGraphConfig { - EntropyBool enable_events; ///< Enable event bus for monitoring - EntropyBool enable_state_manager; ///< Enable advanced state management - EntropyBool enable_advanced_scheduling; ///< Enable priority queues, affinity - size_t expected_node_count; ///< Expected nodes (for pre-allocation) - size_t max_deferred_nodes; ///< Maximum deferred queue size (0 = unlimited) - size_t max_deferred_processing_iterations; ///< Max iterations when processing deferred - EntropyBool enable_debug_logging; ///< Enable verbose debug output - EntropyBool enable_debug_registration; ///< Make graph visible in debug tools +typedef struct EntropyWorkGraphConfig +{ + EntropyBool enable_events; ///< Enable event bus for monitoring + EntropyBool enable_state_manager; ///< Enable advanced state management + EntropyBool enable_advanced_scheduling; ///< Enable priority queues, affinity + size_t expected_node_count; ///< Expected nodes (for pre-allocation) + size_t max_deferred_nodes; ///< Maximum deferred queue size (0 = unlimited) + size_t max_deferred_processing_iterations; ///< Max iterations when processing deferred + EntropyBool enable_debug_logging; ///< Enable verbose debug output + EntropyBool enable_debug_registration; ///< Make graph visible in debug tools } EntropyWorkGraphConfig; /** @@ -153,11 +156,12 @@ typedef struct 
EntropyWorkGraphConfig { * * Provides detailed statistics about graph execution after wait() completes. */ -typedef struct EntropyWaitResult { - EntropyBool all_completed; ///< True only if every single node succeeded - uint32_t dropped_count; ///< Nodes we couldn't schedule (queue overflow) - uint32_t failed_count; ///< Nodes that threw exceptions - uint32_t completed_count; ///< Nodes that ran successfully +typedef struct EntropyWaitResult +{ + EntropyBool all_completed; ///< True only if every single node succeeded + uint32_t dropped_count; ///< Nodes we couldn't schedule (queue overflow) + uint32_t failed_count; ///< Nodes that threw exceptions + uint32_t completed_count; ///< Nodes that ran successfully } EntropyWaitResult; /** @@ -165,16 +169,17 @@ typedef struct EntropyWaitResult { * * Provides a consistent view of graph execution state. */ -typedef struct EntropyWorkGraphStats { - uint32_t total_nodes; ///< Total number of nodes in the graph - uint32_t completed_nodes; ///< Successfully finished nodes - uint32_t failed_nodes; ///< Nodes that threw exceptions - uint32_t cancelled_nodes; ///< Nodes skipped due to parent failure - uint32_t pending_nodes; ///< Waiting for dependencies - uint32_t ready_nodes; ///< Ready but not yet scheduled - uint32_t scheduled_nodes; ///< In the work queue - uint32_t executing_nodes; ///< Currently running - size_t memory_usage; ///< Approximate memory consumption in bytes +typedef struct EntropyWorkGraphStats +{ + uint32_t total_nodes; ///< Total number of nodes in the graph + uint32_t completed_nodes; ///< Successfully finished nodes + uint32_t failed_nodes; ///< Nodes that threw exceptions + uint32_t cancelled_nodes; ///< Nodes skipped due to parent failure + uint32_t pending_nodes; ///< Waiting for dependencies + uint32_t ready_nodes; ///< Ready but not yet scheduled + uint32_t scheduled_nodes; ///< In the work queue + uint32_t executing_nodes; ///< Currently running + size_t memory_usage; ///< Approximate memory 
consumption in bytes } EntropyWorkGraphStats; // ============================================================================ @@ -238,10 +243,7 @@ ENTROPY_API const char* entropy_work_result_to_string(EntropyWorkResult result); * } * @endcode */ -ENTROPY_API entropy_WorkGraph entropy_work_graph_create( - entropy_WorkContractGroup work_group, - EntropyStatus* status -); +ENTROPY_API entropy_WorkGraph entropy_work_graph_create(entropy_WorkContractGroup work_group, EntropyStatus* status); /** * @brief Creates a work graph with custom configuration @@ -265,11 +267,9 @@ ENTROPY_API entropy_WorkGraph entropy_work_graph_create( * ); * @endcode */ -ENTROPY_API entropy_WorkGraph entropy_work_graph_create_with_config( - entropy_WorkContractGroup work_group, - const EntropyWorkGraphConfig* config, - EntropyStatus* status -); +ENTROPY_API entropy_WorkGraph entropy_work_graph_create_with_config(entropy_WorkContractGroup work_group, + const EntropyWorkGraphConfig* config, + EntropyStatus* status); /** * @brief Destroys a work graph and frees resources @@ -284,9 +284,7 @@ ENTROPY_API entropy_WorkGraph entropy_work_graph_create_with_config( * @threadsafety Thread-safe * @ownership Frees the graph */ -ENTROPY_API void entropy_work_graph_destroy( - entropy_WorkGraph graph -); +ENTROPY_API void entropy_work_graph_destroy(entropy_WorkGraph graph); // ============================================================================ // Node Creation @@ -322,14 +320,9 @@ ENTROPY_API void entropy_work_graph_destroy( * ); * @endcode */ -ENTROPY_API entropy_NodeHandle entropy_work_graph_add_node( - entropy_WorkGraph graph, - EntropyWorkCallback callback, - void* user_data, - const char* name, - EntropyExecutionType execution_type, - EntropyStatus* status -); +ENTROPY_API entropy_NodeHandle entropy_work_graph_add_node(entropy_WorkGraph graph, EntropyWorkCallback callback, + void* user_data, const char* name, + EntropyExecutionType execution_type, EntropyStatus* status); /** * @brief Adds a 
yieldable task that can suspend and resume execution @@ -364,15 +357,11 @@ ENTROPY_API entropy_NodeHandle entropy_work_graph_add_node( * ); * @endcode */ -ENTROPY_API entropy_NodeHandle entropy_work_graph_add_yieldable_node( - entropy_WorkGraph graph, - EntropyYieldableWorkCallback callback, - void* user_data, - const char* name, - EntropyExecutionType execution_type, - uint32_t max_reschedules, - EntropyStatus* status -); +ENTROPY_API entropy_NodeHandle entropy_work_graph_add_yieldable_node(entropy_WorkGraph graph, + EntropyYieldableWorkCallback callback, + void* user_data, const char* name, + EntropyExecutionType execution_type, + uint32_t max_reschedules, EntropyStatus* status); // ============================================================================ // Dependency Management @@ -401,12 +390,8 @@ ENTROPY_API entropy_NodeHandle entropy_work_graph_add_yieldable_node( * entropy_work_graph_add_dependency(graph, B, C, &status); // C waits for B * @endcode */ -ENTROPY_API void entropy_work_graph_add_dependency( - entropy_WorkGraph graph, - entropy_NodeHandle from, - entropy_NodeHandle to, - EntropyStatus* status -); +ENTROPY_API void entropy_work_graph_add_dependency(entropy_WorkGraph graph, entropy_NodeHandle from, + entropy_NodeHandle to, EntropyStatus* status); // ============================================================================ // Execution Control @@ -428,10 +413,7 @@ ENTROPY_API void entropy_work_graph_add_dependency( * // Graph is now running in the background * @endcode */ -ENTROPY_API void entropy_work_graph_execute( - entropy_WorkGraph graph, - EntropyStatus* status -); +ENTROPY_API void entropy_work_graph_execute(entropy_WorkGraph graph, EntropyStatus* status); /** * @brief Suspend graph execution - no new nodes will be scheduled @@ -450,10 +432,7 @@ ENTROPY_API void entropy_work_graph_execute( * entropy_work_graph_resume(graph, &status); * @endcode */ -ENTROPY_API void entropy_work_graph_suspend( - entropy_WorkGraph graph, - EntropyStatus* 
status -); +ENTROPY_API void entropy_work_graph_suspend(entropy_WorkGraph graph, EntropyStatus* status); /** * @brief Resume graph execution after suspension @@ -466,10 +445,7 @@ ENTROPY_API void entropy_work_graph_suspend( * * @threadsafety Thread-safe */ -ENTROPY_API void entropy_work_graph_resume( - entropy_WorkGraph graph, - EntropyStatus* status -); +ENTROPY_API void entropy_work_graph_resume(entropy_WorkGraph graph, EntropyStatus* status); /** * @brief Check if the graph is currently suspended @@ -479,9 +455,7 @@ ENTROPY_API void entropy_work_graph_resume( * * @threadsafety Thread-safe */ -ENTROPY_API EntropyBool entropy_work_graph_is_suspended( - entropy_WorkGraph graph -); +ENTROPY_API EntropyBool entropy_work_graph_is_suspended(entropy_WorkGraph graph); /** * @brief Wait for entire workflow to finish @@ -505,11 +479,7 @@ ENTROPY_API EntropyBool entropy_work_graph_is_suspended( * } * @endcode */ -ENTROPY_API void entropy_work_graph_wait( - entropy_WorkGraph graph, - EntropyWaitResult* result, - EntropyStatus* status -); +ENTROPY_API void entropy_work_graph_wait(entropy_WorkGraph graph, EntropyWaitResult* result, EntropyStatus* status); /** * @brief Quick non-blocking check if workflow is done @@ -528,9 +498,7 @@ ENTROPY_API void entropy_work_graph_wait( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_graph_is_complete( - entropy_WorkGraph graph -); +ENTROPY_API EntropyBool entropy_work_graph_is_complete(entropy_WorkGraph graph); // ============================================================================ // Statistics and Monitoring @@ -556,11 +524,8 @@ ENTROPY_API EntropyBool entropy_work_graph_is_complete( * progress, stats.completed_nodes, stats.total_nodes); * @endcode */ -ENTROPY_API void entropy_work_graph_get_stats( - entropy_WorkGraph graph, - EntropyWorkGraphStats* stats, - EntropyStatus* status -); +ENTROPY_API void entropy_work_graph_get_stats(entropy_WorkGraph graph, EntropyWorkGraphStats* stats, + EntropyStatus* status); /** * @brief 
Get the number of nodes that haven't reached terminal state @@ -570,9 +535,7 @@ ENTROPY_API void entropy_work_graph_get_stats( * * @threadsafety Thread-safe */ -ENTROPY_API uint32_t entropy_work_graph_get_pending_count( - entropy_WorkGraph graph -); +ENTROPY_API uint32_t entropy_work_graph_get_pending_count(entropy_WorkGraph graph); // ============================================================================ // Node Handle Operations @@ -587,10 +550,7 @@ ENTROPY_API uint32_t entropy_work_graph_get_pending_count( * * @threadsafety Thread-safe */ -ENTROPY_API EntropyBool entropy_node_handle_is_valid( - entropy_WorkGraph graph, - entropy_NodeHandle handle -); +ENTROPY_API EntropyBool entropy_node_handle_is_valid(entropy_WorkGraph graph, entropy_NodeHandle handle); /** * @brief Get the state of a node @@ -602,11 +562,8 @@ ENTROPY_API EntropyBool entropy_node_handle_is_valid( * * @threadsafety Thread-safe */ -ENTROPY_API EntropyNodeState entropy_node_handle_get_state( - entropy_WorkGraph graph, - entropy_NodeHandle handle, - EntropyStatus* status -); +ENTROPY_API EntropyNodeState entropy_node_handle_get_state(entropy_WorkGraph graph, entropy_NodeHandle handle, + EntropyStatus* status); /** * @brief Get the debug name of a node @@ -619,10 +576,7 @@ ENTROPY_API EntropyNodeState entropy_node_handle_get_state( * * @threadsafety Thread-safe */ -ENTROPY_API const char* entropy_node_handle_get_name( - entropy_WorkGraph graph, - entropy_NodeHandle handle -); +ENTROPY_API const char* entropy_node_handle_get_name(entropy_WorkGraph graph, entropy_NodeHandle handle); /** * @brief Destroy a node handle @@ -634,9 +588,7 @@ ENTROPY_API const char* entropy_node_handle_get_name( * * @threadsafety Thread-safe */ -ENTROPY_API void entropy_node_handle_destroy( - entropy_NodeHandle handle -); +ENTROPY_API void entropy_node_handle_destroy(entropy_NodeHandle handle); #ifdef __cplusplus } diff --git a/include/entropy/entropy_work_service.h b/include/entropy/entropy_work_service.h index 
745b69a..31952af 100644 --- a/include/entropy/entropy_work_service.h +++ b/include/entropy/entropy_work_service.h @@ -73,10 +73,8 @@ extern "C" { * } * @endcode */ -ENTROPY_API entropy_WorkService entropy_work_service_create( - const EntropyWorkServiceConfig* config, - EntropyStatus* status -); +ENTROPY_API entropy_WorkService entropy_work_service_create(const EntropyWorkServiceConfig* config, + EntropyStatus* status); /** * @brief Destroys a work service and cleans up all resources @@ -95,9 +93,7 @@ ENTROPY_API entropy_WorkService entropy_work_service_create( * entropy_work_service_destroy(service); * @endcode */ -ENTROPY_API void entropy_work_service_destroy( - entropy_WorkService service -); +ENTROPY_API void entropy_work_service_destroy(entropy_WorkService service); // ============================================================================ // Service Control @@ -123,10 +119,7 @@ ENTROPY_API void entropy_work_service_destroy( * } * @endcode */ -ENTROPY_API void entropy_work_service_start( - entropy_WorkService service, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_start(entropy_WorkService service, EntropyStatus* status); /** * @brief Signals all worker threads to stop (non-blocking) @@ -144,10 +137,7 @@ ENTROPY_API void entropy_work_service_start( * entropy_work_service_request_stop(service, &status); // Non-blocking * @endcode */ -ENTROPY_API void entropy_work_service_request_stop( - entropy_WorkService service, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_request_stop(entropy_WorkService service, EntropyStatus* status); /** * @brief Waits for all worker threads to finish (blocking) @@ -167,10 +157,7 @@ ENTROPY_API void entropy_work_service_request_stop( * entropy_work_service_wait_for_stop(service, &status); // Wait for completion * @endcode */ -ENTROPY_API void entropy_work_service_wait_for_stop( - entropy_WorkService service, - EntropyStatus* status -); +ENTROPY_API void 
entropy_work_service_wait_for_stop(entropy_WorkService service, EntropyStatus* status); /** * @brief Stops all worker threads and waits for them to finish @@ -189,10 +176,7 @@ ENTROPY_API void entropy_work_service_wait_for_stop( * entropy_work_service_stop(service, &status); // Stop and wait (blocking) * @endcode */ -ENTROPY_API void entropy_work_service_stop( - entropy_WorkService service, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_stop(entropy_WorkService service, EntropyStatus* status); /** * @brief Checks if the service is currently running @@ -208,9 +192,7 @@ ENTROPY_API void entropy_work_service_stop( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_service_is_running( - entropy_WorkService service -); +ENTROPY_API EntropyBool entropy_work_service_is_running(entropy_WorkService service); // ============================================================================ // Group Management @@ -240,11 +222,8 @@ ENTROPY_API EntropyBool entropy_work_service_is_running( * } * @endcode */ -ENTROPY_API void entropy_work_service_add_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_add_group(entropy_WorkService service, entropy_WorkContractGroup group, + EntropyStatus* status); /** * @brief Unregisters a work group from the service @@ -266,11 +245,8 @@ ENTROPY_API void entropy_work_service_add_group( * entropy_work_service_remove_group(service, physics_group, &status); * @endcode */ -ENTROPY_API void entropy_work_service_remove_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_remove_group(entropy_WorkService service, entropy_WorkContractGroup group, + EntropyStatus* status); /** * @brief Removes all registered work groups (only when stopped) @@ -291,10 +267,7 @@ ENTROPY_API void entropy_work_service_remove_group( * // Re-add groups and restart... 
* @endcode */ -ENTROPY_API void entropy_work_service_clear( - entropy_WorkService service, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_clear(entropy_WorkService service, EntropyStatus* status); // ============================================================================ // Service Statistics @@ -315,9 +288,7 @@ ENTROPY_API void entropy_work_service_clear( * printf("Managing %zu work groups\n", count); * @endcode */ -ENTROPY_API size_t entropy_work_service_get_group_count( - entropy_WorkService service -); +ENTROPY_API size_t entropy_work_service_get_group_count(entropy_WorkService service); /** * @brief Gets the current thread count @@ -334,9 +305,7 @@ ENTROPY_API size_t entropy_work_service_get_group_count( * printf("Running with %zu worker threads\n", threads); * @endcode */ -ENTROPY_API size_t entropy_work_service_get_thread_count( - entropy_WorkService service -); +ENTROPY_API size_t entropy_work_service_get_thread_count(entropy_WorkService service); // ============================================================================ // Main Thread Work Execution @@ -376,12 +345,9 @@ ENTROPY_API size_t entropy_work_service_get_thread_count( * } * @endcode */ -ENTROPY_API void entropy_work_service_execute_main_thread_work( - entropy_WorkService service, - size_t max_contracts, - EntropyMainThreadWorkResult* result, - EntropyStatus* status -); +ENTROPY_API void entropy_work_service_execute_main_thread_work(entropy_WorkService service, size_t max_contracts, + EntropyMainThreadWorkResult* result, + EntropyStatus* status); /** * @brief Execute main thread work from a specific group @@ -408,12 +374,10 @@ ENTROPY_API void entropy_work_service_execute_main_thread_work( * ); * @endcode */ -ENTROPY_API size_t entropy_work_service_execute_main_thread_work_from_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - size_t max_contracts, - EntropyStatus* status -); +ENTROPY_API size_t 
entropy_work_service_execute_main_thread_work_from_group(entropy_WorkService service, + entropy_WorkContractGroup group, + size_t max_contracts, + EntropyStatus* status); /** * @brief Check if any registered group has main thread work available @@ -436,9 +400,7 @@ ENTROPY_API size_t entropy_work_service_execute_main_thread_work_from_group( * } * @endcode */ -ENTROPY_API EntropyBool entropy_work_service_has_main_thread_work( - entropy_WorkService service -); +ENTROPY_API EntropyBool entropy_work_service_has_main_thread_work(entropy_WorkService service); #ifdef __cplusplus } diff --git a/include/entropy/entropy_write_batch.h b/include/entropy/entropy_write_batch.h index ff58a4e..b04a88e 100644 --- a/include/entropy/entropy_write_batch.h +++ b/include/entropy/entropy_write_batch.h @@ -9,8 +9,8 @@ * modifications without repeatedly reading and writing the entire file. */ -#include "entropy/entropy_vfs_types.h" #include "entropy/entropy_file_operation_handle.h" +#include "entropy/entropy_vfs_types.h" #ifdef __cplusplus extern "C" { @@ -56,12 +56,8 @@ ENTROPY_API void entropy_write_batch_destroy(entropy_WriteBatch batch); * entropy_write_batch_destroy(batch); * @endcode */ -ENTROPY_API void entropy_write_batch_write_line( - entropy_WriteBatch batch, - size_t line_number, - const char* content, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_write_line(entropy_WriteBatch batch, size_t line_number, const char* content, + EntropyStatus* status); /** * @brief Insert a line at index (shifts existing lines down) @@ -74,12 +70,8 @@ ENTROPY_API void entropy_write_batch_write_line( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_insert_line( - entropy_WriteBatch batch, - size_t line_number, - const char* content, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_insert_line(entropy_WriteBatch batch, size_t line_number, const char* content, + 
EntropyStatus* status); /** * @brief Delete a line at index (shifts remaining lines up) @@ -91,11 +83,7 @@ ENTROPY_API void entropy_write_batch_insert_line( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_delete_line( - entropy_WriteBatch batch, - size_t line_number, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_delete_line(entropy_WriteBatch batch, size_t line_number, EntropyStatus* status); /** * @brief Append a line to the end of the file @@ -107,11 +95,7 @@ ENTROPY_API void entropy_write_batch_delete_line( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_append_line( - entropy_WriteBatch batch, - const char* content, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_append_line(entropy_WriteBatch batch, const char* content, EntropyStatus* status); /** * @brief Replace entire file content with text @@ -123,11 +107,7 @@ ENTROPY_API void entropy_write_batch_append_line( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_replace_all( - entropy_WriteBatch batch, - const char* content, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_replace_all(entropy_WriteBatch batch, const char* content, EntropyStatus* status); /** * @brief Clear the file (truncate to zero length) @@ -138,10 +118,7 @@ ENTROPY_API void entropy_write_batch_replace_all( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_clear( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_clear(entropy_WriteBatch batch, EntropyStatus* status); /* ============================================================================ * 
Execution @@ -172,10 +149,7 @@ ENTROPY_API void entropy_write_batch_clear( * entropy_write_batch_destroy(batch); * @endcode */ -ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit(entropy_WriteBatch batch, EntropyStatus* status); /** * @brief Apply all pending operations atomically with options @@ -187,11 +161,9 @@ ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit( * @threadsafety NOT thread-safe - do not use batch concurrently * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit_with_options( - entropy_WriteBatch batch, - const EntropyWriteOptions* options, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit_with_options(entropy_WriteBatch batch, + const EntropyWriteOptions* options, + EntropyStatus* status); /** * @brief Build the resulting content without writing it @@ -205,10 +177,7 @@ ENTROPY_API entropy_FileOperationHandle entropy_write_batch_commit_with_options( * @threadsafety NOT thread-safe - do not use batch concurrently * @ownership Returns owned pointer - must call entropy_file_operation_handle_destroy() */ -ENTROPY_API entropy_FileOperationHandle entropy_write_batch_preview( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API entropy_FileOperationHandle entropy_write_batch_preview(entropy_WriteBatch batch, EntropyStatus* status); /* ============================================================================ * Query and Management @@ -222,10 +191,7 @@ ENTROPY_API entropy_FileOperationHandle entropy_write_batch_preview( * @return Number of operations queued, or 0 on error * @threadsafety Thread-safe */ -ENTROPY_API size_t entropy_write_batch_pending_operations( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API size_t 
entropy_write_batch_pending_operations(entropy_WriteBatch batch, EntropyStatus* status); /** * @brief Check if the batch is empty @@ -235,10 +201,7 @@ ENTROPY_API size_t entropy_write_batch_pending_operations( * @return True if no operations pending, false otherwise * @threadsafety Thread-safe */ -ENTROPY_API EntropyBool entropy_write_batch_is_empty( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API EntropyBool entropy_write_batch_is_empty(entropy_WriteBatch batch, EntropyStatus* status); /** * @brief Clear all pending operations without writing @@ -249,10 +212,7 @@ ENTROPY_API EntropyBool entropy_write_batch_is_empty( * @param status Error reporting (required) * @threadsafety NOT thread-safe - do not use batch concurrently */ -ENTROPY_API void entropy_write_batch_reset( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API void entropy_write_batch_reset(entropy_WriteBatch batch, EntropyStatus* status); /** * @brief Get the target file path for this batch @@ -265,11 +225,8 @@ ENTROPY_API void entropy_write_batch_reset( * @threadsafety Thread-safe * @ownership Returns borrowed pointer - do NOT free */ -ENTROPY_API const char* entropy_write_batch_get_path( - entropy_WriteBatch batch, - EntropyStatus* status -); +ENTROPY_API const char* entropy_write_batch_get_path(entropy_WriteBatch batch, EntropyStatus* status); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/src/Concurrency/AdaptiveRankingScheduler.cpp b/src/Concurrency/AdaptiveRankingScheduler.cpp index 4a750aa..666b57a 100644 --- a/src/Concurrency/AdaptiveRankingScheduler.cpp +++ b/src/Concurrency/AdaptiveRankingScheduler.cpp @@ -8,25 +8,26 @@ */ #include "AdaptiveRankingScheduler.h" -#include "WorkContractGroup.h" + #include #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "WorkContractGroup.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ // Thread-local state definition thread_local 
AdaptiveRankingScheduler::ThreadState AdaptiveRankingScheduler::stThreadState; -AdaptiveRankingScheduler::AdaptiveRankingScheduler(const Config& config) - : _config(config) { -} +AdaptiveRankingScheduler::AdaptiveRankingScheduler(const Config& config) : _config(config) {} -IWorkScheduler::ScheduleResult AdaptiveRankingScheduler::selectNextGroup( - const std::vector& groups, - const SchedulingContext& context -) { +IWorkScheduler::ScheduleResult AdaptiveRankingScheduler::selectNextGroup(const std::vector& groups, + const SchedulingContext& context) { // Phase 1: Try to execute from the current sticky group for cache locality if (stThreadState.consecutiveExecutionCount < _config.maxConsecutiveExecutionCount) { WorkContractGroup* stickyGroup = getCurrentGroupIfValid(); @@ -34,21 +35,21 @@ IWorkScheduler::ScheduleResult AdaptiveRankingScheduler::selectNextGroup( return {stickyGroup, false}; } } - + // Phase 2: Sticky state is broken. Find a new work plan stThreadState.consecutiveExecutionCount = 0; - + if (needsRankingUpdate(groups)) { updateRankings(groups); } - + // Phase 3: Execute the new work plan WorkContractGroup* selectedGroup = executeWorkPlan(groups); - + if (selectedGroup) { return {selectedGroup, false}; } - + // No work found anywhere return {nullptr, true}; } @@ -73,67 +74,64 @@ bool AdaptiveRankingScheduler::needsRankingUpdate(const std::vector= _config.updateCycleInterval) { return true; } - + // 4. 
Current sticky group has no more work WorkContractGroup* currentGroup = getCurrentGroupIfValid(); if (currentGroup && currentGroup->scheduledCount() == 0) { return true; } - + return false; } void AdaptiveRankingScheduler::updateRankings(const std::vector& groups) { std::vector rankings; - + // Calculate rankings for each group for (auto* group : groups) { if (!group) continue; - + size_t scheduled = group->scheduledCount(); - if (scheduled == 0) continue; // Skip groups with no work - + if (scheduled == 0) continue; // Skip groups with no work + size_t executing = group->executingCount(); - + // Using the SRS formula with floating point math double executionCountF = static_cast(executing) + 1.0; double scheduleCountF = static_cast(scheduled); double threadCountF = static_cast(_config.threadCount); - + double threadPenalty = 1.0 - (executionCountF / threadCountF); double rank = (scheduleCountF / executionCountF) * threadPenalty; - + rankings.push_back({group, rank}); } - + // Sort by rank (highest first) - std::sort(rankings.begin(), rankings.end(), - [](const GroupRank& a, const GroupRank& b) { - return a.rank > b.rank; - }); - + std::sort(rankings.begin(), rankings.end(), [](const GroupRank& a, const GroupRank& b) { return a.rank > b.rank; }); + // Update cached ordered groups stThreadState.rankedGroups.clear(); stThreadState.rankedGroups.reserve(rankings.size()); for (const auto& r : rankings) { stThreadState.rankedGroups.push_back(r.group); } - + // Reset update counter and current sticky group index stThreadState.rankingUpdateCounter = 0; stThreadState.currentGroupIndex = 0; - + // Record the generation we've seen stThreadState.lastSeenGeneration = _groupsGeneration.load(std::memory_order_relaxed); } @@ -143,11 +141,11 @@ WorkContractGroup* AdaptiveRankingScheduler::executeWorkPlan(const std::vectorscheduledCount() > 0) { // Success! 
We found work // Set this as our new sticky group for the next loop @@ -157,7 +155,7 @@ WorkContractGroup* AdaptiveRankingScheduler::executeWorkPlan(const std::vector #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "IWorkScheduler.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief Adaptive scheduler that learns from workload patterns to optimize distribution. - * + * * The AdaptiveRankingScheduler serves as the default scheduler implementation. It maintains * thread affinity for cache locality while preventing any single group from monopolizing * thread resources. The scheduler functions as an adaptive load balancer that responds * dynamically to changing work patterns. - * + * * The ranking algorithm: `rank = (scheduledWork / (executingWork + 1)) * (1 - executingWork / totalThreads)` - * + * * This formula produces the following behavior: * - Groups with high work volume but few threads receive maximum priority * - Groups with existing thread allocation receive proportionally lower priority * - Groups consuming excessive thread resources relative to total threads are penalized * - Groups without pending work are excluded from consideration - * + * * Thread affinity mechanism: Threads maintain affinity to their selected group for up to * maxConsecutiveExecutionCount executions. Threads relinquish affinity when the group * exhausts work or after reaching the consecutive execution limit. - * + * * Each thread maintains an independent view of group rankings through thread-local * caching, updating only when necessary to minimize synchronization. - * + * * Recommended use cases: Optimal for heterogeneous workloads where groups exhibit varying * work volumes or when work patterns change dynamically during execution. - * + * * Not recommended when: All groups maintain equal work distribution consistently, or when * strict round-robin fairness is required. 
Consider RoundRobinScheduler for these scenarios. - * + * * @code * // Configure for shorter sticky periods (more responsive to work changes) * IWorkScheduler::Config config; * config.maxConsecutiveExecutionCount = 4; // Default is 8 * config.updateCycleInterval = 8; // Update rankings more often - * + * * auto scheduler = std::make_unique(config); * WorkService service(wsConfig, std::move(scheduler)); * @endcode */ -class AdaptiveRankingScheduler : public IWorkScheduler { +class AdaptiveRankingScheduler : public IWorkScheduler +{ private: Config _config; - + /** * @brief Per-thread state for adaptive scheduling. - * + * * This structure enables scheduling by maintaining thread-local * copies of rankings and affinity state. The design eliminates locks, atomics, and * contention between threads. Synchronization occurs only when the group list changes * or during periodic rebalancing operations. - * + * * The rankedGroups vector represents each thread's independent priority ordering. * Rankings update based on thread-local observations, which may differ slightly * between threads. This variance helps prevent thundering herd effects during * work distribution. 
*/ - struct ThreadState { - size_t currentGroupIndex = 0; ///< Current position in rankedGroups (thread affinity position) - size_t consecutiveExecutionCount = 0; ///< Number of consecutive executions on current group - size_t rankingUpdateCounter = 0; ///< Counts work done since last ranking update - std::vector rankedGroups; ///< Thread-local group priority ordering - uint64_t lastSeenGeneration = 0; ///< Generation counter for detecting group list changes - + struct ThreadState + { + size_t currentGroupIndex = 0; ///< Current position in rankedGroups (thread affinity position) + size_t consecutiveExecutionCount = 0; ///< Number of consecutive executions on current group + size_t rankingUpdateCounter = 0; ///< Counts work done since last ranking update + std::vector rankedGroups; ///< Thread-local group priority ordering + uint64_t lastSeenGeneration = 0; ///< Generation counter for detecting group list changes + void reset() { currentGroupIndex = 0; consecutiveExecutionCount = 0; @@ -96,7 +102,7 @@ class AdaptiveRankingScheduler : public IWorkScheduler { lastSeenGeneration = 0; } }; - + /// Thread-local state for adaptive scheduling algorithm /// Each worker thread maintains its own cached rankings, sticky position, and /// update counters to enable lock-free scheduling decisions. This eliminates @@ -104,124 +110,124 @@ class AdaptiveRankingScheduler : public IWorkScheduler { /// patterns it observes. Thread-local because each thread needs independent /// scheduling state to maintain the lock-free property. static thread_local ThreadState stThreadState; - + // Generation counter for detecting group list changes std::atomic _groupsGeneration{0}; - + /** * @brief Group ranking data for sorting. - * + * * Simple struct used when computing rankings. We calculate a rank score * for each group and sort by it. Higher rank = higher priority. 
*/ - struct GroupRank { + struct GroupRank + { WorkContractGroup* group; double rank; }; - + public: /** * @brief Constructs adaptive ranking scheduler with given configuration - * + * * Key parameters: maxConsecutiveExecutionCount (thread stickiness), * updateCycleInterval (ranking refresh rate). - * + * * @param config Scheduler configuration */ explicit AdaptiveRankingScheduler(const Config& config); - + ~AdaptiveRankingScheduler() override = default; - + /** * @brief Selects next group using adaptive ranking algorithm - * + * * Checks current affinity group first, then traverses ranked list. * Recomputes rankings when stale. - * + * * @param groups Available work groups - * @param context Current thread context + * @param context Current thread context * @return Selected group or nullptr if no work available */ - ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) override; - + ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) override; + /** * @brief Updates execution counters for affinity tracking - * + * * Tracks consecutive executions to determine when to release affinity. - * + * * @param group Group that work was executed from * @param threadId Thread that executed the work */ void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) override; - + /** * @brief Increments generation counter to invalidate cached rankings - * + * * Threads detect generation change and update rankings. Lock-free consistency. - * + * * @param newGroups Updated group list */ void notifyGroupsChanged(const std::vector& newGroups) override; - + /** * @brief Resets all state including thread-local data - * + * * Only resets calling thread. Others reset lazily on next schedule. 
*/ void reset() override; - + /** * @brief Returns "AdaptiveRanking" */ - const char* getName() const override { return "AdaptiveRanking"; } - + const char* getName() const override { + return "AdaptiveRanking"; + } + private: /** * @brief Checks if current thread needs to update its rankings - * + * * Updates when: cache empty, groups changed, interval reached, or * current group has no work. Balances responsiveness with overhead. - * + * * @param groups Current group list * @return true if rankings should be recomputed */ bool needsRankingUpdate(const std::vector& groups) const; - + /** * @brief Updates thread-local ranking of groups based on work pressure - * + * * Ranks by work-to-thread ratio. Excludes groups without work. * Different threads may have slightly different rankings - intentional. - * + * * @param groups Groups to rank */ void updateRankings(const std::vector& groups); - + /** * @brief Executes cascading work plan through ranked groups - * + * * Starts at affinity position, traverses ranked list. Updates affinity * on work discovery. Guarantees finding work if any exists. - * + * * @param groups Available groups (for validation) * @return Group with available work or nullptr */ WorkContractGroup* executeWorkPlan(const std::vector& groups); - + /** * @brief Gets current affinity group if index is still valid - * + * * Bounds-checked access to current affinity group. 
- * + * * @return Current affinity group or nullptr if invalid */ WorkContractGroup* getCurrentGroupIfValid() const; }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/DirectScheduler.h b/src/Concurrency/DirectScheduler.h index bad5295..52a8e34 100644 --- a/src/Concurrency/DirectScheduler.h +++ b/src/Concurrency/DirectScheduler.h @@ -10,7 +10,7 @@ /** * @file DirectScheduler.h * @brief Minimal overhead work scheduler for benchmarking and testing - * + * * This file contains DirectScheduler, a bare-bones scheduler with minimum overhead * for benchmarking and debugging, not production use. */ @@ -18,78 +18,81 @@ #pragma once #include "IWorkScheduler.h" +#include "WorkContractGroup.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief The "just give me work!" scheduler - absolute minimum overhead. - * - * This is the scheduler equivalent of a greedy algorithm. It scans from the + * + * This is the scheduler equivalent of a greedy algorithm. It scans from the * beginning and grabs the first group with work. No fancy logic, no state, * no optimization. Just pure, simple work-finding. - * + * * This scheduler was created to isolate scheduling logic from other system * overheads in benchmarking scenarios. 
- * + * * The Good: * - No state means no memory allocation * - Dead simple to understand and debug * - First groups get priority (which might be what you want) - * + * * The Bad: * - Terrible work distribution - first groups get hammered * - No load balancing whatsoever * - Later groups might starve if early groups always have work * - All threads pile onto the same groups - * + * * When to use this: * - Benchmarking to establish absolute minimum overhead * - Debugging to eliminate scheduler as a variable * - When you have only one or two groups anyway * - Testing worst-case contention scenarios - * + * * When NOT to use this: * - Production systems (seriously, don't) * - Multiple groups that need fair execution * - Any time you care about performance beyond raw overhead - * + * * @code * // Only use this for testing! * auto scheduler = std::make_unique(config); * // Now all threads will pile onto group[0] if it has work * @endcode */ -class DirectScheduler : public IWorkScheduler { +class DirectScheduler : public IWorkScheduler +{ public: /** * @brief Constructs the world's simplest scheduler - * + * * Config is ignored - this scheduler needs no configuration. - * + * * @param config Ignored entirely */ explicit DirectScheduler(const Config& config) { // No state to initialize } - + ~DirectScheduler() override = default; - + /** * @brief Finds work by scanning from the start - * + * * Returns first group with work. All threads converge on same group - * bad for performance, good for measuring overhead. 
- * + * * @param groups Groups to scan (in order) * @param context Completely ignored * @return First group with work, or nullptr */ - ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) override { + ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) override { // Just scan and return first group with work for (auto* group : groups) { if (group && group->scheduledCount() > 0) { @@ -98,14 +101,15 @@ class DirectScheduler : public IWorkScheduler { } return {nullptr, true}; } - + /** * @brief Returns "Direct" */ - const char* getName() const override { return "Direct"; } + const char* getName() const override { + return "Direct"; + } }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/IConcurrencyProvider.h b/src/Concurrency/IConcurrencyProvider.h index 12013f4..fb7180d 100644 --- a/src/Concurrency/IConcurrencyProvider.h +++ b/src/Concurrency/IConcurrencyProvider.h @@ -19,9 +19,12 @@ #pragma once -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ class WorkContractGroup; @@ -67,7 +70,8 @@ class WorkContractGroup; * }; * @endcode */ -class IConcurrencyProvider { +class IConcurrencyProvider +{ public: virtual ~IConcurrencyProvider() = default; @@ -92,7 +96,7 @@ class IConcurrencyProvider { * @param group The group being destroyed */ virtual void notifyGroupDestroyed(WorkContractGroup* group) = 0; - + /** * @brief Notifies the provider that main thread work may be available * @@ -102,7 +106,7 @@ class IConcurrencyProvider { * differently (e.g., post to UI event queue). 
* * @param group The group that has new main thread work available (optional) - * + * * @code * void notifyMainThreadWorkAvailable(WorkContractGroup* group) override { * // Post event to UI thread's message queue @@ -116,7 +120,6 @@ class IConcurrencyProvider { } }; -} // Concurrency -} // Core -} // EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/IWorkScheduler.h b/src/Concurrency/IWorkScheduler.h index 6325590..f8cb4bd 100644 --- a/src/Concurrency/IWorkScheduler.h +++ b/src/Concurrency/IWorkScheduler.h @@ -10,57 +10,60 @@ /** * @file IWorkScheduler.h * @brief Abstract interface for pluggable work scheduling strategies - * + * * This file defines the IWorkScheduler interface for pluggable scheduling algorithms. * Separates scheduling logic from thread management. */ #pragma once -#include -#include #include #include +#include +#include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ // Forward declaration class WorkContractGroup; /** * @brief Abstract interface for work scheduling strategies in the WorkService. - * + * * IWorkScheduler defines the decision-making component that determines work execution * order. The WorkService manages threading and execution infrastructure while the * scheduler provides the selection logic. This separation enables experimentation with * different scheduling strategies without modifying the core thread management system. - * + * * Implementation rationale: Custom schedulers enable optimizations for specific workload * patterns such as round-robin fairness, priority-based scheduling, or adaptive strategies. * While the default AdaptiveRankingScheduler handles most use cases effectively, specialized * workloads may benefit from simpler implementations with lower overhead or more targeted * scheduling algorithms. 
- * + * * Thread Safety: Implementations MUST be thread-safe. Multiple worker threads will invoke * selectNextGroup() concurrently, potentially with identical group sets. Utilize atomics, * thread-local storage, or lock-free algorithms to ensure correctness. - * + * * Design Requirements: selectNextGroup() executes within worker thread loops. * Consider the frequency of calls when designing implementations. - * + * * @code * // Example custom scheduler that always picks the group with most work * class GreedyScheduler : public IWorkScheduler { * public: * ScheduleResult selectNextGroup( * const std::vector& groups, - * const SchedulingContext& context) override + * const SchedulingContext& context) override * { * WorkContractGroup* best = nullptr; * size_t maxWork = 0; - * + * * for (auto* group : groups) { * size_t work = group->scheduledCount(); * if (work > maxWork) { @@ -68,75 +71,80 @@ class WorkContractGroup; * best = group; * } * } - * + * * return {best, best == nullptr}; * } - * + * * const char* getName() const override { return "Greedy"; } * }; * @endcode */ -class IWorkScheduler { +class IWorkScheduler +{ public: virtual ~IWorkScheduler() = default; - + /** * @brief Configuration for scheduler behavior. - * + * * Provides common configuration parameters that schedulers may utilize. Schedulers * can use any subset of these parameters or ignore them entirely. The WorkService * passes this configuration through to schedulers without modification. - * + * * For additional configuration requirements, extend this structure or implement * custom configuration mechanisms specific to your scheduler implementation. 
*/ - struct Config { - size_t maxConsecutiveExecutionCount = 8; ///< How many times to execute from same group before switching (prevents starvation) - size_t updateCycleInterval = 16; ///< How often to refresh internal state (for adaptive schedulers) - size_t failureSleepTime = 1; ///< Nanoseconds to sleep when no work found (usually not needed) - size_t threadCount = 0; ///< Number of worker threads (0 = hardware_concurrency) + struct Config + { + size_t maxConsecutiveExecutionCount = + 8; ///< How many times to execute from same group before switching (prevents starvation) + size_t updateCycleInterval = 16; ///< How often to refresh internal state (for adaptive schedulers) + size_t failureSleepTime = 1; ///< Nanoseconds to sleep when no work found (usually not needed) + size_t threadCount = 0; ///< Number of worker threads (0 = hardware_concurrency) }; - + /** * @brief Context passed to scheduler for each scheduling decision. - * + * * Provides thread-local information to enable informed scheduling decisions. * This context supports strategies such as maintaining thread-group affinity * for cache locality, distributing work evenly across threads, or detecting * and addressing thread starvation conditions. - * + * * All fields are maintained by the WorkService and should be treated as read-only. */ - struct SchedulingContext { - size_t threadId; ///< Unique ID for this worker thread (0 to threadCount-1) - size_t consecutiveFailures; ///< How many times in a row we've found no work - WorkContractGroup* lastExecutedGroup; ///< Last group this thread executed from (nullptr on first call) + struct SchedulingContext + { + size_t threadId; ///< Unique ID for this worker thread (0 to threadCount-1) + size_t consecutiveFailures; ///< How many times in a row we've found no work + WorkContractGroup* lastExecutedGroup; ///< Last group this thread executed from (nullptr on first call) }; - + /** * @brief Result of a scheduling decision. 
- * + * * Encapsulates the scheduler's decision for the worker thread. Indicates either * a selected work group or the absence of available work. The shouldSleep hint * guides the WorkService in choosing between spinning and backing off when no * work is available. */ - struct ScheduleResult { - WorkContractGroup* group; ///< Group to execute from (nullptr = no work available) - bool shouldSleep; ///< Hint: true if thread should sleep vs spin (ignored if group != nullptr) + struct ScheduleResult + { + WorkContractGroup* group; ///< Group to execute from (nullptr = no work available) + bool shouldSleep; ///< Hint: true if thread should sleep vs spin (ignored if group != nullptr) }; - + /** * @brief Selects the next work group for execution - * + * * Core scheduling method called continuously by worker threads. Examines * available groups and selects one for execution. Called frequently - * avoid allocations and complexity. - * + * * @param groups Current snapshot of registered work groups (groups might have no work) * @param context Thread-specific info to help with scheduling decisions * @return ScheduleResult with chosen group (or nullptr if no work found) - * + * * @code * // Simplest possible implementation - just find first group with work * ScheduleResult selectNextGroup(groups, context) override { @@ -149,45 +157,43 @@ class IWorkScheduler { * } * @endcode */ - virtual ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) = 0; - + virtual ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) = 0; + /** * @brief Notifies scheduler that work was successfully executed - * + * * Optional callback for tracking execution patterns. Use for adapting * behavior, balancing load, or maintaining fairness. Default is no-op. 
- * + * * @param group The group that work was executed from * @param threadId The thread that executed the work */ virtual void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) {} - + /** * @brief Notifies scheduler that the group list has changed - * + * * Called when groups are added/removed. Update cached state if needed. * Runs concurrently with selectNextGroup() - ensure thread safety. - * + * * @param newGroups Updated list of work groups (complete replacement) */ virtual void notifyGroupsChanged(const std::vector& newGroups) {} - + /** * @brief Resets scheduler to initial state - * + * * Clear all accumulated state, statistics, and learned patterns. * Scheduler should behave like newly constructed. Default is no-op. */ virtual void reset() {} - + /** * @brief Gets human-readable name for this scheduling strategy - * + * * Used in logs and debugging. Keep concise and descriptive. - * + * * @return Name of the scheduling algorithm (must be a static string) */ virtual const char* getName() const = 0; @@ -195,25 +201,24 @@ class IWorkScheduler { /** * @brief Factory function type for creating schedulers. - * + * * Enables registration of schedulers by name and dynamic switching between * implementations. The factory receives configuration parameters and returns * a new scheduler instance. 
- * + * * @code * std::map schedulers = { * {"round-robin", [](auto& cfg) { return std::make_unique(cfg); }}, * {"adaptive", [](auto& cfg) { return std::make_unique(cfg); }}, * {"random", [](auto& cfg) { return std::make_unique(cfg); }} * }; - * + * * // Create scheduler by name * auto scheduler = schedulers["adaptive"](config); * @endcode */ using SchedulerFactory = std::function(const IWorkScheduler::Config&)>; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/NodeScheduler.cpp b/src/Concurrency/NodeScheduler.cpp index 962d251..dd157c7 100644 --- a/src/Concurrency/NodeScheduler.cpp +++ b/src/Concurrency/NodeScheduler.cpp @@ -8,20 +8,25 @@ */ #include "NodeScheduler.h" -#include "WorkGraphEvents.h" -#include "WorkGraph.h" -#include "../Logging/Logger.h" + #include #include +#include "../Logging/Logger.h" +#include "WorkGraph.h" +#include "WorkGraphEvents.h" + #ifdef EntropyDarwin -using std::min; using std::max; +using std::min; #endif -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ bool NodeScheduler::scheduleNode(NodeHandle node) { if (_config.enableDebugLogging) { @@ -35,7 +40,7 @@ bool NodeScheduler::scheduleNode(NodeHandle node) { } return false; } - + // Check capacity if (!hasCapacity()) { // Try to defer instead @@ -48,20 +53,20 @@ bool NodeScheduler::scheduleNode(NodeHandle node) { } return deferred; } - + // Create work wrapper auto work = createWorkWrapper(node); - + // Create contract with the node's execution type auto handle = _contractGroup->createContract(std::move(work), nodeData->executionType); if (!handle.valid()) { // Contract group refused - try to defer return deferNode(node); } - + // Store handle in node nodeData->handle = handle; - + // Schedule the contract auto result = handle.schedule(); if (result != 
ScheduleResult::Scheduled) { @@ -69,68 +74,69 @@ bool NodeScheduler::scheduleNode(NodeHandle node) { nodeData->handle = WorkContractHandle(); // Clear invalid handle return deferNode(node); } - + // Update statistics updateStats(true, false, false); - + // Publish event publishScheduledEvent(node); - + // Notify callback if (_callbacks.onNodeScheduled) { _callbacks.onNodeScheduled(node); } - + return true; } bool NodeScheduler::deferNode(NodeHandle node) { std::lock_guard lock(_deferredMutex); // Exclusive lock for modifying queue - + if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("NodeScheduler", "Deferring node, queue size: " + std::to_string(_deferredQueue.size()) + ", max: " + std::to_string(_config.maxDeferredNodes)); + ENTROPY_LOG_DEBUG_CAT("NodeScheduler", "Deferring node, queue size: " + std::to_string(_deferredQueue.size()) + + ", max: " + std::to_string(_config.maxDeferredNodes)); } - + // Check queue capacity (0 = unlimited) if (_config.maxDeferredNodes > 0 && _deferredQueue.size() >= _config.maxDeferredNodes) { // Queue full - drop the node - auto msg = std::format("NodeScheduler dropping node - deferred queue full (max: {})", - _config.maxDeferredNodes); + auto msg = std::format("NodeScheduler dropping node - deferred queue full (max: {})", _config.maxDeferredNodes); ENTROPY_LOG_ERROR_CAT("NodeScheduler", msg); updateStats(false, false, true); - + // Notify callback about dropped node if (_callbacks.onNodeDropped) { _callbacks.onNodeDropped(node); } - + return false; } - + // Add to deferred queue _deferredQueue.push_back(node); - + // Update statistics updateStats(false, true, false); - + // Track peak deferred count { std::lock_guard statsLock(_statsMutex); _stats.peakDeferred = std::max(_stats.peakDeferred, _deferredQueue.size()); } - + // Publish event publishDeferredEvent(node); - + // Notify callback if (_callbacks.onNodeDeferred) { _callbacks.onNodeDeferred(node); } - + if (_config.enableDebugLogging) { - 
ENTROPY_LOG_DEBUG_CAT("NodeScheduler", "Node deferred successfully, queue size now: " + std::to_string(_deferredQueue.size())); + ENTROPY_LOG_DEBUG_CAT("NodeScheduler", + "Node deferred successfully, queue size now: " + std::to_string(_deferredQueue.size())); } - + return true; } @@ -140,25 +146,25 @@ size_t NodeScheduler::processDeferredNodes(size_t maxToSchedule) { if (toProcess == 0) { toProcess = getAvailableCapacity(); } - + if (toProcess == 0) { return 0; // No capacity } - + // Extract nodes from deferred queue std::vector nodesToSchedule; { std::lock_guard lock(_deferredMutex); // Exclusive lock for modifying queue - + size_t count = std::min(toProcess, _deferredQueue.size()); nodesToSchedule.reserve(count); - + for (size_t i = 0; i < count; ++i) { nodesToSchedule.push_back(_deferredQueue.front()); _deferredQueue.pop_front(); } } - + // Schedule the nodes size_t scheduled = 0; for (const auto& node : nodesToSchedule) { @@ -169,19 +175,19 @@ size_t NodeScheduler::processDeferredNodes(size_t maxToSchedule) { break; // Stop if we hit capacity } } - + return scheduled; } size_t NodeScheduler::scheduleReadyNodes(const std::vector& nodes) { size_t scheduled = 0; - + // Try batch scheduling if enabled if (_config.enableBatchScheduling && nodes.size() > 1) { // Schedule in batches for better efficiency for (size_t i = 0; i < nodes.size(); i += _config.batchSize) { size_t batchEnd = std::min(i + _config.batchSize, nodes.size()); - + for (size_t j = i; j < batchEnd; ++j) { if (scheduleNode(nodes[j])) { scheduled++; @@ -190,7 +196,7 @@ size_t NodeScheduler::scheduleReadyNodes(const std::vector& nodes) { return scheduled; } } - + // Check if we should continue if (!hasCapacity()) { break; @@ -207,7 +213,7 @@ size_t NodeScheduler::scheduleReadyNodes(const std::vector& nodes) { } } } - + return scheduled; } @@ -226,17 +232,17 @@ std::function NodeScheduler::createWorkWrapper(NodeHandle node) { if (!nodeData) { return; } - + // Notify execution starting (check destroyed 
flag before each callback) if (!_destroyed.load(std::memory_order_acquire) && _callbacks.onNodeExecuting) { _callbacks.onNodeExecuting(node); } - + // Publish event if (!_destroyed.load(std::memory_order_acquire) && _eventBus) { _eventBus->publish(NodeExecutingEvent(_graph, node)); } - + // Execute the work based on variant type bool failed = false; bool yielded = false; @@ -300,7 +306,7 @@ std::function NodeScheduler::createWorkWrapper(NodeHandle node) { _callbacks.onNodeCompleted(node); } } - + // Note: We cannot process deferred nodes here because the contract // hasn't been freed yet. The WorkService will call completeExecution() // AFTER this wrapper returns, and only then will capacity be available. @@ -309,7 +315,7 @@ std::function NodeScheduler::createWorkWrapper(NodeHandle node) { void NodeScheduler::updateStats(bool scheduled, bool deferred, bool dropped) { std::lock_guard lock(_statsMutex); - + if (scheduled) { _stats.nodesScheduled++; } @@ -343,7 +349,7 @@ bool NodeScheduler::deferNodeUntil(NodeHandle node, std::chrono::steady_clock::t auto now = std::chrono::steady_clock::now(); auto delay = std::chrono::duration_cast(wakeTime - now); ENTROPY_LOG_DEBUG_CAT("NodeScheduler", - "Deferring node until wake time (delay: " + std::to_string(delay.count()) + "ms)"); + "Deferring node until wake time (delay: " + std::to_string(delay.count()) + "ms)"); } // Add to timed deferred queue (priority queue sorted by wake time) @@ -361,8 +367,7 @@ size_t NodeScheduler::processTimedDeferredNodes(size_t maxToSchedule) { std::lock_guard lock(_timedDeferredMutex); // Pop all nodes that are ready (wake time <= now) - while (!_timedDeferredQueue.empty() && - _timedDeferredQueue.top().wakeTime <= now) { + while (!_timedDeferredQueue.empty() && _timedDeferredQueue.top().wakeTime <= now) { readyNodes.push_back(_timedDeferredQueue.top()); _timedDeferredQueue.pop(); @@ -375,7 +380,7 @@ size_t NodeScheduler::processTimedDeferredNodes(size_t maxToSchedule) { if 
(_config.enableDebugLogging && !readyNodes.empty()) { ENTROPY_LOG_DEBUG_CAT("NodeScheduler", - "Processing " + std::to_string(readyNodes.size()) + " timed deferred nodes"); + "Processing " + std::to_string(readyNodes.size()) + " timed deferred nodes"); } // Schedule the ready nodes @@ -398,6 +403,6 @@ size_t NodeScheduler::processTimedDeferredNodes(size_t maxToSchedule) { return scheduled; } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/NodeScheduler.h b/src/Concurrency/NodeScheduler.h index 4503a1e..bf03219 100644 --- a/src/Concurrency/NodeScheduler.h +++ b/src/Concurrency/NodeScheduler.h @@ -10,7 +10,7 @@ /** * @file NodeScheduler.h * @brief Graph node scheduling with overflow management for WorkGraph execution - * + * * This file contains the NodeScheduler, which manages the scheduling of graph nodes * into WorkContractGroups. It handles overflow scenarios when the work queue is full, * maintains execution order, and provides lifecycle callbacks for monitoring. @@ -18,60 +18,64 @@ #pragma once -#include "WorkGraphTypes.h" -#include "WorkContractGroup.h" -#include "../Core/EventBus.h" #include -#include +#include #include +#include #include -#include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "../Core/EventBus.h" +#include "WorkContractGroup.h" +#include "WorkGraphTypes.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief Manages graph node scheduling and overflow handling for work execution - * + * * NodeScheduler serves as the interface between WorkGraph's high-level dependency * management and WorkContractGroup's low-level execution system. When WorkGraph * determines nodes are ready for execution, this scheduler manages their transition * into the work queue. 
The scheduler handles capacity constraints by maintaining a * deferred queue for nodes that cannot be immediately scheduled due to queue limitations. - * + * * This component bridges the abstraction gap between graph-based task management and * queue-based execution. It addresses the practical constraint that ready nodes may * exceed available execution slots, providing buffering and advanced scheduling * infrastructure. - * + * * Key responsibilities: * - Immediate scheduling when there's capacity * - Deferred queue management when work queue is full * - Batch scheduling (schedule multiple nodes in one go) * - Lifecycle callbacks for monitoring and debugging * - Statistics tracking for analysis - * + * * Complexity characteristics: * - Deferred queue operations: O(1) push_back, O(1) pop_front - * + * * Suitable applications: * - Task graphs that might generate bursts of ready nodes * - Systems where work generation can outpace execution * - Monitoring node execution lifecycle for debugging * - Building priority scheduling on top (future enhancement) - * + * * Design trade-offs: * - Uses mutex for thread safety (not lock-free like WorkContractGroup) * - Deque for deferred queue (good cache locality, no allocations until overflow) * - Separate stats tracking to avoid polluting hot path - * + * * @code * // Basic usage with a WorkGraph * WorkContractGroup contractGroup(1024); * WorkGraph graph; * NodeScheduler scheduler(&contractGroup, &graph); - * + * * // Set up lifecycle monitoring * NodeScheduler::Callbacks callbacks; * callbacks.onNodeScheduled = [](NodeHandle node) { @@ -81,102 +85,98 @@ namespace Concurrency { * LOG_WARN("Node {} deferred (queue full)", node.index()); * }; * scheduler.setCallbacks(callbacks); - * + * * // Schedule nodes as they become ready * if (!scheduler.scheduleNode(readyNode)) { * // Node was deferred, will be scheduled when capacity available * } - * + * * // Process deferred nodes when work completes * size_t scheduled = 
scheduler.processDeferredNodes(); * @endcode */ -class NodeScheduler { +class NodeScheduler +{ public: /** * @brief Configuration parameters for tuning scheduler behavior - * + * * These settings provide control over memory usage, scheduling overhead, and * responsiveness. The defaults work well for most cases, but you might * want to tune them based on your workload. */ - struct Config { - size_t maxDeferredNodes; ///< Maximum nodes to queue when full (prevents unbounded growth) - bool enableBatchScheduling; ///< Schedule multiple nodes in one operation (reduces overhead) - size_t batchSize; ///< How many nodes to schedule per batch (tune for your workload) - bool enableDebugLogging; ///< Enable verbose debug logging for troubleshooting - + struct Config + { + size_t maxDeferredNodes; ///< Maximum nodes to queue when full (prevents unbounded growth) + bool enableBatchScheduling; ///< Schedule multiple nodes in one operation (reduces overhead) + size_t batchSize; ///< How many nodes to schedule per batch (tune for your workload) + bool enableDebugLogging; ///< Enable verbose debug logging for troubleshooting + Config() : maxDeferredNodes(100), enableBatchScheduling(true), batchSize(10), enableDebugLogging(false) {} }; - + /** * @brief Lifecycle hooks for monitoring node execution flow - * + * * These callbacks let you track what's happening to your nodes as they flow * through the scheduling system. Perfect for debugging, profiling, or building * visualization tools. All callbacks are optional - only set the ones you need. 
*/ - struct Callbacks { - std::function onNodeScheduled; ///< Node successfully entered work queue - std::function onNodeDeferred; ///< Node queued due to lack of capacity - std::function onNodeExecuting; ///< Node started executing (worker picked it up) - std::function onNodeCompleted; ///< Node finished successfully + struct Callbacks + { + std::function onNodeScheduled; ///< Node successfully entered work queue + std::function onNodeDeferred; ///< Node queued due to lack of capacity + std::function onNodeExecuting; ///< Node started executing (worker picked it up) + std::function onNodeCompleted; ///< Node finished successfully std::function onNodeFailed; ///< Node threw an exception - std::function onNodeDropped; ///< Node dropped (deferred queue overflow) - std::function onNodeYielded; ///< Node yielded execution (will reschedule immediately) - std::function onNodeYieldedUntil; ///< Node yielded until specific time + std::function onNodeDropped; ///< Node dropped (deferred queue overflow) + std::function onNodeYielded; ///< Node yielded execution (will reschedule immediately) + std::function + onNodeYieldedUntil; ///< Node yielded until specific time }; - + /** * @brief Creates a scheduler bridging graph nodes to work execution - * + * * Sets up scheduling for a WorkGraph. Doesn't own graph or contract group. 
- * + * * @param contractGroup Where to schedule work (must outlive scheduler) - * @param graph The graph whose nodes we're scheduling (must outlive scheduler) + * @param graph The graph whose nodes we're scheduling (must outlive scheduler) * @param eventBus Optional event system (can be nullptr) * @param config Tuning parameters - * + * * @code * // Typical setup in a WorkGraph * auto contractGroup = std::make_unique(1024); * auto scheduler = std::make_unique( - * contractGroup.get(), + * contractGroup.get(), * this, // WorkGraph passes itself * _eventBus, * NodeScheduler::Config{.maxDeferredNodes = 200} * ); * @endcode */ - NodeScheduler(WorkContractGroup* contractGroup, - const WorkGraph* graph, - std::shared_mutex* graphMutex, - Core::EventBus* eventBus = nullptr, - const Config& config = {}) - : _contractGroup(contractGroup) - , _graph(graph) - , _graphMutex(graphMutex) - , _eventBus(eventBus) - , _config(config) { - } - + NodeScheduler(WorkContractGroup* contractGroup, const WorkGraph* graph, std::shared_mutex* graphMutex, + Core::EventBus* eventBus = nullptr, const Config& config = {}) + : _contractGroup(contractGroup), _graph(graph), _graphMutex(graphMutex), _eventBus(eventBus), _config(config) {} + ~NodeScheduler() { // Signal that this scheduler is being destroyed _destroyed.store(true, std::memory_order_release); } - + /** * @brief Installs lifecycle monitoring callbacks - * + * * Set before scheduling for lifecycle tracking. Not thread-safe with active * scheduling - set during init. 
- * + * * @param callbacks Structure with optional callback functions - * + * * @code * NodeScheduler::Callbacks callbacks; - * callbacks.onNodeDeferred = [&deferredCount](NodeHandle) { - * deferredCount++; + * callbacks.onNodeDeferred = [&deferredCount](NodeHandle) { + * deferredCount++; * }; * callbacks.onNodeDropped = [](NodeHandle node) { * LOG_ERROR("Critical: Node {} dropped!", node.index()); @@ -187,16 +187,16 @@ class NodeScheduler { void setCallbacks(const Callbacks& callbacks) { _callbacks = callbacks; } - + /** * @brief Attempts to schedule a node, deferring if necessary - * + * * Main entry point for execution. Tries immediate scheduling, defers if full, * drops if deferred queue full. Thread-safe. - * + * * @param node Handle to the node that's ready to execute * @return true if scheduled immediately, false if deferred or dropped - * + * * @code * // In your graph's "node became ready" logic * if (scheduler.scheduleNode(readyNode)) { @@ -211,16 +211,16 @@ class NodeScheduler { * @endcode */ bool scheduleNode(NodeHandle node); - + /** * @brief Explicitly defers a node without trying to schedule first - * + * * Bypasses capacity check, goes straight to deferred queue. Useful for * batch operations when you know there's no capacity. - * + * * @param node The node to add to deferred queue * @return true if queued successfully, false if deferred queue is full - * + * * @code * // When you know the work queue is full * if (!scheduler.hasCapacity()) { @@ -235,7 +235,7 @@ class NodeScheduler { * @endcode */ bool deferNode(NodeHandle node); - + /** * @brief Drains the deferred queue into available execution slots * @@ -303,33 +303,33 @@ class NodeScheduler { * @endcode */ bool deferNodeUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime); - + /** * @brief Quick check if we can accept more work right now - * + * * Snapshot that may be stale. Use for optimization hints, not critical logic. 
- * + * * @return true if work queue has free slots, false if full - * + * * @code * // Use for early-out optimization * if (!scheduler.hasCapacity()) { * // Don't bother trying to schedule a bunch of nodes - * return; + * return; * } * @endcode */ bool hasCapacity() const { return _contractGroup->activeCount() < _contractGroup->capacity(); } - + /** * @brief Gets exact number of free execution slots - * + * * More precise than hasCapacity(). Snapshot that might be stale. - * + * * @return Number of nodes that could be scheduled immediately - * + * * @code * // Batch scheduling optimization * size_t capacity = scheduler.getAvailableCapacity(); @@ -344,14 +344,14 @@ class NodeScheduler { size_t capacity = _contractGroup->capacity(); return (active < capacity) ? (capacity - active) : 0; } - + /** * @brief Checks how many nodes are waiting in the deferred queue - * + * * Monitor system pressure. Thread-safe but takes lock - avoid tight loops. - * + * * @return Current number of nodes waiting for execution capacity - * + * * @code * // Monitor for queue buildup * size_t deferred = scheduler.getDeferredCount(); @@ -364,14 +364,14 @@ class NodeScheduler { std::shared_lock lock(_deferredMutex); return _deferredQueue.size(); } - + /** * @brief Nuclear option: drops all deferred nodes - * + * * For aborting pending work. Nodes are lost - no execution or callbacks. - * + * * @return How many nodes were dropped from the queue - * + * * @code * // Emergency abort * size_t dropped = scheduler.clearDeferredNodes(); @@ -386,58 +386,59 @@ class NodeScheduler { _deferredQueue.clear(); return count; } - + /** * @brief Batch scheduling for multiple ready nodes - * + * * Handles multiple nodes efficiently. Schedules what fits, defers rest. * Preserves order. 
- * + * * @param nodes Vector of nodes ready for execution * @return Number scheduled immediately (rest deferred/dropped) - * + * * @code * // After a node completes, schedule all its dependents * std::vector readyDependents = getDependents(completed); * size_t scheduled = scheduler.scheduleReadyNodes(readyDependents); - * LOG_DEBUG("Scheduled {}/{} dependent nodes", + * LOG_DEBUG("Scheduled {}/{} dependent nodes", * scheduled, readyDependents.size()); * @endcode */ size_t scheduleReadyNodes(const std::vector& nodes); - + /** * @brief Health metrics for the scheduler - * + * * These stats help you understand scheduling behavior and identify bottlenecks. * If nodesDropped > 0, you're losing work and need to increase maxDeferredNodes. * If peakDeferred is high, you might need more workers or to adjust node execution. */ - struct Stats { - size_t nodesScheduled = 0; ///< Total nodes that went straight to execution - size_t nodesDeferred = 0; ///< Total nodes that had to wait in deferred queue - size_t nodesDropped = 0; ///< Critical: nodes lost due to queue overflow! - size_t currentDeferred = 0; ///< Current size of deferred queue - size_t peakDeferred = 0; ///< Highest deferred queue size seen + struct Stats + { + size_t nodesScheduled = 0; ///< Total nodes that went straight to execution + size_t nodesDeferred = 0; ///< Total nodes that had to wait in deferred queue + size_t nodesDropped = 0; ///< Critical: nodes lost due to queue overflow! + size_t currentDeferred = 0; ///< Current size of deferred queue + size_t peakDeferred = 0; ///< Highest deferred queue size seen }; - + Stats getStats() const { std::lock_guard lock(_statsMutex); Stats stats = _stats; stats.currentDeferred = getDeferredCount(); return stats; } - + /** * @brief Clears all statistics counters back to zero - * + * * For benchmarking or resetting after warmup. Only counters, not queue. 
- * + * * @code * scheduler.resetStats(); * runBenchmark(); * auto stats = scheduler.getStats(); - * LOG_INFO("Benchmark: {} scheduled, {} deferred", + * LOG_INFO("Benchmark: {} scheduled, {} deferred", * stats.nodesScheduled, stats.nodesDeferred); * @endcode */ @@ -445,18 +446,18 @@ class NodeScheduler { std::lock_guard lock(_statsMutex); _stats = Stats{}; } - + /** * @brief Estimates memory consumption of the scheduler - * + * * Includes object + deferred queue. Conservative estimate. - * + * * @return Approximate bytes used by this scheduler instance - * + * * @code * // Check memory pressure * if (scheduler.getMemoryUsage() > 1024 * 1024) { // 1MB - * LOG_WARN("Scheduler using {}KB of memory", + * LOG_WARN("Scheduler using {}KB of memory", * scheduler.getMemoryUsage() / 1024); * } * @endcode @@ -465,24 +466,25 @@ class NodeScheduler { std::shared_lock lock(_deferredMutex); // Shared lock for reading return sizeof(*this) + _deferredQueue.size() * sizeof(NodeHandle); } - + private: - WorkContractGroup* _contractGroup; ///< Where we schedule work (not owned) - const WorkGraph* _graph; ///< Graph we're scheduling for (not owned) - std::shared_mutex* _graphMutex; ///< Mutex protecting graph structure (not owned) - Core::EventBus* _eventBus; ///< Optional event system for notifications - Config _config; ///< Scheduler configuration - Callbacks _callbacks; ///< Lifecycle event callbacks - + WorkContractGroup* _contractGroup; ///< Where we schedule work (not owned) + const WorkGraph* _graph; ///< Graph we're scheduling for (not owned) + std::shared_mutex* _graphMutex; ///< Mutex protecting graph structure (not owned) + Core::EventBus* _eventBus; ///< Optional event system for notifications + Config _config; ///< Scheduler configuration + Callbacks _callbacks; ///< Lifecycle event callbacks + // Safety flag to prevent use after destruction mutable std::atomic _destroyed{false}; ///< Set to true in destructor for safety checks - + // Deferred queue for nodes waiting for 
capacity - mutable std::shared_mutex _deferredMutex; ///< Reader-writer lock for deferred queue (mutable for const methods) - std::deque _deferredQueue; ///< FIFO queue of nodes waiting for capacity + mutable std::shared_mutex _deferredMutex; ///< Reader-writer lock for deferred queue (mutable for const methods) + std::deque _deferredQueue; ///< FIFO queue of nodes waiting for capacity // Timed deferred queue for nodes waiting until specific time (e.g., timers) - struct TimedNode { + struct TimedNode + { NodeHandle node; std::chrono::steady_clock::time_point wakeTime; @@ -491,53 +493,53 @@ class NodeScheduler { return wakeTime > other.wakeTime; } }; - mutable std::shared_mutex _timedDeferredMutex; ///< Reader-writer lock for timed deferred queue + mutable std::shared_mutex _timedDeferredMutex; ///< Reader-writer lock for timed deferred queue std::priority_queue _timedDeferredQueue; ///< Min-heap sorted by wake time (earliest first) // Statistics - mutable std::mutex _statsMutex; ///< Protects statistics (separate to reduce contention) - Stats _stats; ///< Accumulated statistics - + mutable std::mutex _statsMutex; ///< Protects statistics (separate to reduce contention) + Stats _stats; ///< Accumulated statistics + /** * @brief Creates the lambda that will be executed by workers - * + * * Wraps node work with lifecycle callbacks and error handling. Handles * onNodeExecuting, work execution, onNodeCompleted/Failed. * Private - internal bridge between graph and execution. - * + * * @param node The node whose work we're wrapping * @return Lambda function suitable for WorkContractGroup execution */ std::function createWorkWrapper(NodeHandle node); - + /** * @brief Thread-safe statistics update helper - * + * * Updates counters and tracks peak deferred size. Private - auto-updated. 
- * + * * @param scheduled True if node was scheduled immediately * @param deferred True if node was added to deferred queue * @param dropped True if node was dropped (queue overflow) */ void updateStats(bool scheduled, bool deferred, bool dropped); - + /** * @brief Publishes a "node scheduled" event to the event bus - * + * * Only if event bus configured. Private - internal side effect. - * + * * @param node The node that was scheduled */ void publishScheduledEvent(NodeHandle node); - + /** * @brief Publishes a "node deferred" event to the event bus - * + * * @param node The node that was deferred */ void publishDeferredEvent(NodeHandle node); }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/NodeStateManager.cpp b/src/Concurrency/NodeStateManager.cpp index dc530cd..908022a 100644 --- a/src/Concurrency/NodeStateManager.cpp +++ b/src/Concurrency/NodeStateManager.cpp @@ -8,14 +8,19 @@ */ #include "NodeStateManager.h" -#include "WorkGraphEvents.h" -#include "WorkGraph.h" -#include "../Logging/Logger.h" + #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "../Logging/Logger.h" +#include "WorkGraph.h" +#include "WorkGraphEvents.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ bool NodeStateManager::transitionState(NodeHandle node, NodeState from, NodeState to) { // Validate transition @@ -24,34 +29,34 @@ bool NodeStateManager::transitionState(NodeHandle node, NodeState from, NodeStat auto* dag0 = node.handleOwnerAs>(); auto* nodeData = dag0 ? 
dag0->getNodeData(node) : nullptr; if (nodeData) { - auto msg = std::format("Invalid state transition attempted: {} -> {} for node: {}", - getStateName(from), getStateName(to), nodeData->name); + auto msg = std::format("Invalid state transition attempted: {} -> {} for node: {}", getStateName(from), + getStateName(to), nodeData->name); ENTROPY_LOG_WARNING_CAT("NodeStateManager", msg); } return false; } - + // Get node data for atomic state update auto* dag = node.handleOwnerAs>(); auto* nodeData = dag ? dag->getNodeData(node) : nullptr; if (!nodeData) { return false; } - + // Attempt atomic state transition NodeState expected = from; if (!nodeData->state.compare_exchange_strong(expected, to, std::memory_order_acq_rel)) { return false; // Current state didn't match 'from' } - + // State is tracked directly in the node's atomic field - + // Update statistics updateStats(from, to); - + // Publish event if event bus is configured publishStateChange(node, from, to); - + return true; } @@ -61,14 +66,14 @@ void NodeStateManager::forceState(NodeHandle node, NodeState to) { if (!nodeData) { return; } - + NodeState from = nodeData->state.exchange(to, std::memory_order_acq_rel); - + // State is tracked directly in the node's atomic field - + // Update statistics updateStats(from, to); - + // Publish event publishStateChange(node, from, to); } @@ -79,7 +84,7 @@ NodeState NodeStateManager::getState(NodeHandle node) const { if (!nodeData) { return NodeState::Pending; } - + return nodeData->state.load(std::memory_order_acquire); } @@ -89,43 +94,56 @@ void NodeStateManager::registerNode(NodeHandle node, NodeState initialState) { if (!nodeData) { return; } - + // Set initial state nodeData->state.store(initialState, std::memory_order_release); - + // State is tracked directly in the node's atomic field - + // Update stats atomically _stats.totalNodes.fetch_add(1, std::memory_order_relaxed); - + switch (initialState) { - case NodeState::Pending: _stats.pendingNodes.fetch_add(1, 
std::memory_order_relaxed); break; - case NodeState::Ready: _stats.readyNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Scheduled: _stats.scheduledNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Executing: _stats.executingNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Completed: _stats.completedNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Failed: _stats.failedNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Cancelled: _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); break; + case NodeState::Pending: + _stats.pendingNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Ready: + _stats.readyNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Scheduled: + _stats.scheduledNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Executing: + _stats.executingNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Completed: + _stats.completedNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Failed: + _stats.failedNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Cancelled: + _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); + break; } } size_t NodeStateManager::batchTransition(const std::vector>& updates) { size_t successCount = 0; - + for (const auto& [node, from, to] : updates) { if (transitionState(node, from, to)) { successCount++; } } - + return successCount; } -void NodeStateManager::getNodesInState(NodeState state, - const std::vector& allNodes, - std::vector& output) const { +void NodeStateManager::getNodesInState(NodeState state, const std::vector& allNodes, + std::vector& output) const { output.clear(); - + // Check each node's atomic state directly for (const auto& node : allNodes) { auto* dagN = node.handleOwnerAs>(); @@ -139,45 +157,59 @@ void NodeStateManager::getNodesInState(NodeState 
state, void NodeStateManager::updateStats(NodeState oldState, NodeState newState) { // Atomically decrement old state counter switch (oldState) { - case NodeState::Pending: - if (_stats.pendingNodes.load(std::memory_order_relaxed) > 0) - _stats.pendingNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Pending: + if (_stats.pendingNodes.load(std::memory_order_relaxed) > 0) + _stats.pendingNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Ready: - if (_stats.readyNodes.load(std::memory_order_relaxed) > 0) - _stats.readyNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Ready: + if (_stats.readyNodes.load(std::memory_order_relaxed) > 0) + _stats.readyNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Scheduled: - if (_stats.scheduledNodes.load(std::memory_order_relaxed) > 0) - _stats.scheduledNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Scheduled: + if (_stats.scheduledNodes.load(std::memory_order_relaxed) > 0) + _stats.scheduledNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Executing: - if (_stats.executingNodes.load(std::memory_order_relaxed) > 0) - _stats.executingNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Executing: + if (_stats.executingNodes.load(std::memory_order_relaxed) > 0) + _stats.executingNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Completed: - if (_stats.completedNodes.load(std::memory_order_relaxed) > 0) - _stats.completedNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Completed: + if (_stats.completedNodes.load(std::memory_order_relaxed) > 0) + _stats.completedNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Failed: - if (_stats.failedNodes.load(std::memory_order_relaxed) > 0) - _stats.failedNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Failed: + if (_stats.failedNodes.load(std::memory_order_relaxed) > 0) + 
_stats.failedNodes.fetch_sub(1, std::memory_order_relaxed); break; - case NodeState::Cancelled: - if (_stats.cancelledNodes.load(std::memory_order_relaxed) > 0) - _stats.cancelledNodes.fetch_sub(1, std::memory_order_relaxed); + case NodeState::Cancelled: + if (_stats.cancelledNodes.load(std::memory_order_relaxed) > 0) + _stats.cancelledNodes.fetch_sub(1, std::memory_order_relaxed); break; } - + // Atomically increment new state counter switch (newState) { - case NodeState::Pending: _stats.pendingNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Ready: _stats.readyNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Scheduled: _stats.scheduledNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Executing: _stats.executingNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Completed: _stats.completedNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Failed: _stats.failedNodes.fetch_add(1, std::memory_order_relaxed); break; - case NodeState::Cancelled: _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); break; + case NodeState::Pending: + _stats.pendingNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Ready: + _stats.readyNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Scheduled: + _stats.scheduledNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Executing: + _stats.executingNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Completed: + _stats.completedNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Failed: + _stats.failedNodes.fetch_add(1, std::memory_order_relaxed); + break; + case NodeState::Cancelled: + _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); + break; } } @@ -185,42 +217,42 @@ void NodeStateManager::publishStateChange(NodeHandle node, NodeState from, NodeS if (!_eventBus) { return; // No event bus configured } 
- + // Publish generic state change event _eventBus->publish(NodeStateChangedEvent(_graph, node, from, to)); - + // Publish specific events for important transitions switch (to) { case NodeState::Ready: _eventBus->publish(NodeReadyEvent(_graph, node)); break; - + case NodeState::Scheduled: // Note: NodeScheduler also publishes this event, so we might get duplicates _eventBus->publish(NodeScheduledEvent(_graph, node)); break; - + case NodeState::Executing: _eventBus->publish(NodeExecutingEvent(_graph, node)); break; - + case NodeState::Completed: _eventBus->publish(NodeCompletedEvent(_graph, node)); break; - + case NodeState::Failed: _eventBus->publish(NodeFailedEvent(_graph, node)); break; - + case NodeState::Cancelled: _eventBus->publish(NodeCancelledEvent(_graph, node)); break; - + default: break; } } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/NodeStateManager.h b/src/Concurrency/NodeStateManager.h index bcaacf9..ff3ae61 100644 --- a/src/Concurrency/NodeStateManager.h +++ b/src/Concurrency/NodeStateManager.h @@ -9,75 +9,78 @@ #pragma once -#include "WorkGraphTypes.h" -#include "../Core/EventBus.h" #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "../Core/EventBus.h" +#include "WorkGraphTypes.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief Centralized state management for WorkGraph nodes - * + * * This component is responsible for: * - Validating state transitions * - Publishing state change events * - Tracking state statistics * - Providing thread-safe state queries - * + * * Design goals: * - Minimal memory footprint (suitable for thousands of instances) * - Lock-free reads where possible * - Optional event publishing (only if EventBus provided) */ -class NodeStateManager { +class NodeStateManager +{ public: /** 
* @brief Construct state manager - * + * * @param graph The owning WorkGraph (for event context) * @param eventBus Optional event bus for publishing state changes */ explicit NodeStateManager(const WorkGraph* graph, Core::EventBus* eventBus = nullptr) - : _graph(graph) - , _eventBus(eventBus) { - } - + : _graph(graph), _eventBus(eventBus) {} + /** * @brief Attempt to transition a node to a new state - * + * * This is the primary method for all state changes. It validates the transition, * updates the state atomically, and publishes events if configured. - * + * * @param node The node to transition * @param from Expected current state (for CAS operation) * @param to Target state * @return true if transition succeeded, false if current state didn't match 'from' */ bool transitionState(NodeHandle node, NodeState from, NodeState to); - + /** * @brief Force a state transition without validation - * + * * Use sparingly - only for error recovery or initialization - * + * * @param node The node to transition * @param to Target state */ void forceState(NodeHandle node, NodeState to); - + /** * @brief Get current state of a node - * + * * @param node The node to query * @return Current state, or Pending if node is invalid */ NodeState getState(NodeHandle node) const; - + /** * @brief Check if a state transition is valid - * + * * @param from Source state * @param to Target state * @return true if transition is allowed @@ -85,7 +88,7 @@ class NodeStateManager { static bool canTransition(NodeState from, NodeState to) { return isValidTransition(from, to); } - + /** * @brief Get human-readable name for a state * @param state The state to convert @@ -94,7 +97,7 @@ class NodeStateManager { static const char* getStateName(NodeState state) { return nodeStateToString(state); } - + /** * @brief Check if a node is in a terminal state * @param node The node to check @@ -103,7 +106,7 @@ class NodeStateManager { bool isTerminal(NodeHandle node) const { return 
isTerminalState(getState(node)); } - + /** * @brief Get statistics about current state distribution * @param stats Output parameter for statistics @@ -121,7 +124,7 @@ class NodeStateManager { stats.memoryUsage.store(_stats.memoryUsage.load(std::memory_order_relaxed)); stats.totalExecutionTime = _stats.totalExecutionTime; } - + /** * @brief Reset all statistics */ @@ -137,42 +140,41 @@ class NodeStateManager { _stats.memoryUsage.store(0, std::memory_order_relaxed); _stats.totalExecutionTime = {}; } - + /** * @brief Register a node with initial state * @param node The node to register * @param initialState Initial state (default: Pending) */ void registerNode(NodeHandle node, NodeState initialState = NodeState::Pending); - + /** * @brief Batch update for multiple nodes * @param updates Vector of (node, from, to) tuples * @return Number of successful transitions */ size_t batchTransition(const std::vector>& updates); - + /** * @brief Get all nodes in a specific state * @param state The state to query * @param allNodes List of all nodes to check * @param output Vector to fill with matching nodes */ - void getNodesInState(NodeState state, - const std::vector& allNodes, - std::vector& output) const; - + void getNodesInState(NodeState state, const std::vector& allNodes, + std::vector& output) const; + /** * @brief Check if any nodes are in non-terminal states * @return true if there are pending, ready, scheduled, or executing nodes */ bool hasActiveNodes() const { - return _stats.pendingNodes.load(std::memory_order_relaxed) > 0 || - _stats.readyNodes.load(std::memory_order_relaxed) > 0 || - _stats.scheduledNodes.load(std::memory_order_relaxed) > 0 || + return _stats.pendingNodes.load(std::memory_order_relaxed) > 0 || + _stats.readyNodes.load(std::memory_order_relaxed) > 0 || + _stats.scheduledNodes.load(std::memory_order_relaxed) > 0 || _stats.executingNodes.load(std::memory_order_relaxed) > 0; } - + /** * @brief Get memory usage estimate * @return Approximate memory 
usage in bytes @@ -180,25 +182,25 @@ class NodeStateManager { size_t getMemoryUsage() const { return sizeof(*this); } - + private: const WorkGraph* _graph; Core::EventBus* _eventBus; - + // Statistics tracking - atomic for lock-free updates WorkGraphStats _stats; - + /** * @brief Update statistics when state changes */ void updateStats(NodeState oldState, NodeState newState); - + /** * @brief Publish state change event if event bus is configured */ void publishStateChange(NodeHandle node, NodeState from, NodeState to); }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/RandomScheduler.cpp b/src/Concurrency/RandomScheduler.cpp index 942a707..2ca6748 100644 --- a/src/Concurrency/RandomScheduler.cpp +++ b/src/Concurrency/RandomScheduler.cpp @@ -8,12 +8,17 @@ */ #include "RandomScheduler.h" -#include "WorkContractGroup.h" + #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "WorkContractGroup.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ // Thread-local state definitions thread_local std::mt19937 RandomScheduler::stRng; @@ -33,33 +38,31 @@ void RandomScheduler::ensureRngInitialized() { } } -IWorkScheduler::ScheduleResult RandomScheduler::selectNextGroup( - const std::vector& groups, - const SchedulingContext& context -) { +IWorkScheduler::ScheduleResult RandomScheduler::selectNextGroup(const std::vector& groups, + const SchedulingContext& context) { ensureRngInitialized(); - + // First, count groups with work std::vector groupsWithWork; groupsWithWork.reserve(groups.size()); - + for (auto* group : groups) { if (group && group->scheduledCount() > 0) { groupsWithWork.push_back(group); } } - + if (groupsWithWork.empty()) { return {nullptr, true}; } - + // Randomly select from groups with work std::uniform_int_distribution dist(0, 
groupsWithWork.size() - 1); size_t selectedIndex = dist(stRng); - + return {groupsWithWork[selectedIndex], false}; } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/RandomScheduler.h b/src/Concurrency/RandomScheduler.h index 9f31f6d..5d49146 100644 --- a/src/Concurrency/RandomScheduler.h +++ b/src/Concurrency/RandomScheduler.h @@ -10,66 +10,71 @@ /** * @file RandomScheduler.h * @brief Randomized work scheduler for load balancing and contention avoidance - * + * * This file contains RandomScheduler, which uses randomization to distribute work * and break up contention patterns. */ #pragma once -#include "IWorkScheduler.h" #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "IWorkScheduler.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief The chaos monkey of schedulers - picks groups at random. - * + * * Sometimes the best strategy is no strategy. This scheduler just rolls the dice * and picks a random group that has work. It's surprisingly effective at avoiding * certain pathological patterns that can emerge with deterministic schedulers. - * + * * This scheduler uses a Mersenne Twister random number generator for * quality randomization. 
- * + * * The Good: * - Natural load balancing - randomness spreads work evenly over time * - Breaks up contention patterns - threads won't fight over the same groups * - Simple implementation - no state to maintain or update * - Each thread has its own RNG - no synchronization needed - * + * * The Not-So-Good: * - Zero cache locality - threads jump randomly between groups * - RNG computation cost * - Unpredictable execution order * - Might pick the same empty groups repeatedly (bad luck) - * + * * When to use this: * - You're seeing contention with deterministic schedulers * - Work distribution is unpredictable or bursty * - You want to test if scheduling order affects your results * - Cache locality doesn't matter for your workload - * + * * When NOT to use this: * - You need predictable, reproducible execution * - Cache performance is critical * - You have groups with vastly different work amounts - * + * * Fun fact: Uses reservoir sampling to ensure uniform selection among groups * with work. Every eligible group has equal probability of being chosen. - * + * * @code * // Random scheduling can help with "thundering herd" problems * // where all threads hit the same group at once * auto scheduler = std::make_unique(config); * WorkService service(wsConfig, std::move(scheduler)); - * + * * // Now threads naturally spread out across groups * @endcode */ -class RandomScheduler : public IWorkScheduler { +class RandomScheduler : public IWorkScheduler +{ private: /// Thread-local Mersenne Twister random number generator /// Each worker thread maintains its own RNG to avoid synchronization overhead @@ -77,35 +82,35 @@ class RandomScheduler : public IWorkScheduler { /// properties for uniform work distribution. Thread-local because shared RNGs would /// require synchronization and could create correlation between threads. 
static thread_local std::mt19937 stRng; - + /// Thread-local initialization flag for lazy RNG setup /// Tracks whether this thread's RNG has been seeded. Each thread initializes its /// RNG on first use with a unique seed combining thread ID and timestamp. /// Thread-local because each thread needs its own initialization state. static thread_local bool stRngInitialized; - + public: /** * @brief Constructs random scheduler - * + * * Config is ignored. Each thread initializes its own RNG on first use. - * + * * @param config Scheduler configuration (unused) */ explicit RandomScheduler(const Config& config); - + ~RandomScheduler() override = default; - + /** * @brief Randomly selects a group with available work - * + * * Uses reservoir sampling for uniform selection among eligible groups. * Each group with work has equal probability of being chosen. - * + * * @param groups Available work groups * @param context Current thread context (ignored) * @return Randomly selected group with work, or nullptr if none - * + * * @code * // What happens inside (simplified): * // 1. Start with no candidate @@ -116,36 +121,35 @@ class RandomScheduler : public IWorkScheduler { * // This gives each group exactly 1/N probability! 
* @endcode */ - ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) override; - + ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) override; + /** * @brief No-op - random selection doesn't learn from history */ void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) override {} - + /** * @brief No-op - random scheduler has no state to reset */ void reset() override {} - + /** * @brief Returns "Random" */ - const char* getName() const override { return "Random"; } - + const char* getName() const override { + return "Random"; + } + private: /** * @brief Ensures thread-local RNG is initialized - * + * * Seeds with thread ID and time to prevent correlated sequences. */ static void ensureRngInitialized(); }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/RoundRobinScheduler.cpp b/src/Concurrency/RoundRobinScheduler.cpp index 9294187..3ea4a65 100644 --- a/src/Concurrency/RoundRobinScheduler.cpp +++ b/src/Concurrency/RoundRobinScheduler.cpp @@ -8,11 +8,15 @@ */ #include "RoundRobinScheduler.h" + #include "WorkContractGroup.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ // Thread-local state definition thread_local size_t RoundRobinScheduler::stCurrentIndex = 0; @@ -21,35 +25,33 @@ RoundRobinScheduler::RoundRobinScheduler(const Config& config) { // Config mostly unused for round-robin } -IWorkScheduler::ScheduleResult RoundRobinScheduler::selectNextGroup( - const std::vector& groups, - const SchedulingContext& context -) { +IWorkScheduler::ScheduleResult RoundRobinScheduler::selectNextGroup(const std::vector& groups, + const SchedulingContext& context) { if (groups.empty()) { return {nullptr, true}; } - + // Try each group once, 
starting from current position size_t attempts = 0; - + while (attempts < groups.size()) { // Wrap around if needed if (stCurrentIndex >= groups.size()) { stCurrentIndex = 0; } - + WorkContractGroup* group = groups[stCurrentIndex]; - + // Move to next position for next call stCurrentIndex++; attempts++; - + // Check if this group has work if (group && group->scheduledCount() > 0) { return {group, false}; } } - + // No groups have work return {nullptr, true}; } @@ -58,6 +60,6 @@ void RoundRobinScheduler::reset() { stCurrentIndex = 0; } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/RoundRobinScheduler.h b/src/Concurrency/RoundRobinScheduler.h index b1c1e54..def6726 100644 --- a/src/Concurrency/RoundRobinScheduler.h +++ b/src/Concurrency/RoundRobinScheduler.h @@ -10,52 +10,56 @@ /** * @file RoundRobinScheduler.h * @brief Fair round-robin work scheduler for uniform load distribution - * + * * This file contains RoundRobinScheduler, which cycles through work groups in order * for fair scheduling with predictable behavior. */ #pragma once -#include "IWorkScheduler.h" #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "IWorkScheduler.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief Fair round-robin scheduler providing uniform work distribution - * + * * RoundRobinScheduler implements a simple circular scheduling strategy where each * thread cycles through work groups in sequential order. Upon reaching the last * group, the scheduler wraps back to the first, ensuring equal opportunity for * all groups. This approach prioritizes fairness and simplicity over adaptive * optimization. 
- * - * + * + * * Advantages: * - Simple implementation facilitates understanding and debugging * - Provides perfect fairness with equal execution opportunities * - No complex calculations or state management * - Zero contention through thread-local counter implementation - * + * * Limitations: * - No empty group detection (cycles through all groups regardless of work availability) * - Disregards cache locality (threads alternate between groups) * - Cannot adapt to workload imbalances (uniform treatment of all groups) - * + * * Recommended use cases: * - Workloads with similar work distribution across groups * - Systems requiring predictable, deterministic scheduling behavior * - Debugging scenarios where scheduler variability must be eliminated * - Applications prioritizing fairness over throughput - * + * * Not recommended when: * - Significant work imbalances exist between groups * - Work arrives in bursts to specific groups * - Cache locality is important - * + * * @code * // Perfect for evenly distributed work * for (auto& group : groups) { @@ -64,13 +68,14 @@ namespace Concurrency { * handle.schedule(); * } * } - * + * * // Use round-robin for predictable execution * auto scheduler = std::make_unique(config); * WorkService service(wsConfig, std::move(scheduler)); * @endcode */ -class RoundRobinScheduler : public IWorkScheduler { +class RoundRobinScheduler : public IWorkScheduler +{ private: /// Thread-local position in the round-robin rotation /// Each worker thread maintains its own position in the group list to ensure @@ -78,29 +83,29 @@ class RoundRobinScheduler : public IWorkScheduler { /// spread out across different starting positions, providing good load balancing. /// Thread-local because each thread needs its own independent rotation state. static thread_local size_t stCurrentIndex; - + public: /** * @brief Constructs round-robin scheduler - * + * * Config is unused - round-robin needs no tuning. 
- * + * * @param config Scheduler configuration (unused) */ explicit RoundRobinScheduler(const Config& config); - + ~RoundRobinScheduler() override = default; - + /** * @brief Selects next group in round-robin order - * + * * Starts where we left off, checks each group for work. Each thread * maintains its own position - no synchronization needed. - * + * * @param groups Available work groups * @param context Current thread context (ignored) * @return Next group with work, or nullptr if none - * + * * @code * // What actually happens inside: * // 1. Get this thread's current position @@ -110,30 +115,29 @@ class RoundRobinScheduler : public IWorkScheduler { * // 5. Return the winner (or nullptr) * @endcode */ - ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) override; - + ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) override; + /** * @brief No-op - round-robin doesn't track execution history */ void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) override {} - + /** * @brief Resets thread-local rotation index to 0 - * + * * Only affects calling thread. Others reset on next schedule. 
*/ void reset() override; - + /** * @brief Returns "RoundRobin" */ - const char* getName() const override { return "RoundRobin"; } + const char* getName() const override { + return "RoundRobin"; + } }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/SignalTree.h b/src/Concurrency/SignalTree.h index 65c719a..4f6f463 100644 --- a/src/Concurrency/SignalTree.h +++ b/src/Concurrency/SignalTree.h @@ -10,7 +10,7 @@ /** * @file SignalTree.h * @brief Lock-free binary tree for concurrent signal management - * + * * This file contains the SignalTree implementation, a specialized data structure * that provides lock-free signal setting, selection, and clearing operations. * It's designed for scenarios where multiple threads need to coordinate @@ -19,436 +19,444 @@ #pragma once #include +#include // For std::countr_zero +#include // For uint64_t #include -#include // For uint64_t -#include // For std::countr_zero -#include // For std::invalid_argument +#include // For std::invalid_argument + #include "../CoreCommon.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ + +/** + * @brief Abstract base class for SignalTree to enable polymorphic usage + * + * This allows WorkContractGroup to use SignalTree instances of different + * sizes without templates, selected at runtime based on capacity needs. + */ +class SignalTreeBase +{ +public: + static constexpr size_t S_INVALID_SIGNAL_INDEX = ~0ULL; + virtual ~SignalTreeBase() = default; + + virtual void set(size_t leafIndex) = 0; + virtual std::pair select(uint64_t& biasFlags) = 0; + virtual void clear(size_t leafIndex) = 0; + virtual bool isEmpty() const = 0; + virtual size_t getCapacity() const = 0; +}; + +/** + * @brief A lock-free binary tree for signal selection and management. 
+ * + * A signal dispatcher that can handle large numbers of signals without + * locks. Suitable for work-stealing schedulers, event systems, or any scenario + * requiring atomic signal selection and processing from multiple threads. + * + * The key innovation is the tree structure - internal nodes track active signal counts + * in their subtrees, while leaf nodes pack 64 signals each into bit fields. + * This provides O(log n) signal selection with excellent cache coherence. + * + * Key features: + * - **Lock-free**: Multiple threads can set/select signals concurrently + * - **Cache-friendly**: Entire tree lives in a contiguous array + * - **Scalable**: Supports LeafCapacity * 64 total signals + * - **Fair**: The bias system prevents signal starvation + * + * @tparam LeafCapacity Number of leaf nodes (must be power of 2). Total signal capacity is LeafCapacity * 64. + * + * @code + * // Complete multi-threaded workflow + * SignalTree tree(4); // 256 signals capacity + * std::atomic running{true}; + * + * // Producer threads: submit work signals + * std::thread producer([&tree]() { + * for (int i = 0; i < 100; ++i) { + * tree.set(i % 256); // Set signal + * std::this_thread::sleep_for(1ms); + * } + * }); + * + * // Consumer threads: process signals with fairness + * std::thread consumer([&tree, &running]() { + * uint64_t bias = 0; + * while (running) { + * auto [index, found] = tree.select(bias); + * if (found) { + * processSignal(index); + * // Rotate bias for fairness + * bias = (bias << 1) | (bias >> 63); + * } else { + * std::this_thread::yield(); + * } + * } + * }); + * + * producer.join(); + * running = false; + * consumer.join(); + * @endcode + */ +class SignalTree : public SignalTreeBase +{ +private: + const size_t _leafCapacity; + const size_t _totalNodes; + std::unique_ptr[]> + _nodes; ///< Tree storage: internal nodes are counters, leaf nodes are bitmaps /** - * @brief Abstract base class for SignalTree to enable polymorphic usage - * - * This allows 
WorkContractGroup to use SignalTree instances of different - * sizes without templates, selected at runtime based on capacity needs. + * @brief Runtime power-of-2 validation helper + * + * This is static because it's a pure utility function with no dependencies + * on instance state. Making it static clarifies that it has no side effects + * and can be used during construction before the object is fully initialized. + * + * @param n Number to check + * @return true if n is a power of 2 and greater than 0 */ - class SignalTreeBase { - public: - static constexpr size_t S_INVALID_SIGNAL_INDEX = ~0ULL; - - virtual ~SignalTreeBase() = default; - - virtual void set(size_t leafIndex) = 0; - virtual std::pair select(uint64_t& biasFlags) = 0; - virtual void clear(size_t leafIndex) = 0; - virtual bool isEmpty() const = 0; - virtual size_t getCapacity() const = 0; - }; + static bool isPowerOf2(size_t n) { + return n > 0 && (n & (n - 1)) == 0; + } + + static constexpr uint64_t S_BIT_ONE = 1ULL; ///< Single bit constant + static constexpr size_t S_BITS_PER_LEAF_NODE = + 64; ///< Bits per uint64_t leaf - constexpr because this is a fundamental architectural constant + + static constexpr uint64_t S_BIAS_BIT_START = 1ULL; ///< Starting bit for bias traversal + static constexpr size_t S_BIAS_SHIFT_AMOUNT = 1; ///< Bit shift for bias progression +public: + static constexpr size_t S_INVALID_SIGNAL_INDEX = ~0ULL; ///< Returned when no signal is available + + enum class TreePath + { + Left = 1, + Right = 2 + }; /** - * @brief A lock-free binary tree for signal selection and management. - * - * A signal dispatcher that can handle large numbers of signals without - * locks. Suitable for work-stealing schedulers, event systems, or any scenario - * requiring atomic signal selection and processing from multiple threads. - * - * The key innovation is the tree structure - internal nodes track active signal counts - * in their subtrees, while leaf nodes pack 64 signals each into bit fields. 
- * This provides O(log n) signal selection with excellent cache coherence. - * - * Key features: - * - **Lock-free**: Multiple threads can set/select signals concurrently - * - **Cache-friendly**: Entire tree lives in a contiguous array - * - **Scalable**: Supports LeafCapacity * 64 total signals - * - **Fair**: The bias system prevents signal starvation - * - * @tparam LeafCapacity Number of leaf nodes (must be power of 2). Total signal capacity is LeafCapacity * 64. - * - * @code - * // Complete multi-threaded workflow - * SignalTree tree(4); // 256 signals capacity - * std::atomic running{true}; - * - * // Producer threads: submit work signals - * std::thread producer([&tree]() { - * for (int i = 0; i < 100; ++i) { - * tree.set(i % 256); // Set signal - * std::this_thread::sleep_for(1ms); - * } - * }); - * - * // Consumer threads: process signals with fairness - * std::thread consumer([&tree, &running]() { - * uint64_t bias = 0; - * while (running) { - * auto [index, found] = tree.select(bias); - * if (found) { - * processSignal(index); - * // Rotate bias for fairness - * bias = (bias << 1) | (bias >> 63); - * } else { - * std::this_thread::yield(); - * } - * } - * }); - * - * producer.join(); - * running = false; - * consumer.join(); - * @endcode + * @brief Constructs a SignalTree with specified leaf capacity + * @param leafCapacity Number of leaf nodes (must be power of 2) + * @throws std::invalid_argument if leafCapacity is not a power of 2 */ - class SignalTree : public SignalTreeBase { - private: - const size_t _leafCapacity; - const size_t _totalNodes; - std::unique_ptr[]> _nodes; ///< Tree storage: internal nodes are counters, leaf nodes are bitmaps - - /** - * @brief Runtime power-of-2 validation helper - * - * This is static because it's a pure utility function with no dependencies - * on instance state. Making it static clarifies that it has no side effects - * and can be used during construction before the object is fully initialized. 
- * - * @param n Number to check - * @return true if n is a power of 2 and greater than 0 - */ - static bool isPowerOf2(size_t n) { - return n > 0 && (n & (n - 1)) == 0; + explicit SignalTree(size_t leafCapacity) + : _leafCapacity(leafCapacity), + _totalNodes(2 * leafCapacity - 1), + _nodes(std::make_unique[]>(_totalNodes)) { + if (!isPowerOf2(_leafCapacity)) { + throw std::invalid_argument("LeafCapacity must be a power of 2 and greater than 0"); } - static constexpr uint64_t S_BIT_ONE = 1ULL; ///< Single bit constant - static constexpr size_t S_BITS_PER_LEAF_NODE = 64; ///< Bits per uint64_t leaf - constexpr because this is a fundamental architectural constant - - static constexpr uint64_t S_BIAS_BIT_START = 1ULL; ///< Starting bit for bias traversal - static constexpr size_t S_BIAS_SHIFT_AMOUNT = 1; ///< Bit shift for bias progression - - public: - static constexpr size_t S_INVALID_SIGNAL_INDEX = ~0ULL; ///< Returned when no signal is available - - enum class TreePath { - Left = 1, - Right = 2 - }; - /** - * @brief Constructs a SignalTree with specified leaf capacity - * @param leafCapacity Number of leaf nodes (must be power of 2) - * @throws std::invalid_argument if leafCapacity is not a power of 2 - */ - explicit SignalTree(size_t leafCapacity): - _leafCapacity(leafCapacity) - , _totalNodes(2 * leafCapacity - 1) - , _nodes(std::make_unique[]>(_totalNodes)) { - - if (!isPowerOf2(_leafCapacity)) { - throw std::invalid_argument("LeafCapacity must be a power of 2 and greater than 0"); - } - - // Initialize all atomics to 0 - for (size_t i = 0; i < _totalNodes; ++i) { - _nodes[i].store(0, std::memory_order_relaxed); - } - } - - SignalTree(const SignalTree&) = delete; - SignalTree& operator=(const SignalTree&) = delete; - SignalTree(SignalTree&&) = delete; // Deleted move constructor - SignalTree& operator=(SignalTree&&) = delete; // Deleted move assignment operator - - /** - * @brief Gets direct access to the root node - * - * Advanced use only. 
Root value = total active signals. - * - * @return Reference to the atomic root node counter - */ - std::atomic& getRoot() { - return _nodes[0]; - } - - /** - * @brief Gets a child node given parent index and direction - * - * Internal navigation helper for tree traversal. - * - * @param parent Index of the parent node - * @param path Which child to get (Left or Right) - * @return Reference to the child node - */ - std::atomic& getChild(size_t parent, TreePath path) { - return _nodes[parent * 2 + static_cast(path)]; + // Initialize all atomics to 0 + for (size_t i = 0; i < _totalNodes; ++i) { + _nodes[i].store(0, std::memory_order_relaxed); } + } - /** - * @brief Calculates child node index without accessing the node - * - * @param parent Index of the parent node - * @param path Which child (Left or Right) - * @return Index of the child node in the internal array - */ - size_t getChildIndex(size_t parent, TreePath path) const { - size_t childIndex = parent * 2 + static_cast(path); - ENTROPY_ASSERT(childIndex < _totalNodes, "Child index out of bounds!"); - return childIndex; - } + SignalTree(const SignalTree&) = delete; + SignalTree& operator=(const SignalTree&) = delete; + SignalTree(SignalTree&&) = delete; // Deleted move constructor + SignalTree& operator=(SignalTree&&) = delete; // Deleted move assignment operator - /** - * @brief Direct access to any node by index - * - * Low-level access. Internal nodes: 0 to LeafCapacity-2, leaf nodes: rest. - * - * @param index Node index in the internal array - * @return Reference to the atomic node - */ - std::atomic& getNode(size_t index) { - ENTROPY_ASSERT(index < _totalNodes, "Node index out of bounds!"); - return _nodes[index]; - } + /** + * @brief Gets direct access to the root node + * + * Advanced use only. Root value = total active signals. 
+ * + * @return Reference to the atomic root node counter + */ + std::atomic& getRoot() { + return _nodes[0]; + } - /** - * @brief Calculates parent node index for tree traversal - * - * Formula: parent = (child - 1) / 2 - * - * @param child Index of the child node (must not be root) - * @return Index of the parent node - */ - size_t getParentIndex(size_t child) const { - ENTROPY_ASSERT(child > 0 && child < _totalNodes, "Cannot get parent of root or invalid index!"); - return (child - 1) / 2; - } + /** + * @brief Gets a child node given parent index and direction + * + * Internal navigation helper for tree traversal. + * + * @param parent Index of the parent node + * @param path Which child to get (Left or Right) + * @return Reference to the child node + */ + std::atomic& getChild(size_t parent, TreePath path) { + return _nodes[parent * 2 + static_cast(path)]; + } - /** - * @brief Gets the number of leaf nodes in the tree - * @return LeafCapacity template parameter value - */ - size_t getLeafCapacity() const { - return _leafCapacity; - } + /** + * @brief Calculates child node index without accessing the node + * + * @param parent Index of the parent node + * @param path Which child (Left or Right) + * @return Index of the child node in the internal array + */ + size_t getChildIndex(size_t parent, TreePath path) const { + size_t childIndex = parent * 2 + static_cast(path); + ENTROPY_ASSERT(childIndex < _totalNodes, "Child index out of bounds!"); + return childIndex; + } - /** - * @brief Gets total number of nodes in the tree (internal + leaf) - * @return Total node count (always 2*LeafCapacity - 1) - */ - size_t getTotalNodes() const { - return _totalNodes; - } + /** + * @brief Direct access to any node by index + * + * Low-level access. Internal nodes: 0 to LeafCapacity-2, leaf nodes: rest. 
+ * + * @param index Node index in the internal array + * @return Reference to the atomic node + */ + std::atomic& getNode(size_t index) { + ENTROPY_ASSERT(index < _totalNodes, "Node index out of bounds!"); + return _nodes[index]; + } + + /** + * @brief Calculates parent node index for tree traversal + * + * Formula: parent = (child - 1) / 2 + * + * @param child Index of the child node (must not be root) + * @return Index of the parent node + */ + size_t getParentIndex(size_t child) const { + ENTROPY_ASSERT(child > 0 && child < _totalNodes, "Cannot get parent of root or invalid index!"); + return (child - 1) / 2; + } + /** + * @brief Gets the number of leaf nodes in the tree + * @return LeafCapacity template parameter value + */ + size_t getLeafCapacity() const { + return _leafCapacity; + } + + /** + * @brief Gets total number of nodes in the tree (internal + leaf) + * @return Total node count (always 2*LeafCapacity - 1) + */ + size_t getTotalNodes() const { + return _totalNodes; + } - /** - * @brief Sets a signal as active in the tree - * - * Thread-safe and lock-free. Updates internal counters up to root. - * - * @param leafIndex Signal index to set (0 to LeafCapacity*64-1) - * - * @code - * // Worker thread marks task 42 as ready - * signals.set(42); - * - * // Multiple threads can set signals concurrently - * std::thread t1([&]() { signals.set(10); }); - * std::thread t2([&]() { signals.set(20); }); - * @endcode - */ - void set(size_t leafIndex) override { - // 1. Input Validation - ENTROPY_ASSERT(leafIndex < _leafCapacity * S_BITS_PER_LEAF_NODE, "Leaf index out of bounds!"); - - // 2. Calculate Leaf Node Array Index - // Leaf nodes start after all internal nodes. - // The number of internal nodes is LeafCapacity - 1 (for a complete binary tree) - // So, the first leaf node is at index (LeafCapacity - 1).\ - // Each leaf node (uint64_t) can hold 64 signals. 
- size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; - size_t leafNodeOffsetInArray = leafIndex / S_BITS_PER_LEAF_NODE; // Which uint64_t leaf node - size_t actualLeafNodeIndex = leafNodeArrayStartIndex + leafNodeOffsetInArray; - - // 3. Calculate Bit Position within Leaf Node - size_t bitPos = leafIndex % S_BITS_PER_LEAF_NODE; - - // 4. Atomically Set Bit and get the OLD value - uint64_t oldValue = _nodes[actualLeafNodeIndex].fetch_or(S_BIT_ONE << bitPos, std::memory_order_release); - - // 5. Propagate Up only if the bit was not already set - if (!(oldValue & (S_BIT_ONE << bitPos))) { - // The bit was 0 before, so we need to update the counters. - size_t currentNodeIndex = actualLeafNodeIndex; - while (currentNodeIndex > 0) { // Stop when we reach the root (index 0) - size_t parentIndex = getParentIndex(currentNodeIndex); - // Atomically increment the parent's counter - // Use memory_order_relaxed as we are only concerned with the total count, - // and the ordering is handled by the leaf node's fetch_or. - _nodes[parentIndex].fetch_add(1, std::memory_order_relaxed); - currentNodeIndex = parentIndex; - } + /** + * @brief Sets a signal as active in the tree + * + * Thread-safe and lock-free. Updates internal counters up to root. + * + * @param leafIndex Signal index to set (0 to LeafCapacity*64-1) + * + * @code + * // Worker thread marks task 42 as ready + * signals.set(42); + * + * // Multiple threads can set signals concurrently + * std::thread t1([&]() { signals.set(10); }); + * std::thread t2([&]() { signals.set(20); }); + * @endcode + */ + void set(size_t leafIndex) override { + // 1. Input Validation + ENTROPY_ASSERT(leafIndex < _leafCapacity * S_BITS_PER_LEAF_NODE, "Leaf index out of bounds!"); + + // 2. Calculate Leaf Node Array Index + // Leaf nodes start after all internal nodes. 
+        // The number of internal nodes is LeafCapacity - 1 (for a complete binary tree)
+        // So, the first leaf node is at index (LeafCapacity - 1).
+        // Each leaf node (uint64_t) can hold 64 signals.
+        size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity;
+        size_t leafNodeOffsetInArray = leafIndex / S_BITS_PER_LEAF_NODE; // Which uint64_t leaf node
+        size_t actualLeafNodeIndex = leafNodeArrayStartIndex + leafNodeOffsetInArray;
+
+        // 3. Calculate Bit Position within Leaf Node
+        size_t bitPos = leafIndex % S_BITS_PER_LEAF_NODE;
+
+        // 4. Atomically Set Bit and get the OLD value
+        uint64_t oldValue = _nodes[actualLeafNodeIndex].fetch_or(S_BIT_ONE << bitPos, std::memory_order_release);
+
+        // 5. Propagate Up only if the bit was not already set
+        if (!(oldValue & (S_BIT_ONE << bitPos))) {
+            // The bit was 0 before, so we need to update the counters.
+            size_t currentNodeIndex = actualLeafNodeIndex;
+            while (currentNodeIndex > 0) { // Stop when we reach the root (index 0)
+                size_t parentIndex = getParentIndex(currentNodeIndex);
+                // Atomically increment the parent's counter
+                // Use memory_order_relaxed as we are only concerned with the total count,
+                // and the ordering is handled by the leaf node's fetch_or.
+                _nodes[parentIndex].fetch_add(1, std::memory_order_relaxed);
+                currentNodeIndex = parentIndex;
             }
         }
+    }

-        /**
-         * @brief Selects and clears an active signal from the tree
-         *
-         * Atomically finds, clears and returns signal index. Lock-free. Bias controls
-         * fairness by guiding traversal (each bit chooses left/right at each level).
-         *
-         * @param biasFlags Bit pattern controlling traversal (LSB at root, shifts left)
-         * @return Pair of {signal_index, tree_is_empty}. 
signal_index is S_INVALID_SIGNAL_INDEX if none available - * - * @code - * // Basic work-stealing loop with empty detection - * uint64_t bias = 0; - * while (running) { - * auto [signal, isEmpty] = signals.select(bias); - * if (signal != SignalTree<4>::S_INVALID_SIGNAL_INDEX) { - * processWork(signal); - * if (isEmpty) { - * // Tree is now empty, might want to steal work - * } - * } else { - * // No work available, steal from another queue - * } - * bias = rotateBias(bias); // Ensure fairness - * } - * @endcode - */ - std::pair select(uint64_t& biasFlags) override { - size_t currentNodeIndex = 0; // Start at the root - uint64_t localBiasHint = 0; // Build up bias hint during traversal - uint64_t currentBiasBit = S_BIAS_BIT_START; // Start with LSB - - // Traverse down the tree to find the leaf node - while (currentNodeIndex < _totalNodes - _leafCapacity) { // While not a leaf node - uint64_t leftChildValue = _nodes[getChildIndex(currentNodeIndex, TreePath::Left)].load(std::memory_order_acquire); - uint64_t rightChildValue = _nodes[getChildIndex(currentNodeIndex, TreePath::Right)].load(std::memory_order_acquire); - - // Use current bias bit to decide which child to prioritize (LSB approach) - bool biasRight = (biasFlags & currentBiasBit) != 0; - bool chooseRight = (biasRight && rightChildValue > 0) || (leftChildValue == 0); - - // Build bias hint: set current bit if right child has work - if (rightChildValue > 0) { - localBiasHint |= currentBiasBit; - } - - if (chooseRight && rightChildValue > 0) { - currentNodeIndex = getChildIndex(currentNodeIndex, TreePath::Right); - } else if (leftChildValue > 0) { - currentNodeIndex = getChildIndex(currentNodeIndex, TreePath::Left); - } else { - return {S_INVALID_SIGNAL_INDEX, true}; // No active signals, tree is empty - } - - currentBiasBit <<= S_BIAS_SHIFT_AMOUNT; // Move to next higher bit (LSB to MSB) + /** + * @brief Selects and clears an active signal from the tree + * + * Atomically finds, clears and returns signal 
index. Lock-free. Bias controls
+     * fairness by guiding traversal (each bit chooses left/right at each level).
+     *
+     * @param biasFlags Bit pattern controlling traversal (LSB at root, shifts left)
+     * @return Pair of {signal_index, tree_is_empty}. signal_index is S_INVALID_SIGNAL_INDEX if none available
+     *
+     * @code
+     * // Basic work-stealing loop with empty detection
+     * uint64_t bias = 0;
+     * while (running) {
+     *     auto [signal, isEmpty] = signals.select(bias);
+     *     if (signal != SignalTree<4>::S_INVALID_SIGNAL_INDEX) {
+     *         processWork(signal);
+     *         if (isEmpty) {
+     *             // Tree is now empty, might want to steal work
+     *         }
+     *     } else {
+     *         // No work available, steal from another queue
+     *     }
+     *     bias = rotateBias(bias); // Ensure fairness
+     * }
+     * @endcode
+     */
+    std::pair<size_t, bool> select(uint64_t& biasFlags) override {
+        size_t currentNodeIndex = 0;                // Start at the root
+        uint64_t localBiasHint = 0;                 // Build up bias hint during traversal
+        uint64_t currentBiasBit = S_BIAS_BIT_START; // Start with LSB
+
+        // Traverse down the tree to find the leaf node
+        while (currentNodeIndex < _totalNodes - _leafCapacity) { // While not a leaf node
+            uint64_t leftChildValue =
+                _nodes[getChildIndex(currentNodeIndex, TreePath::Left)].load(std::memory_order_acquire);
+            uint64_t rightChildValue =
+                _nodes[getChildIndex(currentNodeIndex, TreePath::Right)].load(std::memory_order_acquire);
+
+            // Use current bias bit to decide which child to prioritize (LSB approach)
+            bool biasRight = (biasFlags & currentBiasBit) != 0;
+            bool chooseRight = (biasRight && rightChildValue > 0) || (leftChildValue == 0);
+
+            // Build bias hint: set current bit if right child has work
+            if (rightChildValue > 0) {
+                localBiasHint |= currentBiasBit;
            }
-            // Now current_node_index is a leaf node (or the start of a block of leaf nodes)
-            // We need to find an active bit within this leaf node's uint64_t
-
-            uint64_t leafValueExpected;
-            size_t bitPos;
-            bool success = false;
-
-            // Retry loop for compare_exchange_weak
-            do 
{ - leafValueExpected = _nodes[currentNodeIndex].load(std::memory_order_acquire); - if (leafValueExpected == 0) { - return {S_INVALID_SIGNAL_INDEX, true}; // Leaf node became empty, no signal found - } - - // Find the first set bit (LSB) using C++20 standard function - bitPos = std::countr_zero(leafValueExpected); - - // Attempt to atomically clear the bit - // If this fails, leafValueExpected is updated with the current value, and the loop retries. - success = _nodes[currentNodeIndex].compare_exchange_weak(leafValueExpected, leafValueExpected & ~(S_BIT_ONE << bitPos), - std::memory_order_release, std::memory_order_relaxed); - } while (!success); // Keep retrying until compare_exchange_weak succeeds - - // Update caller's bias with the pattern we found - biasFlags = localBiasHint; - - // Calculate global leaf index - size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; - size_t leafNodeOffsetInArray = currentNodeIndex - leafNodeArrayStartIndex; - size_t globalLeafIndex = (leafNodeOffsetInArray * S_BITS_PER_LEAF_NODE) + bitPos; - - // Propagate Up (Decrement Counters) - size_t tempNodeIndex = currentNodeIndex; - while (tempNodeIndex > 0) { - size_t parentIndex = getParentIndex(tempNodeIndex); - _nodes[parentIndex].fetch_sub(1, std::memory_order_relaxed); - tempNodeIndex = parentIndex; + if (chooseRight && rightChildValue > 0) { + currentNodeIndex = getChildIndex(currentNodeIndex, TreePath::Right); + } else if (leftChildValue > 0) { + currentNodeIndex = getChildIndex(currentNodeIndex, TreePath::Left); + } else { + return {S_INVALID_SIGNAL_INDEX, true}; // No active signals, tree is empty } - bool treeIsEmpty = (_nodes[0].load(std::memory_order_acquire) == 0); - return {globalLeafIndex, treeIsEmpty}; - } - /** - * @brief Alias for set() - signals a contract is ready for execution - * @param leafIndex Signal index to signal (0 to LeafCapacity*64-1) - */ - void signal(size_t leafIndex) { - set(leafIndex); + currentBiasBit <<= S_BIAS_SHIFT_AMOUNT; // Move to 
next higher bit (LSB to MSB) } - /** - * @brief Clears a signal from the tree without selecting it - * @param leafIndex Signal index to clear (0 to LeafCapacity*64-1) - */ - void clear(size_t leafIndex) override { - // Input validation - ENTROPY_ASSERT(leafIndex < _leafCapacity * S_BITS_PER_LEAF_NODE, "Leaf index out of bounds!"); - - // Calculate leaf node position - size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; - size_t leafNodeOffsetInArray = leafIndex / S_BITS_PER_LEAF_NODE; - size_t actualLeafNodeIndex = leafNodeArrayStartIndex + leafNodeOffsetInArray; - - // Calculate bit position - size_t bitPos = leafIndex % S_BITS_PER_LEAF_NODE; - - // Atomically clear the bit - uint64_t oldValue = _nodes[actualLeafNodeIndex].fetch_and(~(S_BIT_ONE << bitPos), std::memory_order_release); - - // Only propagate if the bit was actually set - if (oldValue & (S_BIT_ONE << bitPos)) { - // Propagate up (decrement counters) - size_t currentNodeIndex = actualLeafNodeIndex; - while (currentNodeIndex > 0) { - size_t parentIndex = getParentIndex(currentNodeIndex); - _nodes[parentIndex].fetch_sub(1, std::memory_order_relaxed); - currentNodeIndex = parentIndex; - } + // Now current_node_index is a leaf node (or the start of a block of leaf nodes) + // We need to find an active bit within this leaf node's uint64_t + + uint64_t leafValueExpected; + size_t bitPos; + bool success = false; + + // Retry loop for compare_exchange_weak + do { + leafValueExpected = _nodes[currentNodeIndex].load(std::memory_order_acquire); + if (leafValueExpected == 0) { + return {S_INVALID_SIGNAL_INDEX, true}; // Leaf node became empty, no signal found } - } - /** - * @brief Checks if the tree has no active signals - * @return true if no signals are set - */ - bool isEmpty() const override { - return _nodes[0].load(std::memory_order_acquire) == 0; + // Find the first set bit (LSB) using C++20 standard function + bitPos = std::countr_zero(leafValueExpected); + + // Attempt to atomically clear the 
bit + // If this fails, leafValueExpected is updated with the current value, and the loop retries. + success = _nodes[currentNodeIndex].compare_exchange_weak( + leafValueExpected, leafValueExpected & ~(S_BIT_ONE << bitPos), std::memory_order_release, + std::memory_order_relaxed); + } while (!success); // Keep retrying until compare_exchange_weak succeeds + + // Update caller's bias with the pattern we found + biasFlags = localBiasHint; + + // Calculate global leaf index + size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; + size_t leafNodeOffsetInArray = currentNodeIndex - leafNodeArrayStartIndex; + size_t globalLeafIndex = (leafNodeOffsetInArray * S_BITS_PER_LEAF_NODE) + bitPos; + + // Propagate Up (Decrement Counters) + size_t tempNodeIndex = currentNodeIndex; + while (tempNodeIndex > 0) { + size_t parentIndex = getParentIndex(tempNodeIndex); + _nodes[parentIndex].fetch_sub(1, std::memory_order_relaxed); + tempNodeIndex = parentIndex; } + bool treeIsEmpty = (_nodes[0].load(std::memory_order_acquire) == 0); + return {globalLeafIndex, treeIsEmpty}; + } - /** - * @brief Gets the total capacity of this SignalTree - * @return Maximum number of signals (LeafCapacity * 64) - */ - size_t getCapacity() const override { - return _leafCapacity * S_BITS_PER_LEAF_NODE; + /** + * @brief Alias for set() - signals a contract is ready for execution + * @param leafIndex Signal index to signal (0 to LeafCapacity*64-1) + */ + void signal(size_t leafIndex) { + set(leafIndex); + } + + /** + * @brief Clears a signal from the tree without selecting it + * @param leafIndex Signal index to clear (0 to LeafCapacity*64-1) + */ + void clear(size_t leafIndex) override { + // Input validation + ENTROPY_ASSERT(leafIndex < _leafCapacity * S_BITS_PER_LEAF_NODE, "Leaf index out of bounds!"); + + // Calculate leaf node position + size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; + size_t leafNodeOffsetInArray = leafIndex / S_BITS_PER_LEAF_NODE; + size_t actualLeafNodeIndex = 
leafNodeArrayStartIndex + leafNodeOffsetInArray; + + // Calculate bit position + size_t bitPos = leafIndex % S_BITS_PER_LEAF_NODE; + + // Atomically clear the bit + uint64_t oldValue = _nodes[actualLeafNodeIndex].fetch_and(~(S_BIT_ONE << bitPos), std::memory_order_release); + + // Only propagate if the bit was actually set + if (oldValue & (S_BIT_ONE << bitPos)) { + // Propagate up (decrement counters) + size_t currentNodeIndex = actualLeafNodeIndex; + while (currentNodeIndex > 0) { + size_t parentIndex = getParentIndex(currentNodeIndex); + _nodes[parentIndex].fetch_sub(1, std::memory_order_relaxed); + currentNodeIndex = parentIndex; + } } - - /** - * @brief Constant for invalid signal (alias for compatibility) - */ - static constexpr size_t INVALID_SIGNAL = S_INVALID_SIGNAL_INDEX; - }; + } + + /** + * @brief Checks if the tree has no active signals + * @return true if no signals are set + */ + bool isEmpty() const override { + return _nodes[0].load(std::memory_order_acquire) == 0; + } -} // Concurrency -} // Core -} // EntropyEngine + /** + * @brief Gets the total capacity of this SignalTree + * @return Maximum number of signals (LeafCapacity * 64) + */ + size_t getCapacity() const override { + return _leafCapacity * S_BITS_PER_LEAF_NODE; + } + + /** + * @brief Constant for invalid signal (alias for compatibility) + */ + static constexpr size_t INVALID_SIGNAL = S_INVALID_SIGNAL_INDEX; +}; +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/SpinningDirectScheduler.h b/src/Concurrency/SpinningDirectScheduler.h index 356352b..cb472d8 100644 --- a/src/Concurrency/SpinningDirectScheduler.h +++ b/src/Concurrency/SpinningDirectScheduler.h @@ -10,7 +10,7 @@ /** * @file SpinningDirectScheduler.h * @brief CPU-burning scheduler for benchmarking thread wake/sleep overhead - * + * * This file contains SpinningDirectScheduler, a diagnostic scheduler that never * sleeps threads. For benchmarking sleep/wake overhead. 
*/ @@ -18,65 +18,70 @@ #pragma once #include "IWorkScheduler.h" +#include "WorkContractGroup.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief CPU-intensive scheduler that eliminates sleep/wake overhead for benchmarking - * + * * SpinningDirectScheduler extends the DirectScheduler concept by eliminating all thread * sleep operations. While DirectScheduler allows threads to sleep when no work is available, * this implementation maintains continuous CPU activity through spinning, even when work * queues are empty. - * + * * Purpose: This scheduler specifically addresses the benchmarking requirement to measure * and isolate thread sleep/wake overhead by providing a comparison baseline where such * overhead is completely eliminated. - * + * * Characteristics: * - CPU usage when idle: 100% per thread * - No thread sleep/wake cycles * - Threads remain active continuously - * + * * Recommended use cases: * - Diagnosing impact of OS thread scheduling * - Measuring sleep/wake cycle overhead in workloads * - Testing scenarios requiring minimal latency * - Comparative benchmarking against DirectScheduler - * + * * Not recommended for: * - Production systems (excessive CPU consumption) * - Battery-powered devices (rapid power drain) * - Shared computing environments (resource monopolization) * - Any scenario requiring power efficiency - * + * * Benchmarking insight: Comparing this scheduler against DirectScheduler reveals OS-specific * thread wake latencies, which vary significantly by operating system and system load. - * + * * @code * // Use this to compare against DirectScheduler * auto directScheduler = std::make_unique(config); * WorkService directService(config, std::move(directScheduler)); * // Run benchmark... 
- * + * * auto spinningScheduler = std::make_unique(config); * WorkService spinningService(config, std::move(spinningScheduler)); * // Run same benchmark... - * + * * // The difference in execution time = thread wake overhead * @endcode */ -class SpinningDirectScheduler : public IWorkScheduler { +class SpinningDirectScheduler : public IWorkScheduler +{ public: /** * @brief Creates a scheduler that maintains continuous thread activity - * + * * Config is ignored - always operates in continuous spinning mode. - * + * * @param config Accepted for interface compatibility but unused - * + * * @code * // Configuration parameters are ignored * IWorkScheduler::Config config; @@ -85,22 +90,22 @@ class SpinningDirectScheduler : public IWorkScheduler { * @endcode */ explicit SpinningDirectScheduler(const Config& config) {} - + /** * @brief Destroys the scheduler */ ~SpinningDirectScheduler() override = default; - + /** * @brief Selects the first group with work, never sleeps - * + * * Like DirectScheduler but shouldSleep is ALWAYS false. Keeps threads * spinning to maintain CPU cache residency at cost of cycles. 
- * + * * @param groups List of work groups to check * @param context Thread context (ignored) * @return First group with work, or {nullptr, false} to keep spinning - * + * * @code * // When there's work, behaves like DirectScheduler * auto result = scheduler->selectNextGroup(groups, context); @@ -113,28 +118,27 @@ class SpinningDirectScheduler : public IWorkScheduler { * } * @endcode */ - ScheduleResult selectNextGroup( - const std::vector& groups, - const SchedulingContext& context - ) override { + ScheduleResult selectNextGroup(const std::vector& groups, + const SchedulingContext& context) override { for (auto* group : groups) { if (group && group->scheduledCount() > 0) { - return {group, false}; // Never sleep + return {group, false}; // Never sleep } } // Even when no work, don't sleep - just spin - return {nullptr, false}; // shouldSleep = false + return {nullptr, false}; // shouldSleep = false } - + /** * @brief Returns the scheduler's name - * + * * @return "SpinningDirect" */ - const char* getName() const override { return "SpinningDirect"; } + const char* getName() const override { + return "SpinningDirect"; + } }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkContractGroup.cpp b/src/Concurrency/WorkContractGroup.cpp index 6b027d7..3631097 100644 --- a/src/Concurrency/WorkContractGroup.cpp +++ b/src/Concurrency/WorkContractGroup.cpp @@ -8,949 +8,940 @@ */ #include "WorkContractGroup.h" -#include "../TypeSystem/TypeID.h" -#include -#include "IConcurrencyProvider.h" + #include #include +#include #include #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { - static size_t roundUpToPowerOf2(size_t n) { - if (n <= 1) return 1; - return static_cast(std::pow(2, std::ceil(std::log2(n)))); - } - - // Helper function to create appropriately sized SignalTree - std::unique_ptr 
WorkContractGroup::createSignalTree(size_t capacity) { - size_t leafCount = (capacity + 63) / 64; - // Ensure minimum of 2 leaves to avoid single-node tree bug - // where the same node serves as both root counter and leaf bitmap - size_t powerOf2 = std::max(roundUpToPowerOf2(leafCount), size_t(2)); - - return std::make_unique(powerOf2); - } - - WorkContractGroup::WorkContractGroup(size_t capacity, std::string name) - : _capacity(capacity) - , _contracts(capacity) - , _name(name) { - - // Create SignalTree for ready contracts - _readyContracts = createSignalTree(capacity); - - // Create SignalTree for main thread contracts - _mainThreadContracts = createSignalTree(capacity); - - // Initialize the lock-free free list - // Build a linked list through all slots - for (size_t i = 0; i < _capacity - 1; ++i) { - _contracts[i].nextFree.store(static_cast(i + 1), std::memory_order_relaxed); - } - // Last slot points to INVALID_INDEX - _contracts[_capacity - 1].nextFree.store(INVALID_INDEX, std::memory_order_relaxed); - - // Head points to first slot - _freeListHead.store(0, std::memory_order_relaxed); - } - - WorkContractGroup::WorkContractGroup(WorkContractGroup&& other) noexcept - : _capacity(other._capacity) - , _contracts(std::move(other._contracts)) - , _readyContracts(std::move(other._readyContracts)) - , _mainThreadContracts(std::move(other._mainThreadContracts)) - , _freeListHead(other._freeListHead.load(std::memory_order_acquire)) - , _activeCount(other._activeCount.load(std::memory_order_acquire)) - , _scheduledCount(other._scheduledCount.load(std::memory_order_acquire)) - , _executingCount(other._executingCount.load(std::memory_order_acquire)) - , _selectingCount(other._selectingCount.load(std::memory_order_acquire)) - , _mainThreadScheduledCount(other._mainThreadScheduledCount.load(std::memory_order_acquire)) - , _mainThreadExecutingCount(other._mainThreadExecutingCount.load(std::memory_order_acquire)) - , 
_mainThreadSelectingCount(other._mainThreadSelectingCount.load(std::memory_order_acquire)) - , _name(std::move(other._name)) - , _concurrencyProvider(other._concurrencyProvider) - , _stopping(other._stopping.load(std::memory_order_acquire)) - { - // Clear the other object to prevent double cleanup +#include "../TypeSystem/TypeID.h" +#include "IConcurrencyProvider.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ +static size_t roundUpToPowerOf2(size_t n) { + if (n <= 1) return 1; + return static_cast(std::pow(2, std::ceil(std::log2(n)))); +} + +// Helper function to create appropriately sized SignalTree +std::unique_ptr WorkContractGroup::createSignalTree(size_t capacity) { + size_t leafCount = (capacity + 63) / 64; + // Ensure minimum of 2 leaves to avoid single-node tree bug + // where the same node serves as both root counter and leaf bitmap + size_t powerOf2 = std::max(roundUpToPowerOf2(leafCount), size_t(2)); + + return std::make_unique(powerOf2); +} + +WorkContractGroup::WorkContractGroup(size_t capacity, std::string name) + : _capacity(capacity), _contracts(capacity), _name(name) { + // Create SignalTree for ready contracts + _readyContracts = createSignalTree(capacity); + + // Create SignalTree for main thread contracts + _mainThreadContracts = createSignalTree(capacity); + + // Initialize the lock-free free list + // Build a linked list through all slots + for (size_t i = 0; i < _capacity - 1; ++i) { + _contracts[i].nextFree.store(static_cast(i + 1), std::memory_order_relaxed); + } + // Last slot points to INVALID_INDEX + _contracts[_capacity - 1].nextFree.store(INVALID_INDEX, std::memory_order_relaxed); + + // Head points to first slot + _freeListHead.store(0, std::memory_order_relaxed); +} + +WorkContractGroup::WorkContractGroup(WorkContractGroup&& other) noexcept + : _capacity(other._capacity), + _contracts(std::move(other._contracts)), + _readyContracts(std::move(other._readyContracts)), + 
_mainThreadContracts(std::move(other._mainThreadContracts)), + _freeListHead(other._freeListHead.load(std::memory_order_acquire)), + _activeCount(other._activeCount.load(std::memory_order_acquire)), + _scheduledCount(other._scheduledCount.load(std::memory_order_acquire)), + _executingCount(other._executingCount.load(std::memory_order_acquire)), + _selectingCount(other._selectingCount.load(std::memory_order_acquire)), + _mainThreadScheduledCount(other._mainThreadScheduledCount.load(std::memory_order_acquire)), + _mainThreadExecutingCount(other._mainThreadExecutingCount.load(std::memory_order_acquire)), + _mainThreadSelectingCount(other._mainThreadSelectingCount.load(std::memory_order_acquire)), + _name(std::move(other._name)), + _concurrencyProvider(other._concurrencyProvider), + _stopping(other._stopping.load(std::memory_order_acquire)) { + // Clear the other object to prevent double cleanup + other._concurrencyProvider = nullptr; + other._stopping.store(true, std::memory_order_release); + other._activeCount.store(0, std::memory_order_release); + other._scheduledCount.store(0, std::memory_order_release); + other._executingCount.store(0, std::memory_order_release); + other._selectingCount.store(0, std::memory_order_release); + other._mainThreadScheduledCount.store(0, std::memory_order_release); + other._mainThreadExecutingCount.store(0, std::memory_order_release); + other._mainThreadSelectingCount.store(0, std::memory_order_release); +} + +WorkContractGroup& WorkContractGroup::operator=(WorkContractGroup&& other) noexcept { + if (this != &other) { + // Clean up current state + stop(); + wait(); + // Clear the provider reference + _concurrencyProvider = nullptr; + + // Move from other + const_cast(_capacity) = other._capacity; + _contracts = std::move(other._contracts); + _readyContracts = std::move(other._readyContracts); + _mainThreadContracts = std::move(other._mainThreadContracts); + _freeListHead.store(other._freeListHead.load(std::memory_order_acquire), 
std::memory_order_release); + _activeCount.store(other._activeCount.load(std::memory_order_acquire), std::memory_order_release); + _scheduledCount.store(other._scheduledCount.load(std::memory_order_acquire), std::memory_order_release); + _executingCount.store(other._executingCount.load(std::memory_order_acquire), std::memory_order_release); + _selectingCount.store(other._selectingCount.load(std::memory_order_acquire), std::memory_order_release); + _mainThreadScheduledCount.store(other._mainThreadScheduledCount.load(std::memory_order_acquire), + std::memory_order_release); + _mainThreadExecutingCount.store(other._mainThreadExecutingCount.load(std::memory_order_acquire), + std::memory_order_release); + _mainThreadSelectingCount.store(other._mainThreadSelectingCount.load(std::memory_order_acquire), + std::memory_order_release); + _name = std::move(other._name); + _concurrencyProvider = other._concurrencyProvider; + _stopping.store(other._stopping.load(std::memory_order_acquire), std::memory_order_release); + + // Clear the other object other._concurrencyProvider = nullptr; other._stopping.store(true, std::memory_order_release); other._activeCount.store(0, std::memory_order_release); other._scheduledCount.store(0, std::memory_order_release); other._executingCount.store(0, std::memory_order_release); - other._selectingCount.store(0, std::memory_order_release); other._mainThreadScheduledCount.store(0, std::memory_order_release); other._mainThreadExecutingCount.store(0, std::memory_order_release); + other._selectingCount.store(0, std::memory_order_release); other._mainThreadSelectingCount.store(0, std::memory_order_release); } - - WorkContractGroup& WorkContractGroup::operator=(WorkContractGroup&& other) noexcept { - if (this != &other) { - // Clean up current state - stop(); - wait(); - // Clear the provider reference - _concurrencyProvider = nullptr; - - // Move from other - const_cast(_capacity) = other._capacity; - _contracts = std::move(other._contracts); - 
_readyContracts = std::move(other._readyContracts); - _mainThreadContracts = std::move(other._mainThreadContracts); - _freeListHead.store(other._freeListHead.load(std::memory_order_acquire), std::memory_order_release); - _activeCount.store(other._activeCount.load(std::memory_order_acquire), std::memory_order_release); - _scheduledCount.store(other._scheduledCount.load(std::memory_order_acquire), std::memory_order_release); - _executingCount.store(other._executingCount.load(std::memory_order_acquire), std::memory_order_release); - _selectingCount.store(other._selectingCount.load(std::memory_order_acquire), std::memory_order_release); - _mainThreadScheduledCount.store(other._mainThreadScheduledCount.load(std::memory_order_acquire), std::memory_order_release); - _mainThreadExecutingCount.store(other._mainThreadExecutingCount.load(std::memory_order_acquire), std::memory_order_release); - _mainThreadSelectingCount.store(other._mainThreadSelectingCount.load(std::memory_order_acquire), std::memory_order_release); - _name = std::move(other._name); - _concurrencyProvider = other._concurrencyProvider; - _stopping.store(other._stopping.load(std::memory_order_acquire), std::memory_order_release); - - // Clear the other object - other._concurrencyProvider = nullptr; - other._stopping.store(true, std::memory_order_release); - other._activeCount.store(0, std::memory_order_release); - other._scheduledCount.store(0, std::memory_order_release); - other._executingCount.store(0, std::memory_order_release); - other._mainThreadScheduledCount.store(0, std::memory_order_release); - other._mainThreadExecutingCount.store(0, std::memory_order_release); - other._selectingCount.store(0, std::memory_order_release); - other._mainThreadSelectingCount.store(0, std::memory_order_release); - } - return *this; - } - - void WorkContractGroup::releaseAllContracts() { - // Iterate through all contract slots and release any that are still allocated or scheduled - for (uint32_t i = 0; i < _capacity; ++i) 
{ - auto& slot = _contracts[i]; - - // Check if this slot is occupied (not free) - ContractState currentState = slot.state.load(std::memory_order_acquire); - if (currentState != ContractState::Free) { - // Try to transition directly to Free state - ContractState expected = currentState; - if (slot.state.compare_exchange_strong(expected, ContractState::Free, - std::memory_order_acq_rel)) { - // Successfully transitioned, now clean up - bool isMainThread = (slot.executionType == ExecutionType::MainThread); - returnSlotToFreeList(i, currentState, isMainThread); - } - // If CAS failed, another thread (or our own iteration) already handled this slot - // This is fine - we just continue to the next slot - } - } - } - - void WorkContractGroup::unscheduleAllContracts() { - // Iterate through all contract slots and unschedule any that are scheduled - for (uint32_t i = 0; i < _capacity; ++i) { - auto& slot = _contracts[i]; - - // Check if this slot is scheduled - ContractState currentState = slot.state.load(std::memory_order_acquire); - if (currentState == ContractState::Scheduled) { - // Try to transition from Scheduled to Allocated - ContractState expected = ContractState::Scheduled; - if (slot.state.compare_exchange_strong(expected, ContractState::Allocated, - std::memory_order_acq_rel)) { - // Remove from appropriate ready set based on execution type - size_t newScheduledCount; - if (slot.executionType == ExecutionType::MainThread) { - _mainThreadContracts->clear(i); - newScheduledCount = _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } else { - _readyContracts->clear(i); - newScheduledCount = _scheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } - - // Notify waiters if all scheduled contracts are complete - if (newScheduledCount == 0) { - std::lock_guard lock(_waitMutex); - _waitCondition.notify_all(); - } - } - // If CAS failed, state changed - likely now executing, which is fine - } - } - } - - 
WorkContractGroup::~WorkContractGroup() { - // Stop accepting new work first - this prevents any new selections - stop(); - - // Wait for executing work to complete - // This ensures no thread is in the middle of selectForExecution - wait(); - - // Unschedule all scheduled contracts first (move them back to allocated state) - // This ensures we don't have contracts stuck in scheduled state - unscheduleAllContracts(); - - // Release all remaining contracts (allocated and any still scheduled) - // This ensures no contracts are left hanging when the group is destroyed - releaseAllContracts(); - - // Validate that all contracts have been properly cleaned up - ENTROPY_DEBUG_BLOCK( - size_t activeCount = _activeCount.load(std::memory_order_acquire); - ENTROPY_ASSERT(activeCount == 0, "WorkContractGroup destroyed with active contracts still allocated"); - - // Double-check that no threads are still selecting - size_t selectingCount = _selectingCount.load(std::memory_order_acquire); - ENTROPY_ASSERT(selectingCount == 0, "WorkContractGroup destroyed with threads still in selectForExecution"); - - size_t mainThreadSelectingCount = _mainThreadSelectingCount.load(std::memory_order_acquire); - ENTROPY_ASSERT(mainThreadSelectingCount == 0, "WorkContractGroup destroyed with threads still in selectForMainThreadExecution"); - ); - - // Then notify the concurrency provider to remove us from active groups - // CRITICAL: Read provider without holding lock to avoid ABBA deadlock - IConcurrencyProvider* provider = nullptr; - { - std::unique_lock lock(_concurrencyProviderMutex); - provider = _concurrencyProvider; - } - - if (provider) { - provider->notifyGroupDestroyed(this); - } - } + return *this; +} - WorkContractHandle WorkContractGroup::createContract(std::function work, ExecutionType executionType) { - // Pop a free slot from the lock-free stack (ABA-resistant with tagged head) - auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { - return (static_cast(tag) << 32) | 
static_cast(idx); - }; - auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; - auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; +void WorkContractGroup::releaseAllContracts() { + // Iterate through all contract slots and release any that are still allocated or scheduled + for (uint32_t i = 0; i < _capacity; ++i) { + auto& slot = _contracts[i]; - uint64_t head = _freeListHead.load(std::memory_order_acquire); - for (;;) { - uint32_t idx = headIndex(head); - if (idx == INVALID_INDEX) { - return WorkContractHandle(); // No free slots available - } - uint32_t next = _contracts[idx].nextFree.load(std::memory_order_acquire); - uint64_t newHead = packHead(next, headTag(head) + 1); - if (_freeListHead.compare_exchange_weak(head, newHead, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - head = newHead; // Not necessary, but keeps head updated - // We successfully popped idx - uint32_t index = idx; - - auto& slot = _contracts[index]; - - // Get current generation for handle before any modifications - uint32_t generation = slot.generation.load(std::memory_order_acquire); - - // Assign work with noexcept wrapper to ensure termination on exceptions - slot.work = [fn = std::move(work)]() noexcept { - if (fn) fn(); - }; - slot.executionType = executionType; - - // Increment active count BEFORE making the slot visible as allocated. - // This ensures that any thread that successfully observes the Allocated state - // (via acquire) also observes the increased activeCount due to release/acquire - // synchronization on slot.state. 
- _activeCount.fetch_add(1, std::memory_order_acq_rel); - // Transition state to allocated - slot.state.store(ContractState::Allocated, std::memory_order_release); - - return WorkContractHandle(this, static_cast(index), generation); - } - // CAS failed; head updated; retry - } - } - - ScheduleResult WorkContractGroup::scheduleContract(const WorkContractHandle& handle) { - if (!validateHandle(handle)) return ScheduleResult::Invalid; - - uint32_t index = handle.handleIndex(); - auto& slot = _contracts[index]; - - // Try to transition from Allocated to Scheduled - ContractState expected = ContractState::Allocated; - if (!slot.state.compare_exchange_strong(expected, ContractState::Scheduled, - std::memory_order_acq_rel)) { - // Check why it failed - ContractState current = slot.state.load(std::memory_order_acquire); - if (current == ContractState::Scheduled) { - return ScheduleResult::AlreadyScheduled; - } else if (current == ContractState::Executing) { - return ScheduleResult::Executing; - } - return ScheduleResult::Invalid; - } - - // Add to appropriate ready set based on execution type - if (slot.executionType == ExecutionType::MainThread) { - _mainThreadContracts->set(index); - _mainThreadScheduledCount.fetch_add(1, std::memory_order_acq_rel); - } else { - _readyContracts->set(index); - _scheduledCount.fetch_add(1, std::memory_order_acq_rel); - } - - // Notify concurrency provider if set - { - std::shared_lock lock(_concurrencyProviderMutex); - if (_concurrencyProvider) { - _concurrencyProvider->notifyWorkAvailable(this); + // Check if this slot is occupied (not free) + ContractState currentState = slot.state.load(std::memory_order_acquire); + if (currentState != ContractState::Free) { + // Try to transition directly to Free state + ContractState expected = currentState; + if (slot.state.compare_exchange_strong(expected, ContractState::Free, std::memory_order_acq_rel)) { + // Successfully transitioned, now clean up + bool isMainThread = (slot.executionType == 
ExecutionType::MainThread); + returnSlotToFreeList(i, currentState, isMainThread); } + // If CAS failed, another thread (or our own iteration) already handled this slot + // This is fine - we just continue to the next slot } - - return ScheduleResult::Scheduled; } +} - ScheduleResult WorkContractGroup::unscheduleContract(const WorkContractHandle& handle) { - // Relaxed validation to preserve semantics under unified execution: - // If the handle belongs to this group and index is in range, but generation - // has advanced due to execution starting, report Executing rather than Invalid. - if (handle.handleOwner() != static_cast(this)) { - return ScheduleResult::Invalid; - } - uint32_t index = handle.handleIndex(); - if (index >= _capacity) { - return ScheduleResult::Invalid; - } +void WorkContractGroup::unscheduleAllContracts() { + // Iterate through all contract slots and unschedule any that are scheduled + for (uint32_t i = 0; i < _capacity; ++i) { + auto& slot = _contracts[i]; - auto& slot = _contracts[index]; - uint32_t currentGen = slot.generation.load(std::memory_order_acquire); - if (currentGen != handle.handleGeneration()) { - // Slot was freed/reused. It may be due to execution having started (unified flow). - ContractState st = slot.state.load(std::memory_order_acquire); - if (st == ContractState::Executing) { - return ScheduleResult::Executing; - } - // In unified flow, we set state to Free while the task is still running. 
- if (st == ContractState::Free) { - size_t exec = _executingCount.load(std::memory_order_acquire) + - _mainThreadExecutingCount.load(std::memory_order_acquire); - if (exec > 0) { - return ScheduleResult::Executing; - } - } - return ScheduleResult::Invalid; - } - - // Generation matches: proceed with normal unschedule logic - // Check current state + // Check if this slot is scheduled ContractState currentState = slot.state.load(std::memory_order_acquire); - if (currentState == ContractState::Scheduled) { - // Try to transition back to Allocated + // Try to transition from Scheduled to Allocated ContractState expected = ContractState::Scheduled; - if (slot.state.compare_exchange_strong(expected, ContractState::Allocated, - std::memory_order_acq_rel)) { + if (slot.state.compare_exchange_strong(expected, ContractState::Allocated, std::memory_order_acq_rel)) { // Remove from appropriate ready set based on execution type size_t newScheduledCount; if (slot.executionType == ExecutionType::MainThread) { - _mainThreadContracts->clear(index); + _mainThreadContracts->clear(i); newScheduledCount = _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; } else { - _readyContracts->clear(index); + _readyContracts->clear(i); newScheduledCount = _scheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; } - + // Notify waiters if all scheduled contracts are complete if (newScheduledCount == 0) { std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - - return ScheduleResult::NotScheduled; } - // State changed while we were checking - likely now executing - return ScheduleResult::Executing; - } else if (currentState == ContractState::Executing) { + // If CAS failed, state changed - likely now executing, which is fine + } + } +} + +WorkContractGroup::~WorkContractGroup() { + // Stop accepting new work first - this prevents any new selections + stop(); + + // Wait for executing work to complete + // This ensures no thread is in the middle of 
selectForExecution + wait(); + + // Unschedule all scheduled contracts first (move them back to allocated state) + // This ensures we don't have contracts stuck in scheduled state + unscheduleAllContracts(); + + // Release all remaining contracts (allocated and any still scheduled) + // This ensures no contracts are left hanging when the group is destroyed + releaseAllContracts(); + + // Validate that all contracts have been properly cleaned up + ENTROPY_DEBUG_BLOCK( + size_t activeCount = _activeCount.load(std::memory_order_acquire); + ENTROPY_ASSERT(activeCount == 0, "WorkContractGroup destroyed with active contracts still allocated"); + + // Double-check that no threads are still selecting + size_t selectingCount = _selectingCount.load(std::memory_order_acquire); + ENTROPY_ASSERT(selectingCount == 0, "WorkContractGroup destroyed with threads still in selectForExecution"); + + size_t mainThreadSelectingCount = _mainThreadSelectingCount.load(std::memory_order_acquire); + ENTROPY_ASSERT(mainThreadSelectingCount == 0, + "WorkContractGroup destroyed with threads still in selectForMainThreadExecution");); + + // Then notify the concurrency provider to remove us from active groups + // CRITICAL: Read provider without holding lock to avoid ABBA deadlock + IConcurrencyProvider* provider = nullptr; + { + std::unique_lock lock(_concurrencyProviderMutex); + provider = _concurrencyProvider; + } + + if (provider) { + provider->notifyGroupDestroyed(this); + } +} + +WorkContractHandle WorkContractGroup::createContract(std::function work, ExecutionType executionType) { + // Pop a free slot from the lock-free stack (ABA-resistant with tagged head) + auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { + return (static_cast(tag) << 32) | static_cast(idx); + }; + auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; + auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; + + uint64_t head = 
_freeListHead.load(std::memory_order_acquire); + for (;;) { + uint32_t idx = headIndex(head); + if (idx == INVALID_INDEX) { + return WorkContractHandle(); // No free slots available + } + uint32_t next = _contracts[idx].nextFree.load(std::memory_order_acquire); + uint64_t newHead = packHead(next, headTag(head) + 1); + if (_freeListHead.compare_exchange_weak(head, newHead, std::memory_order_acq_rel, std::memory_order_acquire)) { + head = newHead; // Not necessary, but keeps head updated + // We successfully popped idx + uint32_t index = idx; + + auto& slot = _contracts[index]; + + // Get current generation for handle before any modifications + uint32_t generation = slot.generation.load(std::memory_order_acquire); + + // Assign work with noexcept wrapper to ensure termination on exceptions + slot.work = [fn = std::move(work)]() noexcept { + if (fn) fn(); + }; + slot.executionType = executionType; + + // Increment active count BEFORE making the slot visible as allocated. + // This ensures that any thread that successfully observes the Allocated state + // (via acquire) also observes the increased activeCount due to release/acquire + // synchronization on slot.state. 
+ _activeCount.fetch_add(1, std::memory_order_acq_rel); + // Transition state to allocated + slot.state.store(ContractState::Allocated, std::memory_order_release); + + return WorkContractHandle(this, static_cast(index), generation); + } + // CAS failed; head updated; retry + } +} + +ScheduleResult WorkContractGroup::scheduleContract(const WorkContractHandle& handle) { + if (!validateHandle(handle)) return ScheduleResult::Invalid; + + uint32_t index = handle.handleIndex(); + auto& slot = _contracts[index]; + + // Try to transition from Allocated to Scheduled + ContractState expected = ContractState::Allocated; + if (!slot.state.compare_exchange_strong(expected, ContractState::Scheduled, std::memory_order_acq_rel)) { + // Check why it failed + ContractState current = slot.state.load(std::memory_order_acquire); + if (current == ContractState::Scheduled) { + return ScheduleResult::AlreadyScheduled; + } else if (current == ContractState::Executing) { return ScheduleResult::Executing; - } else if (currentState == ContractState::Allocated) { - return ScheduleResult::NotScheduled; } - return ScheduleResult::Invalid; } - void WorkContractGroup::releaseContract(const WorkContractHandle& handle) { - if (!validateHandle(handle)) return; + // Add to appropriate ready set based on execution type + if (slot.executionType == ExecutionType::MainThread) { + _mainThreadContracts->set(index); + _mainThreadScheduledCount.fetch_add(1, std::memory_order_acq_rel); + } else { + _readyContracts->set(index); + _scheduledCount.fetch_add(1, std::memory_order_acq_rel); + } - uint32_t index = handle.handleIndex(); - - // Bounds check to prevent out-of-bounds access - if (index >= _capacity) return; - - auto& slot = _contracts[index]; + // Notify concurrency provider if set + { + std::shared_lock lock(_concurrencyProviderMutex); + if (_concurrencyProvider) { + _concurrencyProvider->notifyWorkAvailable(this); + } + } - // Atomically try to transition from Allocated or Scheduled to Free. 
- // This is the core of handling the race with selectForExecution. - ContractState currentState = slot.state.load(std::memory_order_acquire); + return ScheduleResult::Scheduled; +} - while (true) { - if (currentState == ContractState::Allocated) { - // Try to transition from Allocated -> Free - ContractState expected = ContractState::Allocated; - if (slot.state.compare_exchange_weak(expected, ContractState::Free, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - // Success, we are responsible for cleanup - bool isMainThread = (slot.executionType == ExecutionType::MainThread); - returnSlotToFreeList(index, ContractState::Allocated, isMainThread); - return; - } - // CAS failed, currentState is updated, loop again - currentState = expected; - continue; - } - - if (currentState == ContractState::Scheduled) { - // Try to transition from Scheduled -> Free - ContractState expected = ContractState::Scheduled; - if (slot.state.compare_exchange_weak(expected, ContractState::Free, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - // Success, we are responsible for cleanup - bool isMainThread = (slot.executionType == ExecutionType::MainThread); - returnSlotToFreeList(index, ContractState::Scheduled, isMainThread); - return; - } - // CAS failed, currentState is updated. It might have become Executing. Loop again. - currentState = expected; - continue; +ScheduleResult WorkContractGroup::unscheduleContract(const WorkContractHandle& handle) { + // Relaxed validation to preserve semantics under unified execution: + // If the handle belongs to this group and index is in range, but generation + // has advanced due to execution starting, report Executing rather than Invalid. 
+ if (handle.handleOwner() != static_cast(this)) { + return ScheduleResult::Invalid; + } + uint32_t index = handle.handleIndex(); + if (index >= _capacity) { + return ScheduleResult::Invalid; + } + + auto& slot = _contracts[index]; + uint32_t currentGen = slot.generation.load(std::memory_order_acquire); + if (currentGen != handle.handleGeneration()) { + // Slot was freed/reused. It may be due to execution having started (unified flow). + ContractState st = slot.state.load(std::memory_order_acquire); + if (st == ContractState::Executing) { + return ScheduleResult::Executing; + } + // In unified flow, we set state to Free while the task is still running. + if (st == ContractState::Free) { + size_t exec = _executingCount.load(std::memory_order_acquire) + + _mainThreadExecutingCount.load(std::memory_order_acquire); + if (exec > 0) { + return ScheduleResult::Executing; } - - // If we are here, the state is either Free, Executing, or invalid. - // In any of these cases, this thread can no longer act. 
- return; } + return ScheduleResult::Invalid; } - bool WorkContractGroup::isValidHandle(const WorkContractHandle& handle) const noexcept { - return validateHandle(handle); - } + // Generation matches: proceed with normal unschedule logic + // Check current state + ContractState currentState = slot.state.load(std::memory_order_acquire); - WorkContractHandle WorkContractGroup::selectForExecution(std::optional> bias) { - // RAII guard to track threads in selection - struct SelectionGuard { - WorkContractGroup* group; - bool active; - - SelectionGuard(WorkContractGroup* g) : group(g), active(true) { - group->_selectingCount.fetch_add(1, std::memory_order_acq_rel); + if (currentState == ContractState::Scheduled) { + // Try to transition back to Allocated + ContractState expected = ContractState::Scheduled; + if (slot.state.compare_exchange_strong(expected, ContractState::Allocated, std::memory_order_acq_rel)) { + // Remove from appropriate ready set based on execution type + size_t newScheduledCount; + if (slot.executionType == ExecutionType::MainThread) { + _mainThreadContracts->clear(index); + newScheduledCount = _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + } else { + _readyContracts->clear(index); + newScheduledCount = _scheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; } - - ~SelectionGuard() { - if (active) { - auto count = group->_selectingCount.fetch_sub(1, std::memory_order_acq_rel); - if (count == 1) { - // We were the last selecting thread, notify waiters - std::lock_guard lock(group->_waitMutex); - group->_waitCondition.notify_all(); - } - } + + // Notify waiters if all scheduled contracts are complete + if (newScheduledCount == 0) { + std::lock_guard lock(_waitMutex); + _waitCondition.notify_all(); } - - void deactivate() { active = false; } - }; - - SelectionGuard guard(this); - - // Don't allow selection if we're stopping - if (_stopping.load(std::memory_order_seq_cst)) { - return WorkContractHandle(); + + return 
ScheduleResult::NotScheduled; } + // State changed while we were checking - likely now executing + return ScheduleResult::Executing; + } else if (currentState == ContractState::Executing) { + return ScheduleResult::Executing; + } else if (currentState == ContractState::Allocated) { + return ScheduleResult::NotScheduled; + } - - // Use provided bias or create a local one - uint64_t localBias = 0; - uint64_t& biasRef = bias ? bias->get() : localBias; - - // Check stopping flag again right before accessing _readyContracts - // This reduces the race window significantly - if (_stopping.load(std::memory_order_seq_cst)) { - return WorkContractHandle(); + return ScheduleResult::Invalid; +} + +void WorkContractGroup::releaseContract(const WorkContractHandle& handle) { + if (!validateHandle(handle)) return; + + uint32_t index = handle.handleIndex(); + + // Bounds check to prevent out-of-bounds access + if (index >= _capacity) return; + + auto& slot = _contracts[index]; + + // Atomically try to transition from Allocated or Scheduled to Free. + // This is the core of handling the race with selectForExecution. 
+ ContractState currentState = slot.state.load(std::memory_order_acquire); + + while (true) { + if (currentState == ContractState::Allocated) { + // Try to transition from Allocated -> Free + ContractState expected = ContractState::Allocated; + if (slot.state.compare_exchange_weak(expected, ContractState::Free, std::memory_order_acq_rel, + std::memory_order_acquire)) { + // Success, we are responsible for cleanup + bool isMainThread = (slot.executionType == ExecutionType::MainThread); + returnSlotToFreeList(index, ContractState::Allocated, isMainThread); + return; + } + // CAS failed, currentState is updated, loop again + currentState = expected; + continue; } - - auto [index, _] = _readyContracts->select(biasRef); - - if (index == SignalTreeBase::S_INVALID_SIGNAL_INDEX) { - return WorkContractHandle(); + + if (currentState == ContractState::Scheduled) { + // Try to transition from Scheduled -> Free + ContractState expected = ContractState::Scheduled; + if (slot.state.compare_exchange_weak(expected, ContractState::Free, std::memory_order_acq_rel, + std::memory_order_acquire)) { + // Success, we are responsible for cleanup + bool isMainThread = (slot.executionType == ExecutionType::MainThread); + returnSlotToFreeList(index, ContractState::Scheduled, isMainThread); + return; + } + // CAS failed, currentState is updated. It might have become Executing. Loop again. + currentState = expected; + continue; } - - auto& slot = _contracts[index]; - - // Try to transition from Scheduled to Executing - ContractState expected = ContractState::Scheduled; - if (!slot.state.compare_exchange_strong(expected, ContractState::Executing, - std::memory_order_acq_rel)) { - // Someone else got it first or state changed - return WorkContractHandle(); + + // If we are here, the state is either Free, Executing, or invalid. + // In any of these cases, this thread can no longer act. 
+ return; + } +} + +bool WorkContractGroup::isValidHandle(const WorkContractHandle& handle) const noexcept { + return validateHandle(handle); +} + +WorkContractHandle WorkContractGroup::selectForExecution(std::optional> bias) { + // RAII guard to track threads in selection + struct SelectionGuard + { + WorkContractGroup* group; + bool active; + + SelectionGuard(WorkContractGroup* g) : group(g), active(true) { + group->_selectingCount.fetch_add(1, std::memory_order_acq_rel); } - // Clear from ready set immediately upon successful selection to avoid stale ready bits. - // CRITICAL: This clear is part of a triple-redundancy strategy to ensure no stale bits remain - // in the signal tree under any thread interleaving. See returnSlotToFreeList() for defensive - // clear that handles the race where this thread is preempted before clearing. - _readyContracts->clear(index); - - // Get current generation for handle - uint32_t generation = slot.generation.load(std::memory_order_acquire); - - // Update counters: decrement scheduled, increment executing - _scheduledCount.fetch_sub(1, std::memory_order_acq_rel); - _executingCount.fetch_add(1, std::memory_order_acq_rel); - - // Return valid handle - return WorkContractHandle(this, static_cast(index), generation); - } - - WorkContractHandle WorkContractGroup::selectForMainThreadExecution(std::optional> bias) { - // RAII guard to track threads in selection - struct SelectionGuard { - WorkContractGroup* group; - bool active; - - SelectionGuard(WorkContractGroup* g) : group(g), active(true) { - group->_mainThreadSelectingCount.fetch_add(1, std::memory_order_acq_rel); - } - - ~SelectionGuard() { - if (active) { - auto count = group->_mainThreadSelectingCount.fetch_sub(1, std::memory_order_acq_rel); - if (count == 1) { - // We were the last selecting thread, notify waiters - std::lock_guard lock(group->_waitMutex); - group->_waitCondition.notify_all(); - } + ~SelectionGuard() { + if (active) { + auto count = 
group->_selectingCount.fetch_sub(1, std::memory_order_acq_rel); + if (count == 1) { + // We were the last selecting thread, notify waiters + std::lock_guard lock(group->_waitMutex); + group->_waitCondition.notify_all(); } } - - void deactivate() { active = false; } - }; - - SelectionGuard guard(this); - - // Don't allow selection if we're stopping - if (_stopping.load(std::memory_order_seq_cst)) { - return WorkContractHandle(); - } - - // Use provided bias or create a local one - uint64_t localBias = 0; - uint64_t& biasRef = bias ? bias->get() : localBias; - - // Check stopping flag again right before accessing _mainThreadContracts - if (_stopping.load(std::memory_order_seq_cst)) { - return WorkContractHandle(); } - - auto [index, _] = _mainThreadContracts->select(biasRef); - - if (index == SignalTreeBase::S_INVALID_SIGNAL_INDEX) { - return WorkContractHandle(); - } - - auto& slot = _contracts[index]; - - // Try to transition from Scheduled to Executing - ContractState expected = ContractState::Scheduled; - if (!slot.state.compare_exchange_strong(expected, ContractState::Executing, - std::memory_order_acq_rel)) { - // Someone else got it first or state changed - return WorkContractHandle(); + + void deactivate() { + active = false; } + }; - // Clear from main-thread ready set immediately upon successful selection. - // CRITICAL: This clear is part of a triple-redundancy strategy to ensure no stale bits remain - // in the signal tree under any thread interleaving. See returnSlotToFreeList() for defensive - // clear that handles the race where this thread is preempted before clearing. 
- _mainThreadContracts->clear(index); - - // Get current generation for handle - uint32_t generation = slot.generation.load(std::memory_order_acquire); + SelectionGuard guard(this); + + // Don't allow selection if we're stopping + if (_stopping.load(std::memory_order_seq_cst)) { + return WorkContractHandle(); + } - // Update counters: decrement scheduled, increment executing - _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel); - _mainThreadExecutingCount.fetch_add(1, std::memory_order_acq_rel); + // Use provided bias or create a local one + uint64_t localBias = 0; + uint64_t& biasRef = bias ? bias->get() : localBias; - // Return valid handle - return WorkContractHandle(this, static_cast(index), generation); + // Check stopping flag again right before accessing _readyContracts + // This reduces the race window significantly + if (_stopping.load(std::memory_order_seq_cst)) { + return WorkContractHandle(); } - void WorkContractGroup::executeContract(const WorkContractHandle& handle) { - if (!handle.valid()) return; + auto [index, _] = _readyContracts->select(biasRef); - const uint32_t index = handle.handleIndex(); - auto& slot = _contracts[index]; + if (index == SignalTreeBase::S_INVALID_SIGNAL_INDEX) { + return WorkContractHandle(); + } - const bool isMainThread = (slot.executionType == ExecutionType::MainThread); + auto& slot = _contracts[index]; - // Move work out (point of no return) - auto task = std::move(slot.work); + // Try to transition from Scheduled to Executing + ContractState expected = ContractState::Scheduled; + if (!slot.state.compare_exchange_strong(expected, ContractState::Executing, std::memory_order_acq_rel)) { + // Someone else got it first or state changed + return WorkContractHandle(); + } - // Free the slot BEFORE executing to allow re-entrance - // Invalidate handles and transition to Free - slot.generation.fetch_add(1, std::memory_order_acq_rel); - slot.state.store(ContractState::Free, std::memory_order_release); + // Clear 
from ready set immediately upon successful selection to avoid stale ready bits. + // CRITICAL: This clear is part of a triple-redundancy strategy to ensure no stale bits remain + // in the signal tree under any thread interleaving. See returnSlotToFreeList() for defensive + // clear that handles the race where this thread is preempted before clearing. + _readyContracts->clear(index); - // Layer 3: Defensive clear (guard against selector preemption before clear) - if (isMainThread) { - _mainThreadContracts->clear(index); - } else { - _readyContracts->clear(index); + // Get current generation for handle + uint32_t generation = slot.generation.load(std::memory_order_acquire); + + // Update counters: decrement scheduled, increment executing + _scheduledCount.fetch_sub(1, std::memory_order_acq_rel); + _executingCount.fetch_add(1, std::memory_order_acq_rel); + + // Return valid handle + return WorkContractHandle(this, static_cast(index), generation); +} + +WorkContractHandle WorkContractGroup::selectForMainThreadExecution( + std::optional> bias) { + // RAII guard to track threads in selection + struct SelectionGuard + { + WorkContractGroup* group; + bool active; + + SelectionGuard(WorkContractGroup* g) : group(g), active(true) { + group->_mainThreadSelectingCount.fetch_add(1, std::memory_order_acq_rel); } - // Return slot to freelist (ABA-resistant) - // Note: activeCount will be decremented AFTER task execution to maintain - // the invariant that executing contracts are included in activeCount - auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { - return (static_cast(tag) << 32) | static_cast(idx); - }; - auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; - auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; - - uint64_t old = _freeListHead.load(std::memory_order_acquire); - for (;;) { - slot.nextFree.store(headIndex(old), std::memory_order_release); - uint64_t newH = packHead(index, headTag(old) + 
1); - if (_freeListHead.compare_exchange_weak(old, newH, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; + ~SelectionGuard() { + if (active) { + auto count = group->_mainThreadSelectingCount.fetch_sub(1, std::memory_order_acq_rel); + if (count == 1) { + // We were the last selecting thread, notify waiters + std::lock_guard lock(group->_waitMutex); + group->_waitCondition.notify_all(); + } } } - - // Execute outside of slot ownership - if (task) { - task(); + void deactivate() { + active = false; } + }; - // Finally, decrement executing counters and notify if needed - size_t newExecCount = isMainThread - ? _mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1 - : _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + SelectionGuard guard(this); - if (newExecCount == 0) { - std::lock_guard lock(_waitMutex); - _waitCondition.notify_all(); - } + // Don't allow selection if we're stopping + if (_stopping.load(std::memory_order_seq_cst)) { + return WorkContractHandle(); + } - // Now decrement active count and fire capacity callbacks - auto newActiveCount = _activeCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - if (newActiveCount < _capacity) { - std::lock_guard lock(_callbackMutex); - for (const auto& cb : _onCapacityAvailableCallbacks) { - if (cb) cb(); - } - } + // Use provided bias or create a local one + uint64_t localBias = 0; + uint64_t& biasRef = bias ? 
bias->get() : localBias; + + // Check stopping flag again right before accessing _mainThreadContracts + if (_stopping.load(std::memory_order_seq_cst)) { + return WorkContractHandle(); } - void WorkContractGroup::abortExecution(const WorkContractHandle& handle) { - if (!handle.valid()) return; + auto [index, _] = _mainThreadContracts->select(biasRef); - const uint32_t index = handle.handleIndex(); - auto& slot = _contracts[index]; + if (index == SignalTreeBase::S_INVALID_SIGNAL_INDEX) { + return WorkContractHandle(); + } - const bool isMainThread = (slot.executionType == ExecutionType::MainThread); + auto& slot = _contracts[index]; - // Drop work; we are not executing - slot.work = nullptr; + // Try to transition from Scheduled to Executing + ContractState expected = ContractState::Scheduled; + if (!slot.state.compare_exchange_strong(expected, ContractState::Executing, std::memory_order_acq_rel)) { + // Someone else got it first or state changed + return WorkContractHandle(); + } - // Invalidate handles and free the slot - slot.generation.fetch_add(1, std::memory_order_acq_rel); - slot.state.store(ContractState::Free, std::memory_order_release); + // Clear from main-thread ready set immediately upon successful selection. + // CRITICAL: This clear is part of a triple-redundancy strategy to ensure no stale bits remain + // in the signal tree under any thread interleaving. See returnSlotToFreeList() for defensive + // clear that handles the race where this thread is preempted before clearing. 
+ _mainThreadContracts->clear(index); - // Defensive clear to keep signal tree clean - if (isMainThread) { - _mainThreadContracts->clear(index); - } else { - _readyContracts->clear(index); - } + // Get current generation for handle + uint32_t generation = slot.generation.load(std::memory_order_acquire); - // Decrement active BEFORE returning to freelist - _activeCount.fetch_sub(1, std::memory_order_acq_rel); - - // Return slot to freelist (ABA-resistant) - auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { - return (static_cast(tag) << 32) | static_cast(idx); - }; - auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; - auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; - - uint64_t old = _freeListHead.load(std::memory_order_acquire); - for (;;) { - slot.nextFree.store(headIndex(old), std::memory_order_release); - uint64_t newH = packHead(index, headTag(old) + 1); - if (_freeListHead.compare_exchange_weak(old, newH, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } - } + // Update counters: decrement scheduled, increment executing + _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel); + _mainThreadExecutingCount.fetch_add(1, std::memory_order_acq_rel); - // Decrement executing and notify - size_t newExecCount = isMainThread - ? 
_mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1 - : _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + // Return valid handle + return WorkContractHandle(this, static_cast(index), generation); +} - if (newExecCount == 0) { - std::lock_guard lock(_waitMutex); - _waitCondition.notify_all(); +void WorkContractGroup::executeContract(const WorkContractHandle& handle) { + if (!handle.valid()) return; + + const uint32_t index = handle.handleIndex(); + auto& slot = _contracts[index]; + + const bool isMainThread = (slot.executionType == ExecutionType::MainThread); + + // Move work out (point of no return) + auto task = std::move(slot.work); + + // Free the slot BEFORE executing to allow re-entrance + // Invalidate handles and transition to Free + slot.generation.fetch_add(1, std::memory_order_acq_rel); + slot.state.store(ContractState::Free, std::memory_order_release); + + // Layer 3: Defensive clear (guard against selector preemption before clear) + if (isMainThread) { + _mainThreadContracts->clear(index); + } else { + _readyContracts->clear(index); + } + + // Return slot to freelist (ABA-resistant) + // Note: activeCount will be decremented AFTER task execution to maintain + // the invariant that executing contracts are included in activeCount + auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { + return (static_cast(tag) << 32) | static_cast(idx); + }; + auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; + auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; + + uint64_t old = _freeListHead.load(std::memory_order_acquire); + for (;;) { + slot.nextFree.store(headIndex(old), std::memory_order_release); + uint64_t newH = packHead(index, headTag(old) + 1); + if (_freeListHead.compare_exchange_weak(old, newH, std::memory_order_acq_rel, std::memory_order_acquire)) { + break; } } - void WorkContractGroup::completeExecution(const WorkContractHandle& /*handle*/) { - // 
DEPRECATED: No-op for backward compatibility. - // All cleanup now happens inside executeContract() to enable re-entrance. - // This method can be safely removed once all call sites are updated. + // Execute outside of slot ownership + if (task) { + task(); + } + + // Finally, decrement executing counters and notify if needed + size_t newExecCount = isMainThread ? _mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1 + : _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + + if (newExecCount == 0) { + std::lock_guard lock(_waitMutex); + _waitCondition.notify_all(); } - void WorkContractGroup::completeMainThreadExecution(const WorkContractHandle& /*handle*/) { - // DEPRECATED: No-op for backward compatibility. - // All cleanup now happens inside executeContract() to enable re-entrance. - // This method can be safely removed once all call sites are updated. + // Now decrement active count and fire capacity callbacks + auto newActiveCount = _activeCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + if (newActiveCount < _capacity) { + std::lock_guard lock(_callbackMutex); + for (const auto& cb : _onCapacityAvailableCallbacks) { + if (cb) cb(); + } } - - size_t WorkContractGroup::executeAllMainThreadWork() { - return executeMainThreadWork(std::numeric_limits::max()); +} + +void WorkContractGroup::abortExecution(const WorkContractHandle& handle) { + if (!handle.valid()) return; + + const uint32_t index = handle.handleIndex(); + auto& slot = _contracts[index]; + + const bool isMainThread = (slot.executionType == ExecutionType::MainThread); + + // Drop work; we are not executing + slot.work = nullptr; + + // Invalidate handles and free the slot + slot.generation.fetch_add(1, std::memory_order_acq_rel); + slot.state.store(ContractState::Free, std::memory_order_release); + + // Defensive clear to keep signal tree clean + if (isMainThread) { + _mainThreadContracts->clear(index); + } else { + _readyContracts->clear(index); } - - size_t 
WorkContractGroup::executeMainThreadWork(size_t maxContracts) { - size_t executed = 0; - uint64_t localBias = 0; - - while (executed < maxContracts) { - auto handle = selectForMainThreadExecution(std::ref(localBias)); - if (!handle.valid()) { - break; // No more main thread contracts scheduled - } - - // Execute the contract (includes all cleanup) - executeContract(handle); - executed++; - - // Rotate bias to ensure fairness - localBias = (localBias << 1) | (localBias >> 63); + + // Decrement active BEFORE returning to freelist + _activeCount.fetch_sub(1, std::memory_order_acq_rel); + + // Return slot to freelist (ABA-resistant) + auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { + return (static_cast(tag) << 32) | static_cast(idx); + }; + auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; + auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; + + uint64_t old = _freeListHead.load(std::memory_order_acquire); + for (;;) { + slot.nextFree.store(headIndex(old), std::memory_order_release); + uint64_t newH = packHead(index, headTag(old) + 1); + if (_freeListHead.compare_exchange_weak(old, newH, std::memory_order_acq_rel, std::memory_order_acquire)) { + break; } - - return executed; } - void WorkContractGroup::stop() { - _stopping.store(true, std::memory_order_seq_cst); - // Wake up any threads waiting in wait() + // Decrement executing and notify + size_t newExecCount = isMainThread ? 
_mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1 + : _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + + if (newExecCount == 0) { + std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - - void WorkContractGroup::resume() { - _stopping.store(false, std::memory_order_seq_cst); - // Note: We don't notify here - the caller should use their - // concurrency provider to notify of available work if needed - } - - void WorkContractGroup::wait() { - // Use condition variable for efficient waiting instead of busy-wait - std::unique_lock lock(_waitMutex); - _waitCondition.wait(lock, [this]() { - if (_stopping.load(std::memory_order_seq_cst)) { - // When stopping, wait for both executing work AND selecting threads - return _executingCount.load(std::memory_order_acquire) == 0 && - _selectingCount.load(std::memory_order_acquire) == 0 && - _mainThreadExecutingCount.load(std::memory_order_acquire) == 0 && - _mainThreadSelectingCount.load(std::memory_order_acquire) == 0; - } - // Normal wait - wait for all scheduled AND executing work to complete - return _scheduledCount.load(std::memory_order_acquire) == 0 && - _executingCount.load(std::memory_order_acquire) == 0 && - _mainThreadScheduledCount.load(std::memory_order_acquire) == 0 && - _mainThreadExecutingCount.load(std::memory_order_acquire) == 0; - }); - } - - void WorkContractGroup::executeAllBackgroundWork() { - // Maintain local bias for fair selection - uint64_t localBias = 0; - - // Keep executing until no more scheduled contracts - while (true) { - WorkContractHandle handle = selectForExecution(std::ref(localBias)); - if (!handle.valid()) { - break; // No more scheduled contracts - } - - // Use the existing executeContract method for consistency (includes all cleanup) - executeContract(handle); +} - // Rotate bias to ensure fairness across all tree branches - localBias = (localBias << 1) | (localBias >> 63); +void WorkContractGroup::completeExecution(const WorkContractHandle& 
/*handle*/) { + // DEPRECATED: No-op for backward compatibility. + // All cleanup now happens inside executeContract() to enable re-entrance. + // This method can be safely removed once all call sites are updated. +} + +void WorkContractGroup::completeMainThreadExecution(const WorkContractHandle& /*handle*/) { + // DEPRECATED: No-op for backward compatibility. + // All cleanup now happens inside executeContract() to enable re-entrance. + // This method can be safely removed once all call sites are updated. +} + +size_t WorkContractGroup::executeAllMainThreadWork() { + return executeMainThreadWork(std::numeric_limits::max()); +} + +size_t WorkContractGroup::executeMainThreadWork(size_t maxContracts) { + size_t executed = 0; + uint64_t localBias = 0; + + while (executed < maxContracts) { + auto handle = selectForMainThreadExecution(std::ref(localBias)); + if (!handle.valid()) { + break; // No more main thread contracts scheduled } + + // Execute the contract (includes all cleanup) + executeContract(handle); + executed++; + + // Rotate bias to ensure fairness + localBias = (localBias << 1) | (localBias >> 63); } - bool WorkContractGroup::validateHandle(const WorkContractHandle& handle) const noexcept { - // Check owner via stamped identity - if (handle.handleOwner() != static_cast(this)) return false; - - // Check index bounds - uint32_t index = handle.handleIndex(); - if (index >= _capacity) return false; - - // Check generation - uint32_t currentGen = _contracts[index].generation.load(std::memory_order_acquire); - return currentGen == handle.handleGeneration(); - } - - ContractState WorkContractGroup::getContractState(const WorkContractHandle& handle) const noexcept { - if (!validateHandle(handle)) return ContractState::Free; - - uint32_t index = handle.handleIndex(); - return _contracts[index].state.load(std::memory_order_acquire); - } - - size_t WorkContractGroup::executingCount() const noexcept { - return _executingCount.load(std::memory_order_acquire); - } - - 
void WorkContractGroup::returnSlotToFreeList(uint32_t index, ContractState previousState, bool isMainThread) { - auto& slot = _contracts[index]; - - // Increment generation to invalidate all handles - slot.generation.fetch_add(1, std::memory_order_acq_rel); - - // Clear the work function to release resources - slot.work = nullptr; - - // Signal tree clearing strategy (triple-redundancy for correctness): - // Layer 1: Primary clear immediately after selection (selectForExecution/selectForMainThreadExecution) - // Layer 2: Scheduled cleanup - clear if released before execution starts - // Layer 3: Defensive clear - handles race where selection thread was preempted before clearing - // This ensures no stale ready bits remain in the signal tree regardless of thread scheduling. - - // Layer 2: Clear if contract was released while still scheduled (never selected for execution) - if (previousState == ContractState::Scheduled) { - if (isMainThread) { - _mainThreadContracts->clear(index); - } else { - _readyContracts->clear(index); - } + return executed; +} + +void WorkContractGroup::stop() { + _stopping.store(true, std::memory_order_seq_cst); + // Wake up any threads waiting in wait() + _waitCondition.notify_all(); +} + +void WorkContractGroup::resume() { + _stopping.store(false, std::memory_order_seq_cst); + // Note: We don't notify here - the caller should use their + // concurrency provider to notify of available work if needed +} + +void WorkContractGroup::wait() { + // Use condition variable for efficient waiting instead of busy-wait + std::unique_lock lock(_waitMutex); + _waitCondition.wait(lock, [this]() { + if (_stopping.load(std::memory_order_seq_cst)) { + // When stopping, wait for both executing work AND selecting threads + return _executingCount.load(std::memory_order_acquire) == 0 && + _selectingCount.load(std::memory_order_acquire) == 0 && + _mainThreadExecutingCount.load(std::memory_order_acquire) == 0 && + 
_mainThreadSelectingCount.load(std::memory_order_acquire) == 0; + } + // Normal wait - wait for all scheduled AND executing work to complete + return _scheduledCount.load(std::memory_order_acquire) == 0 && + _executingCount.load(std::memory_order_acquire) == 0 && + _mainThreadScheduledCount.load(std::memory_order_acquire) == 0 && + _mainThreadExecutingCount.load(std::memory_order_acquire) == 0; + }); +} + +void WorkContractGroup::executeAllBackgroundWork() { + // Maintain local bias for fair selection + uint64_t localBias = 0; + + // Keep executing until no more scheduled contracts + while (true) { + WorkContractHandle handle = selectForExecution(std::ref(localBias)); + if (!handle.valid()) { + break; // No more scheduled contracts } - - // Update counters based on previous state - if (previousState == ContractState::Allocated) { - // Contract was allocated but never scheduled - only decrement active count - // (active count will be decremented below) - } else if (previousState == ContractState::Scheduled) { - // Only decrement scheduled count if it was scheduled (not yet executing) - size_t newScheduledCount; - if (isMainThread) { - newScheduledCount = _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } else { - newScheduledCount = _scheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } - - // Notify waiters if all scheduled contracts are complete - if (newScheduledCount == 0) { - std::lock_guard lock(_waitMutex); - _waitCondition.notify_all(); - } - } else if (previousState == ContractState::Executing) { - // Layer 3: Defensive clear to handle race condition where selectForExecution() successfully - // transitioned state to Executing but was preempted before executing Layer 1 clear. - // Edge case scenario: - // 1. Thread A: select() returns index N, CAS Scheduled->Executing succeeds - // 2. Thread A: preempted before _readyContracts->clear(N) - // 3. Thread B: executeContract(N) + completeExecution(N) - // 4. 
Without this clear: signal tree still has stale bit N set - // This defensive clear ensures correctness under all thread interleavings. - if (isMainThread) { - _mainThreadContracts->clear(index); - } else { - _readyContracts->clear(index); - } - size_t newExecutingCount; - if (isMainThread) { - newExecutingCount = _mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } else { - newExecutingCount = _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - } - - // Notify waiters if this was the last executing contract - // (either when stopping OR when wait() is waiting for all work to complete) - if (newExecutingCount == 0) { - std::lock_guard lock(_waitMutex); - _waitCondition.notify_all(); - } + // Use the existing executeContract method for consistency (includes all cleanup) + executeContract(handle); + + // Rotate bias to ensure fairness across all tree branches + localBias = (localBias << 1) | (localBias >> 63); + } +} + +bool WorkContractGroup::validateHandle(const WorkContractHandle& handle) const noexcept { + // Check owner via stamped identity + if (handle.handleOwner() != static_cast(this)) return false; + + // Check index bounds + uint32_t index = handle.handleIndex(); + if (index >= _capacity) return false; + + // Check generation + uint32_t currentGen = _contracts[index].generation.load(std::memory_order_acquire); + return currentGen == handle.handleGeneration(); +} + +ContractState WorkContractGroup::getContractState(const WorkContractHandle& handle) const noexcept { + if (!validateHandle(handle)) return ContractState::Free; + + uint32_t index = handle.handleIndex(); + return _contracts[index].state.load(std::memory_order_acquire); +} + +size_t WorkContractGroup::executingCount() const noexcept { + return _executingCount.load(std::memory_order_acquire); +} + +void WorkContractGroup::returnSlotToFreeList(uint32_t index, ContractState previousState, bool isMainThread) { + auto& slot = _contracts[index]; + + // Increment 
generation to invalidate all handles + slot.generation.fetch_add(1, std::memory_order_acq_rel); + + // Clear the work function to release resources + slot.work = nullptr; + + // Signal tree clearing strategy (triple-redundancy for correctness): + // Layer 1: Primary clear immediately after selection (selectForExecution/selectForMainThreadExecution) + // Layer 2: Scheduled cleanup - clear if released before execution starts + // Layer 3: Defensive clear - handles race where selection thread was preempted before clearing + // This ensures no stale ready bits remain in the signal tree regardless of thread scheduling. + + // Layer 2: Clear if contract was released while still scheduled (never selected for execution) + if (previousState == ContractState::Scheduled) { + if (isMainThread) { + _mainThreadContracts->clear(index); + } else { + _readyContracts->clear(index); } + } - // Always decrement active count BEFORE exposing slot to free list to avoid transient - // activeCount > capacity windows under contention. 
- auto newActiveCount = _activeCount.fetch_sub(1, std::memory_order_acq_rel) - 1; - - // Now push the slot back onto the free list so new createContract() can reuse it (ABA-resistant) - auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { - return (static_cast(tag) << 32) | static_cast(idx); - }; - auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; - auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; - - uint64_t old = _freeListHead.load(std::memory_order_acquire); - for (;;) { - uint32_t oldIdx = headIndex(old); - slot.nextFree.store(oldIdx, std::memory_order_release); - uint64_t newH = packHead(index, headTag(old) + 1); - if (_freeListHead.compare_exchange_weak(old, newH, - std::memory_order_acq_rel, - std::memory_order_acquire)) { - break; - } + // Update counters based on previous state + if (previousState == ContractState::Allocated) { + // Contract was allocated but never scheduled - only decrement active count + // (active count will be decremented below) + } else if (previousState == ContractState::Scheduled) { + // Only decrement scheduled count if it was scheduled (not yet executing) + size_t newScheduledCount; + if (isMainThread) { + newScheduledCount = _mainThreadScheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + } else { + newScheduledCount = _scheduledCount.fetch_sub(1, std::memory_order_acq_rel) - 1; } - - // Notify all registered callbacks that capacity is available - // This allows WorkGraphs to process deferred nodes - if (newActiveCount < _capacity) { - std::lock_guard lock(_callbackMutex); - for (const auto& callback : _onCapacityAvailableCallbacks) { - if (callback) { - callback(); - } - } + + // Notify waiters if all scheduled contracts are complete + if (newScheduledCount == 0) { + std::lock_guard lock(_waitMutex); + _waitCondition.notify_all(); + } + } else if (previousState == ContractState::Executing) { + // Layer 3: Defensive clear to handle race condition 
where selectForExecution() successfully + // transitioned state to Executing but was preempted before executing Layer 1 clear. + // Edge case scenario: + // 1. Thread A: select() returns index N, CAS Scheduled->Executing succeeds + // 2. Thread A: preempted before _readyContracts->clear(N) + // 3. Thread B: executeContract(N) + completeExecution(N) + // 4. Without this clear: signal tree still has stale bit N set + // This defensive clear ensures correctness under all thread interleavings. + if (isMainThread) { + _mainThreadContracts->clear(index); + } else { + _readyContracts->clear(index); + } + + size_t newExecutingCount; + if (isMainThread) { + newExecutingCount = _mainThreadExecutingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + } else { + newExecutingCount = _executingCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + } + + // Notify waiters if this was the last executing contract + // (either when stopping OR when wait() is waiting for all work to complete) + if (newExecutingCount == 0) { + std::lock_guard lock(_waitMutex); + _waitCondition.notify_all(); } } - - void WorkContractGroup::setConcurrencyProvider(IConcurrencyProvider* provider) { - std::unique_lock lock(_concurrencyProviderMutex); - _concurrencyProvider = provider; - } - - WorkContractGroup::CapacityCallback WorkContractGroup::addOnCapacityAvailable(std::function callback) { - std::lock_guard lock(_callbackMutex); - _onCapacityAvailableCallbacks.push_back(std::move(callback)); - return std::prev(_onCapacityAvailableCallbacks.end()); + + // Always decrement active count BEFORE exposing slot to free list to avoid transient + // activeCount > capacity windows under contention. 
+ auto newActiveCount = _activeCount.fetch_sub(1, std::memory_order_acq_rel) - 1; + + // Now push the slot back onto the free list so new createContract() can reuse it (ABA-resistant) + auto packHead = [](uint32_t idx, uint32_t tag) -> uint64_t { + return (static_cast(tag) << 32) | static_cast(idx); + }; + auto headIndex = [](uint64_t h) -> uint32_t { return static_cast(h & 0xFFFFFFFFull); }; + auto headTag = [](uint64_t h) -> uint32_t { return static_cast(h >> 32); }; + + uint64_t old = _freeListHead.load(std::memory_order_acquire); + for (;;) { + uint32_t oldIdx = headIndex(old); + slot.nextFree.store(oldIdx, std::memory_order_release); + uint64_t newH = packHead(index, headTag(old) + 1); + if (_freeListHead.compare_exchange_weak(old, newH, std::memory_order_acq_rel, std::memory_order_acquire)) { + break; + } } - - void WorkContractGroup::removeOnCapacityAvailable(CapacityCallback it) { + + // Notify all registered callbacks that capacity is available + // This allows WorkGraphs to process deferred nodes + if (newActiveCount < _capacity) { std::lock_guard lock(_callbackMutex); - _onCapacityAvailableCallbacks.erase(it); + for (const auto& callback : _onCapacityAvailableCallbacks) { + if (callback) { + callback(); + } + } } +} + +void WorkContractGroup::setConcurrencyProvider(IConcurrencyProvider* provider) { + std::unique_lock lock(_concurrencyProviderMutex); + _concurrencyProvider = provider; +} + +WorkContractGroup::CapacityCallback WorkContractGroup::addOnCapacityAvailable(std::function callback) { + std::lock_guard lock(_callbackMutex); + _onCapacityAvailableCallbacks.push_back(std::move(callback)); + return std::prev(_onCapacityAvailableCallbacks.end()); +} + +void WorkContractGroup::removeOnCapacityAvailable(CapacityCallback it) { + std::lock_guard lock(_callbackMutex); + _onCapacityAvailableCallbacks.erase(it); +} // Introspection and debug description overrides (EntropyObject) uint64_t WorkContractGroup::classHash() const noexcept { - static const uint64_t 
hash = static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); + static const uint64_t hash = + static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); return hash; } std::string WorkContractGroup::toString() const { // Include name and capacity for quick identification - return std::format("{}@{}(name=\"{}\", cap={})", - className(), static_cast(this), _name, _capacity); + return std::format("{}@{}(name=\"{}\", cap={})", className(), static_cast(this), _name, _capacity); } std::string WorkContractGroup::debugString() const { @@ -988,6 +979,6 @@ void WorkContractGroup::setTimedDeferralCallback(std::function callbac _timedDeferralCallback = std::move(callback); } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkContractGroup.h b/src/Concurrency/WorkContractGroup.h index 3c06587..02628c9 100644 --- a/src/Concurrency/WorkContractGroup.h +++ b/src/Concurrency/WorkContractGroup.h @@ -10,7 +10,7 @@ /** * @file WorkContractGroup.h * @brief Lock-free work contract pool with concurrent scheduling - * + * * This file contains the WorkContractGroup class, which manages a pool of work * contracts using lock-free data structures. 
It provides the core scheduling * primitives for the concurrency system, enabling work distribution @@ -19,643 +19,659 @@ #pragma once -#include "WorkContractHandle.h" -#include "SignalTree.h" -#include "WorkGraphTypes.h" -#include -#include -#include -#include -#include #include -#include -#include -#include +#include #include +#include +#include +#include #include +#include +#include +#include #include #include +#include + #include "../Core/EntropyObject.h" +#include "SignalTree.h" +#include "WorkContractHandle.h" +#include "WorkGraphTypes.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ + +// Forward declaration +class IConcurrencyProvider; + +/** + * @brief Factory and manager for work contracts with lock-free scheduling + * + * WorkContractGroup implements a work dispatcher capable of managing + * thousands of tasks without locks or blocking operations. It provides a comprehensive + * pool of work contracts with allocation, scheduling, and execution primitives suitable + * for job systems, task graphs, and high-throughput work management scenarios. + * + * The implementation uses SignalTree-based lock-free operations, + * enabling multiple threads to schedule and select work concurrently without contention. + * This design is optimized for game engines, parallel processing systems, and applications + * requiring management of numerous small work units. + * + * Key features: + * - Lock-free contract scheduling and selection + * - Generation-based handles prevent use-after-free bugs + * - Immediate resource cleanup on completion + * - Statistical monitoring (active/scheduled counts) + * - Wait functionality for synchronization points + * + * Important: This class provides scheduling primitives without handling parallel + * execution directly. External executors such as WorkService are required for + * concurrent work processing. 
The class functions as a centralized work registry + * where tasks are posted and claimed by worker threads. + * + * Handle semantics: + * - WorkContractHandle derives from EntropyObject and is stamped with (owner + index + generation) + * - Copying a handle copies the stamped identity; validation is performed against this group's slots + * - The group owns the lifetime; when a slot is released, the object's identity is cleared and the + * generation is incremented to invalidate stale handles + * + * @code + * // Complete workflow: mixed execution with worker service + * WorkContractGroup group(1024); + * WorkService service(4); // 4 worker threads + * service.addWorkContractGroup(&group); + * service.start(); + * + * // Submit background work + * std::vector handles; + * for (int i = 0; i < 10; ++i) { + * auto handle = group.createContract([i]() { + * processData(i); + * }); + * handle.schedule(); + * handles.push_back(handle); + * } + * + * // Submit main thread work + * auto uiHandle = group.createContract([]() { + * updateProgressBar(); + * }, ExecutionType::MainThread); + * uiHandle.schedule(); + * + * // Main thread pumps its work + * while (group.hasMainThreadWork()) { + * group.executeMainThreadWork(5); // Process up to 5 per frame + * renderFrame(); + * } + * + * // Wait for all background work to complete + * group.wait(); + * service.stop(); + * @endcode + */ +class WorkContractGroup : public EntropyEngine::Core::EntropyObject +{ +public: + const char* className() const noexcept override { + return "WorkContractGroup"; + } + uint64_t classHash() const noexcept override; + std::string toString() const override; + std::string debugString() const override; + std::string description() const override; + +private: + /// Sentinel value indicating end of lock-free linked list or invalid slot + /// Used in the free list implementation to mark the end of the chain and + /// in tagged pointers to indicate null references. 
Maximum uint32_t value + /// ensures it's never a valid array index. Static constexpr because it's + /// a fundamental constant used throughout the lock-free data structure. + static constexpr uint32_t INVALID_INDEX = ~0u; + + /** + * @brief Internal storage for a single work contract + * + * Each slot represents one work contract and tracks its lifecycle through + * atomic state transitions. The generation counter prevents use-after-free + * by invalidating old handles when slots are reused. + */ + struct ContractSlot + { + std::atomic generation{1}; ///< Handle validation counter + std::atomic state{ContractState::Free}; ///< Current lifecycle state + std::function work; ///< Work function + std::atomic nextFree{INVALID_INDEX}; ///< Next free slot + ExecutionType executionType{ExecutionType::AnyThread}; ///< Execution context (main/any thread) + }; + + std::vector _contracts; ///< Contract storage + std::unique_ptr _readyContracts; ///< Ready work queue + std::unique_ptr _mainThreadContracts; ///< Main thread work queue + std::atomic _freeListHead{0}; ///< Free list head (packed: [tag:32(upper) | index:32(lower)]) + + std::atomic _activeCount{0}; ///< Active contract count + std::atomic _scheduledCount{0}; ///< Scheduled count + std::atomic _executingCount{0}; ///< Executing count + std::atomic _selectingCount{0}; ///< Selection in progress + std::atomic _mainThreadScheduledCount{0}; ///< Main thread work pending + std::atomic _mainThreadExecutingCount{0}; ///< Main thread work running + std::atomic _mainThreadSelectingCount{0}; ///< Main thread selection count + + // Synchronization for wait() operations + mutable std::mutex _waitMutex; ///< Mutex for condition variable + mutable std::condition_variable _waitCondition; ///< Condition variable for waiting + + std::string _name; + + const size_t _capacity; ///< Maximum contracts + + // Concurrency provider support + IConcurrencyProvider* _concurrencyProvider = nullptr; ///< Work notification provider + mutable 
std::shared_mutex _concurrencyProviderMutex; ///< Protects provider during setup/teardown (COLD PATH ONLY) + std::list> _onCapacityAvailableCallbacks; ///< Capacity callbacks + mutable std::mutex _callbackMutex; ///< Protects callback list -namespace EntropyEngine { -namespace Core { -namespace Concurrency { - - // Forward declaration - class IConcurrencyProvider; - - /** - * @brief Factory and manager for work contracts with lock-free scheduling - * - * WorkContractGroup implements a work dispatcher capable of managing - * thousands of tasks without locks or blocking operations. It provides a comprehensive - * pool of work contracts with allocation, scheduling, and execution primitives suitable - * for job systems, task graphs, and high-throughput work management scenarios. - * - * The implementation uses SignalTree-based lock-free operations, - * enabling multiple threads to schedule and select work concurrently without contention. - * This design is optimized for game engines, parallel processing systems, and applications - * requiring management of numerous small work units. - * - * Key features: - * - Lock-free contract scheduling and selection - * - Generation-based handles prevent use-after-free bugs - * - Immediate resource cleanup on completion - * - Statistical monitoring (active/scheduled counts) - * - Wait functionality for synchronization points - * - * Important: This class provides scheduling primitives without handling parallel - * execution directly. External executors such as WorkService are required for - * concurrent work processing. The class functions as a centralized work registry - * where tasks are posted and claimed by worker threads. 
- * - * Handle semantics: - * - WorkContractHandle derives from EntropyObject and is stamped with (owner + index + generation) - * - Copying a handle copies the stamped identity; validation is performed against this group's slots - * - The group owns the lifetime; when a slot is released, the object's identity is cleared and the - * generation is incremented to invalidate stale handles - * + // Stopping support + std::atomic _stopping{false}; ///< Stopping flag + + // Timed deferral support (for WorkGraph timer integration) + std::function _timedDeferralCallback; ///< Callback for checking timed deferrals + mutable std::mutex _timedDeferralCallbackMutex; ///< Protects callback access + +public: + /** + * @brief Constructs a work contract group with specified capacity + * + * Pre-allocates all data structures for lock-free operation. Choose capacity + * based on peak concurrent load. + * + * @param capacity Maximum number of contracts (typically 1024-8192) + * + * @code + * // For a game engine handling frame tasks + * WorkContractGroup frameWork(2048); + * + * // For background processing + * WorkContractGroup backgroundTasks(512); + * @endcode + */ + explicit WorkContractGroup(size_t capacity, std::string name = "WorkContractGroup"); + + /** + * @brief Destructor ensures all work is stopped and completed + * + * Follows a strict destruction protocol to prevent deadlocks: + * 1. Calls stop() to prevent new work selection + * 2. Calls wait() to ensure all executing work completes + * 3. Unschedules and releases all remaining contracts + * 4. Reads concurrency provider pointer WITHOUT holding mutex lock + * 5. Calls notifyGroupDestroyed() to inform provider of destruction + * + * CRITICAL: The provider notification is made without holding the group's + * concurrency provider mutex to prevent ABBA deadlock with WorkService. + * Any deviation from this protocol may result in deadlock during destruction. 
+ * + * The provider will then: + * - Remove this group from its internal lists + * - Call setConcurrencyProvider(nullptr) to clear the back-reference + * + * This ensures proper bidirectional cleanup without lock ordering issues. + */ + ~WorkContractGroup(); + + // Delete copy operations - lock-free data structures shouldn't be copied + WorkContractGroup(const WorkContractGroup&) = delete; + WorkContractGroup& operator=(const WorkContractGroup&) = delete; + + // Move operations + WorkContractGroup(WorkContractGroup&& other) noexcept; + WorkContractGroup& operator=(WorkContractGroup&& other) noexcept; + + /** + * @brief Creates a new work contract with the given work function + * + * @param work Function to execute when contract runs (should be thread-safe) + * @param executionType Where this contract should be executed (default: AnyThread) + * @return Handle to the created contract, or invalid handle if group is full + * * @code - * // Complete workflow: mixed execution with worker service - * WorkContractGroup group(1024); - * WorkService service(4); // 4 worker threads - * service.addWorkContractGroup(&group); - * service.start(); - * - * // Submit background work - * std::vector handles; + * // Simple work for any thread + * auto handle = group.createContract([]() { + * std::cout << "Hello from work thread!\n"; + * }); + * + * // Main thread targeted work + * auto mainHandle = group.createContract([]() { + * updateUI(); + * }, ExecutionType::MainThread); + * + * // Check if creation succeeded + * if (!handle.valid()) { + * std::cerr << "Group is full - can't create more work\n"; + * } + * @endcode + */ + WorkContractHandle createContract(std::function work, + ExecutionType executionType = ExecutionType::AnyThread); + + /** + * @brief Waits for all scheduled and executing contracts to complete + * + * Blocks until all work finishes. Includes scheduled and executing contracts. 
+ * + * @code + * // Submit a batch of work + * for (int i = 0; i < 100; ++i) { + * auto handle = group.createContract([i]() { processItem(i); }); + * handle.schedule(); + * } + * + * // Wait for all work to complete + * group.wait(); + * std::cout << "All work finished!\n"; + * @endcode + */ + void wait(); + + /** + * @brief Stops the group from accepting new work selections + * + * Prevents new work selection. Executing work continues. + * Thread-safe. + */ + void stop(); + + /** + * @brief Resumes the group to allow new work selections + * + * Clears the stopping flag to allow selectForExecution() to return work + * again. Does NOT automatically notify waiting threads. + * + * Thread-safe. + */ + void resume(); + + /** + * @brief Checks if the group is in the process of stopping + * + * @return true if stop() has been called, false otherwise + */ + bool isStopping() const noexcept { + return _stopping.load(std::memory_order_seq_cst); + } + + /** + * @brief Executes all background (non-main-thread) contracts sequentially in the calling thread + * + * Grabs every scheduled background contract and executes them one by one in the current thread. + * Uses bias rotation to prevent starvation. Does NOT execute main thread targeted contracts. 
+ * + * @code + * // Schedule several background tasks * for (int i = 0; i < 10; ++i) { - * auto handle = group.createContract([i]() { - * processData(i); - * }); + * auto handle = group.createContract([i]() { + * std::cout << "Task " << i << "\n"; + * }); // Default is ExecutionType::AnyThread * handle.schedule(); - * handles.push_back(handle); * } - * - * // Submit main thread work - * auto uiHandle = group.createContract([]() { - * updateProgressBar(); - * }, ExecutionType::MainThread); - * uiHandle.schedule(); - * - * // Main thread pumps its work - * while (group.hasMainThreadWork()) { - * group.executeMainThreadWork(5); // Process up to 5 per frame + * + * // Execute all background contracts + * group.executeAllBackgroundWork(); + * // All background tasks are now complete + * @endcode + */ + void executeAllBackgroundWork(); + + /** + * @brief Gets the maximum capacity of this group + * + * @return Maximum number of contracts this group can handle + */ + size_t capacity() const noexcept { + return _capacity; + } + + /** + * @brief Gets the number of currently allocated contracts + * + * @return Number of contracts that have been created but not yet released + * + * @code + * std::cout << "Using " << group.activeCount() << " of " + * << group.capacity() << " available slots\n"; + * @endcode + */ + size_t activeCount() const noexcept { + return _activeCount.load(std::memory_order_acquire); + } + + /** + * @brief Gets the number of contracts currently scheduled for execution + * + * @return Number of contracts currently scheduled and waiting for execution + * + * @code + * if (group.scheduledCount() > 100) { + * std::cout << "Work load is getting full - might want to throttle\n"; + * } + * @endcode + */ + size_t scheduledCount() const noexcept { + return _scheduledCount.load(std::memory_order_acquire); + } + + /** + * @brief Gets the number of main thread contracts currently scheduled + * + * @return Number of main thread contracts waiting for execution + */ + 
size_t mainThreadScheduledCount() const noexcept { + return _mainThreadScheduledCount.load(std::memory_order_acquire); + } + + /** + * @brief Gets the number of main thread contracts currently executing + * + * @return Number of main thread contracts being executed + */ + size_t mainThreadExecutingCount() const noexcept { + return _mainThreadExecutingCount.load(std::memory_order_acquire); + } + + /** + * @brief Checks if there are any main thread contracts ready to execute + * + * @return true if main thread work is available + */ + bool hasMainThreadWork() const noexcept { + return mainThreadScheduledCount() > 0; + } + + /** + * @brief Schedules a contract for execution (called by handle.schedule()) + * + * Transitions a contract from Allocated to Scheduled state. Use the handle + * method instead of calling this directly. + * + * @param handle Handle to the contract to schedule + * @return Result indicating success or failure reason + */ + ScheduleResult scheduleContract(const WorkContractHandle& handle); + + /** + * @brief Removes a contract from scheduling (called by handle.unschedule()) + * + * Removes from ready list if not yet executing. Use handle method instead. + * + * @param handle Handle to the contract to unschedule + * @return Result indicating success or failure reason + */ + ScheduleResult unscheduleContract(const WorkContractHandle& handle); + + /** + * @brief Immediately releases a contract (called by handle.release()) + * + * Forcibly frees a contract. Use the handle method instead. + * + * @param handle Handle to the contract to release + */ + void releaseContract(const WorkContractHandle& handle); + + /** + * @brief Validates a handle belongs to this group (called by handle.valid()) + * + * Checks handle validity and generation. Use handle method instead. 
+ * + * @param handle Handle to validate + * @return true if handle is valid and belongs to this group + */ + bool isValidHandle(const WorkContractHandle& handle) const noexcept; + + /** + * @brief Selects a scheduled contract for execution + * + * Atomically transitions a contract from Scheduled to Executing state. + * + * @param bias Optional selection bias for fair work distribution + * @return Handle to an executing contract, or invalid handle if none available + */ + WorkContractHandle selectForExecution(std::optional> bias = std::nullopt); + + /** + * @brief Selects a main thread scheduled contract for execution + * + * Use this from your main thread to pick up work that must run there. + * Typically called in a loop until no more work is available. Thread-safe + * with other selections. + * + * @param bias Optional selection bias for fair work distribution + * @return Handle to an executing contract, or invalid handle if none available + * + * @code + * // Main thread pump pattern + * uint64_t bias = 0; + * while (auto handle = group.selectForMainThreadExecution(std::ref(bias))) { + * group.executeContract(handle); + * group.completeMainThreadExecution(handle); + * } + * @endcode + */ + WorkContractHandle selectForMainThreadExecution( + std::optional> bias = std::nullopt); + + /** + * @brief Executes all main thread targeted work contracts + * + * Convenience method that handles the full pump cycle internally. + * Use this when you want to drain all main thread work at once. + * Must be called from the main thread. 
+ * + * @return Number of contracts actually executed + * + * @code + * // In your game loop or UI thread + * void updateMainThread() { + * size_t executed = group.executeAllMainThreadWork(); + * if (executed > 0) { + * LOG_DEBUG("Processed {} main thread tasks", executed); + * } + * } + * @endcode + */ + size_t executeAllMainThreadWork(); + + /** + * @brief Executes main thread targeted work contracts with a limit + * + * Use when you need to bound main thread work per frame/iteration. + * Prevents blocking the main thread for too long. Must be called + * from the main thread. + * + * @param maxContracts Maximum number of contracts to execute + * @return Number of contracts actually executed + * + * @code + * // Limit main thread work to maintain 60 FPS + * void gameLoop() { + * // Execute at most 5 tasks per frame + * size_t executed = group.executeMainThreadWork(5); * renderFrame(); * } - * - * // Wait for all background work to complete - * group.wait(); - * service.stop(); * @endcode */ - class WorkContractGroup : public EntropyEngine::Core::EntropyObject { - public: - const char* className() const noexcept override { return "WorkContractGroup"; } - uint64_t classHash() const noexcept override; - std::string toString() const override; - std::string debugString() const override; - std::string description() const override; - private: - /// Sentinel value indicating end of lock-free linked list or invalid slot - /// Used in the free list implementation to mark the end of the chain and - /// in tagged pointers to indicate null references. Maximum uint32_t value - /// ensures it's never a valid array index. Static constexpr because it's - /// a fundamental constant used throughout the lock-free data structure. - static constexpr uint32_t INVALID_INDEX = ~0u; - - - /** - * @brief Internal storage for a single work contract - * - * Each slot represents one work contract and tracks its lifecycle through - * atomic state transitions. 
The generation counter prevents use-after-free - * by invalidating old handles when slots are reused. - */ - struct ContractSlot { - std::atomic generation{1}; ///< Handle validation counter - std::atomic state{ContractState::Free}; ///< Current lifecycle state - std::function work; ///< Work function - std::atomic nextFree{INVALID_INDEX}; ///< Next free slot - ExecutionType executionType{ExecutionType::AnyThread}; ///< Execution context (main/any thread) - }; - - std::vector _contracts; ///< Contract storage - std::unique_ptr _readyContracts; ///< Ready work queue - std::unique_ptr _mainThreadContracts; ///< Main thread work queue - std::atomic _freeListHead{0}; ///< Free list head (packed: [tag:32(upper) | index:32(lower)]) - - std::atomic _activeCount{0}; ///< Active contract count - std::atomic _scheduledCount{0}; ///< Scheduled count - std::atomic _executingCount{0}; ///< Executing count - std::atomic _selectingCount{0}; ///< Selection in progress - std::atomic _mainThreadScheduledCount{0}; ///< Main thread work pending - std::atomic _mainThreadExecutingCount{0}; ///< Main thread work running - std::atomic _mainThreadSelectingCount{0}; ///< Main thread selection count - - // Synchronization for wait() operations - mutable std::mutex _waitMutex; ///< Mutex for condition variable - mutable std::condition_variable _waitCondition; ///< Condition variable for waiting - - std::string _name; - - const size_t _capacity; ///< Maximum contracts - - // Concurrency provider support - IConcurrencyProvider* _concurrencyProvider = nullptr; ///< Work notification provider - mutable std::shared_mutex _concurrencyProviderMutex; ///< Protects provider during setup/teardown (COLD PATH ONLY) - std::list> _onCapacityAvailableCallbacks; ///< Capacity callbacks - mutable std::mutex _callbackMutex; ///< Protects callback list - - // Stopping support - std::atomic _stopping{false}; ///< Stopping flag - - // Timed deferral support (for WorkGraph timer integration) - std::function 
_timedDeferralCallback; ///< Callback for checking timed deferrals - mutable std::mutex _timedDeferralCallbackMutex; ///< Protects callback access - - public: - - /** - * @brief Constructs a work contract group with specified capacity - * - * Pre-allocates all data structures for lock-free operation. Choose capacity - * based on peak concurrent load. - * - * @param capacity Maximum number of contracts (typically 1024-8192) - * - * @code - * // For a game engine handling frame tasks - * WorkContractGroup frameWork(2048); - * - * // For background processing - * WorkContractGroup backgroundTasks(512); - * @endcode - */ - explicit WorkContractGroup(size_t capacity, std::string name = "WorkContractGroup"); - - /** - * @brief Destructor ensures all work is stopped and completed - * - * Follows a strict destruction protocol to prevent deadlocks: - * 1. Calls stop() to prevent new work selection - * 2. Calls wait() to ensure all executing work completes - * 3. Unschedules and releases all remaining contracts - * 4. Reads concurrency provider pointer WITHOUT holding mutex lock - * 5. Calls notifyGroupDestroyed() to inform provider of destruction - * - * CRITICAL: The provider notification is made without holding the group's - * concurrency provider mutex to prevent ABBA deadlock with WorkService. - * Any deviation from this protocol may result in deadlock during destruction. - * - * The provider will then: - * - Remove this group from its internal lists - * - Call setConcurrencyProvider(nullptr) to clear the back-reference - * - * This ensures proper bidirectional cleanup without lock ordering issues. 
- */ - ~WorkContractGroup(); - - // Delete copy operations - lock-free data structures shouldn't be copied - WorkContractGroup(const WorkContractGroup&) = delete; - WorkContractGroup& operator=(const WorkContractGroup&) = delete; - - // Move operations - WorkContractGroup(WorkContractGroup&& other) noexcept; - WorkContractGroup& operator=(WorkContractGroup&& other) noexcept; - - /** - * @brief Creates a new work contract with the given work function - * - * @param work Function to execute when contract runs (should be thread-safe) - * @param executionType Where this contract should be executed (default: AnyThread) - * @return Handle to the created contract, or invalid handle if group is full - * - * @code - * // Simple work for any thread - * auto handle = group.createContract([]() { - * std::cout << "Hello from work thread!\n"; - * }); - * - * // Main thread targeted work - * auto mainHandle = group.createContract([]() { - * updateUI(); - * }, ExecutionType::MainThread); - * - * // Check if creation succeeded - * if (!handle.valid()) { - * std::cerr << "Group is full - can't create more work\n"; - * } - * @endcode - */ - WorkContractHandle createContract(std::function work, - ExecutionType executionType = ExecutionType::AnyThread); - - /** - * @brief Waits for all scheduled and executing contracts to complete - * - * Blocks until all work finishes. Includes scheduled and executing contracts. - * - * @code - * // Submit a batch of work - * for (int i = 0; i < 100; ++i) { - * auto handle = group.createContract([i]() { processItem(i); }); - * handle.schedule(); - * } - * - * // Wait for all work to complete - * group.wait(); - * std::cout << "All work finished!\n"; - * @endcode - */ - void wait(); - - /** - * @brief Stops the group from accepting new work selections - * - * Prevents new work selection. Executing work continues. - * Thread-safe. 
- */ - void stop(); - - /** - * @brief Resumes the group to allow new work selections - * - * Clears the stopping flag to allow selectForExecution() to return work - * again. Does NOT automatically notify waiting threads. - * - * Thread-safe. - */ - void resume(); - - /** - * @brief Checks if the group is in the process of stopping - * - * @return true if stop() has been called, false otherwise - */ - bool isStopping() const noexcept { return _stopping.load(std::memory_order_seq_cst); } - - /** - * @brief Executes all background (non-main-thread) contracts sequentially in the calling thread - * - * Grabs every scheduled background contract and executes them one by one in the current thread. - * Uses bias rotation to prevent starvation. Does NOT execute main thread targeted contracts. - * - * @code - * // Schedule several background tasks - * for (int i = 0; i < 10; ++i) { - * auto handle = group.createContract([i]() { - * std::cout << "Task " << i << "\n"; - * }); // Default is ExecutionType::AnyThread - * handle.schedule(); - * } - * - * // Execute all background contracts - * group.executeAllBackgroundWork(); - * // All background tasks are now complete - * @endcode - */ - void executeAllBackgroundWork(); - - /** - * @brief Gets the maximum capacity of this group - * - * @return Maximum number of contracts this group can handle - */ - size_t capacity() const noexcept { return _capacity; } - - /** - * @brief Gets the number of currently allocated contracts - * - * @return Number of contracts that have been created but not yet released - * - * @code - * std::cout << "Using " << group.activeCount() << " of " - * << group.capacity() << " available slots\n"; - * @endcode - */ - size_t activeCount() const noexcept { return _activeCount.load(std::memory_order_acquire); } - - /** - * @brief Gets the number of contracts currently scheduled for execution - * - * @return Number of contracts currently scheduled and waiting for execution - * - * @code - * if 
(group.scheduledCount() > 100) { - * std::cout << "Work load is getting full - might want to throttle\n"; - * } - * @endcode - */ - size_t scheduledCount() const noexcept { return _scheduledCount.load(std::memory_order_acquire); } - - /** - * @brief Gets the number of main thread contracts currently scheduled - * - * @return Number of main thread contracts waiting for execution - */ - size_t mainThreadScheduledCount() const noexcept { - return _mainThreadScheduledCount.load(std::memory_order_acquire); - } - - /** - * @brief Gets the number of main thread contracts currently executing - * - * @return Number of main thread contracts being executed - */ - size_t mainThreadExecutingCount() const noexcept { - return _mainThreadExecutingCount.load(std::memory_order_acquire); - } - - /** - * @brief Checks if there are any main thread contracts ready to execute - * - * @return true if main thread work is available - */ - bool hasMainThreadWork() const noexcept { - return mainThreadScheduledCount() > 0; - } - - /** - * @brief Schedules a contract for execution (called by handle.schedule()) - * - * Transitions a contract from Allocated to Scheduled state. Use the handle - * method instead of calling this directly. - * - * @param handle Handle to the contract to schedule - * @return Result indicating success or failure reason - */ - ScheduleResult scheduleContract(const WorkContractHandle& handle); - - /** - * @brief Removes a contract from scheduling (called by handle.unschedule()) - * - * Removes from ready list if not yet executing. Use handle method instead. - * - * @param handle Handle to the contract to unschedule - * @return Result indicating success or failure reason - */ - ScheduleResult unscheduleContract(const WorkContractHandle& handle); - - /** - * @brief Immediately releases a contract (called by handle.release()) - * - * Forcibly frees a contract. Use the handle method instead. 
- * - * @param handle Handle to the contract to release - */ - void releaseContract(const WorkContractHandle& handle); - - /** - * @brief Validates a handle belongs to this group (called by handle.valid()) - * - * Checks handle validity and generation. Use handle method instead. - * - * @param handle Handle to validate - * @return true if handle is valid and belongs to this group - */ - bool isValidHandle(const WorkContractHandle& handle) const noexcept; - - /** - * @brief Selects a scheduled contract for execution - * - * Atomically transitions a contract from Scheduled to Executing state. - * - * @param bias Optional selection bias for fair work distribution - * @return Handle to an executing contract, or invalid handle if none available - */ - WorkContractHandle selectForExecution(std::optional> bias = std::nullopt); - - /** - * @brief Selects a main thread scheduled contract for execution - * - * Use this from your main thread to pick up work that must run there. - * Typically called in a loop until no more work is available. Thread-safe - * with other selections. - * - * @param bias Optional selection bias for fair work distribution - * @return Handle to an executing contract, or invalid handle if none available - * - * @code - * // Main thread pump pattern - * uint64_t bias = 0; - * while (auto handle = group.selectForMainThreadExecution(std::ref(bias))) { - * group.executeContract(handle); - * group.completeMainThreadExecution(handle); - * } - * @endcode - */ - WorkContractHandle selectForMainThreadExecution(std::optional> bias = std::nullopt); - - /** - * @brief Executes all main thread targeted work contracts - * - * Convenience method that handles the full pump cycle internally. - * Use this when you want to drain all main thread work at once. - * Must be called from the main thread. 
- * - * @return Number of contracts actually executed - * - * @code - * // In your game loop or UI thread - * void updateMainThread() { - * size_t executed = group.executeAllMainThreadWork(); - * if (executed > 0) { - * LOG_DEBUG("Processed {} main thread tasks", executed); - * } - * } - * @endcode - */ - size_t executeAllMainThreadWork(); - - /** - * @brief Executes main thread targeted work contracts with a limit - * - * Use when you need to bound main thread work per frame/iteration. - * Prevents blocking the main thread for too long. Must be called - * from the main thread. - * - * @param maxContracts Maximum number of contracts to execute - * @return Number of contracts actually executed - * - * @code - * // Limit main thread work to maintain 60 FPS - * void gameLoop() { - * // Execute at most 5 tasks per frame - * size_t executed = group.executeMainThreadWork(5); - * renderFrame(); - * } - * @endcode - */ - size_t executeMainThreadWork(size_t maxContracts); - - /** - * @brief Executes the work function of a contract - * - * Only call on contracts returned by selectForExecution(). - * - * @param handle Handle to the contract to execute (must be in Executing state) - */ - void executeContract(const WorkContractHandle& handle); - - /** - * @brief Aborts execution without running the task (shutdown-only path) - */ - void abortExecution(const WorkContractHandle& handle); - - /** - * @brief Completes execution and cleans up a contract - * - * Must be called after executeContract() to complete the lifecycle. - * - * @param handle Handle to the contract that finished executing - */ - void completeExecution(const WorkContractHandle& handle); - - /** - * @brief Completes execution and cleans up a main thread contract - * - * Like completeExecution() but for main thread contracts. Updates the - * correct counters and frees the contract for reuse. Always call this - * after executeContract() for main thread work. 
- * - * @param handle Handle to the main thread contract that finished executing - * - * @code - * auto handle = group.selectForMainThreadExecution(); - * if (handle.valid()) { - * group.executeContract(handle); - * group.completeMainThreadExecution(handle); // Essential cleanup - * } - * @endcode - */ - void completeMainThreadExecution(const WorkContractHandle& handle); - - /** - * @brief Gets the current state of a contract - * - * @param handle Handle to query - * @return Current state of the contract, or Free if handle is invalid - */ - ContractState getContractState(const WorkContractHandle& handle) const noexcept; - - /** - * @brief Returns the current number of contracts being actively executed - * - * Useful for thread scheduling and load balancing decisions. - * - * @return The number of currently executing contracts - */ - size_t executingCount() const noexcept; - - /** - * @brief Associates this group with a concurrency provider - * - * Provider will be notified when work becomes available. Call during - * setup/teardown, not during active work execution. - * - * @param provider The concurrency provider to associate with, or nullptr to clear - */ - void setConcurrencyProvider(IConcurrencyProvider* provider); - - /** - * @brief Gets the currently associated concurrency provider - * - * @return The current provider, or nullptr if none is set - */ - IConcurrencyProvider* getConcurrencyProvider() const noexcept { return _concurrencyProvider; } - - using CapacityCallback = std::list>::iterator; - - /** - * @brief Add a callback to be invoked when capacity becomes available - * - * Called after a contract completes and frees up capacity. 
- * - * @param callback Function to call when capacity is available - * @return Iterator that can be used to remove the callback - */ - CapacityCallback addOnCapacityAvailable(std::function callback); - - /** - * @brief Remove a capacity available callback - * - * @param it Iterator returned from addOnCapacityAvailable - */ - void removeOnCapacityAvailable(CapacityCallback it); - - /** - * @brief Checks for timed deferrals and schedules ready nodes - * - * Invokes the timed deferral callback if one is set (used by WorkGraph for timer support). - * Returns 0 if no callback is registered (standard WorkContractGroups don't support timers). - * Thread-safe: Protected by mutex. - * - * @return Number of nodes that were scheduled from timed deferral queue - */ - size_t checkTimedDeferrals(); - - /** - * @brief Sets a callback for checking timed deferrals - * - * Allows external owners (like WorkGraph) to provide timer functionality - * without requiring inheritance or RTTI/dynamic_cast. - * Thread-safe: Protected by mutex. - * - * @param callback Function that checks and schedules timed deferrals, or nullptr to clear - */ - void setTimedDeferralCallback(std::function callback); - - private: - /** - * @brief Creates a SignalTree sized appropriately for the given capacity - * - * Handles power-of-2 rounding required by SignalTree's binary structure. - * - * @param capacity Number of work contracts the tree needs to support - * @return Unique pointer to properly sized SignalTree - */ - static std::unique_ptr createSignalTree(size_t capacity); - - /** - * @brief Validates that a handle belongs to this group with correct generation - * - * Internal validation checking owner, bounds, and generation. 
- * - * @param handle Handle to validate - * @return true if handle is completely valid for this group - */ - bool validateHandle(const WorkContractHandle& handle) const noexcept; - - /** - * @brief Returns a contract slot to the free list after cleanup - * - * Increments generation, clears work function, updates counters, - * and notifies waiters. - * - * @param index The slot index to return to the free list - * @param previousState The state the slot was in before being freed - * @param isMainThread Whether this is a main thread contract (default: false) - */ - void returnSlotToFreeList(uint32_t index, ContractState previousState, bool isMainThread = false); - - /** - * @brief Releases all remaining contracts in the group - * - * Used during destruction to ensure no contracts are left hanging. - */ - void releaseAllContracts(); - - /** - * @brief Unschedules all scheduled contracts in the group - * - * Moves scheduled contracts back to allocated state during destruction. - */ - void unscheduleAllContracts(); - }; + size_t executeMainThreadWork(size_t maxContracts); -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine + /** + * @brief Executes the work function of a contract + * + * Only call on contracts returned by selectForExecution(). + * + * @param handle Handle to the contract to execute (must be in Executing state) + */ + void executeContract(const WorkContractHandle& handle); + /** + * @brief Aborts execution without running the task (shutdown-only path) + */ + void abortExecution(const WorkContractHandle& handle); + + /** + * @brief Completes execution and cleans up a contract + * + * Must be called after executeContract() to complete the lifecycle. + * + * @param handle Handle to the contract that finished executing + */ + void completeExecution(const WorkContractHandle& handle); + + /** + * @brief Completes execution and cleans up a main thread contract + * + * Like completeExecution() but for main thread contracts. 
Updates the + * correct counters and frees the contract for reuse. Always call this + * after executeContract() for main thread work. + * + * @param handle Handle to the main thread contract that finished executing + * + * @code + * auto handle = group.selectForMainThreadExecution(); + * if (handle.valid()) { + * group.executeContract(handle); + * group.completeMainThreadExecution(handle); // Essential cleanup + * } + * @endcode + */ + void completeMainThreadExecution(const WorkContractHandle& handle); + + /** + * @brief Gets the current state of a contract + * + * @param handle Handle to query + * @return Current state of the contract, or Free if handle is invalid + */ + ContractState getContractState(const WorkContractHandle& handle) const noexcept; + + /** + * @brief Returns the current number of contracts being actively executed + * + * Useful for thread scheduling and load balancing decisions. + * + * @return The number of currently executing contracts + */ + size_t executingCount() const noexcept; + + /** + * @brief Associates this group with a concurrency provider + * + * Provider will be notified when work becomes available. Call during + * setup/teardown, not during active work execution. + * + * @param provider The concurrency provider to associate with, or nullptr to clear + */ + void setConcurrencyProvider(IConcurrencyProvider* provider); + + /** + * @brief Gets the currently associated concurrency provider + * + * @return The current provider, or nullptr if none is set + */ + IConcurrencyProvider* getConcurrencyProvider() const noexcept { + return _concurrencyProvider; + } + + using CapacityCallback = std::list>::iterator; + + /** + * @brief Add a callback to be invoked when capacity becomes available + * + * Called after a contract completes and frees up capacity. 
+ * + * @param callback Function to call when capacity is available + * @return Iterator that can be used to remove the callback + */ + CapacityCallback addOnCapacityAvailable(std::function callback); + + /** + * @brief Remove a capacity available callback + * + * @param it Iterator returned from addOnCapacityAvailable + */ + void removeOnCapacityAvailable(CapacityCallback it); + + /** + * @brief Checks for timed deferrals and schedules ready nodes + * + * Invokes the timed deferral callback if one is set (used by WorkGraph for timer support). + * Returns 0 if no callback is registered (standard WorkContractGroups don't support timers). + * Thread-safe: Protected by mutex. + * + * @return Number of nodes that were scheduled from timed deferral queue + */ + size_t checkTimedDeferrals(); + + /** + * @brief Sets a callback for checking timed deferrals + * + * Allows external owners (like WorkGraph) to provide timer functionality + * without requiring inheritance or RTTI/dynamic_cast. + * Thread-safe: Protected by mutex. + * + * @param callback Function that checks and schedules timed deferrals, or nullptr to clear + */ + void setTimedDeferralCallback(std::function callback); + +private: + /** + * @brief Creates a SignalTree sized appropriately for the given capacity + * + * Handles power-of-2 rounding required by SignalTree's binary structure. + * + * @param capacity Number of work contracts the tree needs to support + * @return Unique pointer to properly sized SignalTree + */ + static std::unique_ptr createSignalTree(size_t capacity); + + /** + * @brief Validates that a handle belongs to this group with correct generation + * + * Internal validation checking owner, bounds, and generation. 
+ * + * @param handle Handle to validate + * @return true if handle is completely valid for this group + */ + bool validateHandle(const WorkContractHandle& handle) const noexcept; + + /** + * @brief Returns a contract slot to the free list after cleanup + * + * Increments generation, clears work function, updates counters, + * and notifies waiters. + * + * @param index The slot index to return to the free list + * @param previousState The state the slot was in before being freed + * @param isMainThread Whether this is a main thread contract (default: false) + */ + void returnSlotToFreeList(uint32_t index, ContractState previousState, bool isMainThread = false); + + /** + * @brief Releases all remaining contracts in the group + * + * Used during destruction to ensure no contracts are left hanging. + */ + void releaseAllContracts(); + + /** + * @brief Unschedules all scheduled contracts in the group + * + * Moves scheduled contracts back to allocated state during destruction. + */ + void unscheduleAllContracts(); +}; +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkContractHandle.cpp b/src/Concurrency/WorkContractHandle.cpp index 879b376..ae50085 100644 --- a/src/Concurrency/WorkContractHandle.cpp +++ b/src/Concurrency/WorkContractHandle.cpp @@ -8,64 +8,70 @@ */ #include "WorkContractHandle.h" -#include "WorkContractGroup.h" -#include "../TypeSystem/TypeID.h" + #include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "../TypeSystem/TypeID.h" +#include "WorkContractGroup.h" - ScheduleResult WorkContractHandle::schedule() { - auto* group = handleOwnerAs(); - if (!group) return ScheduleResult::Invalid; - return group->scheduleContract(*this); - } - - ScheduleResult WorkContractHandle::unschedule() { - auto* group = handleOwnerAs(); - if (!group) return ScheduleResult::Invalid; - return group->unscheduleContract(*this); - } - - bool WorkContractHandle::valid() const { - auto* 
group = handleOwnerAs(); - return group && group->isValidHandle(*this); - } - - void WorkContractHandle::release() { - if (auto* group = handleOwnerAs()) { - group->releaseContract(*this); - } - // Clear stamped identity to make subsequent calls fast no-ops - HandleAccess::clear(*this); - } - - bool WorkContractHandle::isScheduled() const { - auto* group = handleOwnerAs(); - if (!group) return false; - return group->getContractState(*this) == ContractState::Scheduled; - } - - bool WorkContractHandle::isExecuting() const { - auto* group = handleOwnerAs(); - if (!group) return false; - return group->getContractState(*this) == ContractState::Executing; +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ + +ScheduleResult WorkContractHandle::schedule() { + auto* group = handleOwnerAs(); + if (!group) return ScheduleResult::Invalid; + return group->scheduleContract(*this); +} + +ScheduleResult WorkContractHandle::unschedule() { + auto* group = handleOwnerAs(); + if (!group) return ScheduleResult::Invalid; + return group->unscheduleContract(*this); +} + +bool WorkContractHandle::valid() const { + auto* group = handleOwnerAs(); + return group && group->isValidHandle(*this); +} + +void WorkContractHandle::release() { + if (auto* group = handleOwnerAs()) { + group->releaseContract(*this); } + // Clear stamped identity to make subsequent calls fast no-ops + HandleAccess::clear(*this); +} + +bool WorkContractHandle::isScheduled() const { + auto* group = handleOwnerAs(); + if (!group) return false; + return group->getContractState(*this) == ContractState::Scheduled; +} + +bool WorkContractHandle::isExecuting() const { + auto* group = handleOwnerAs(); + if (!group) return false; + return group->getContractState(*this) == ContractState::Executing; +} uint64_t WorkContractHandle::classHash() const noexcept { - static const uint64_t hash = static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); + static const uint64_t hash = + 
static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); return hash; } std::string WorkContractHandle::toString() const { if (hasHandle()) { - return std::format("{}@{}(owner={}, idx={}, gen={})", - className(), static_cast(this), handleOwner(), handleIndex(), handleGeneration()); + return std::format("{}@{}(owner={}, idx={}, gen={})", className(), static_cast(this), + handleOwner(), handleIndex(), handleGeneration()); } return std::format("{}@{}(invalid)", className(), static_cast(this)); } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkContractHandle.h b/src/Concurrency/WorkContractHandle.h index d23682f..52d80c2 100644 --- a/src/Concurrency/WorkContractHandle.h +++ b/src/Concurrency/WorkContractHandle.h @@ -20,178 +20,180 @@ #include #include + #include "../Core/EntropyObject.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ - // Forward declaration - class WorkContractGroup; - - // Tag type for work contract handles - struct WorkContractTag {}; +// Forward declaration +class WorkContractGroup; - /** - * @brief States that a work contract can be in during its lifecycle - */ - enum class ContractState : uint32_t { - Free = 0, ///< Contract slot is available for allocation - Allocated = 1, ///< Contract has been allocated but not scheduled - Scheduled = 2, ///< Contract is scheduled and ready for execution - Executing = 3, ///< Contract is currently being executed - Completed = 4 ///< Contract has completed execution - }; +// Tag type for work contract handles +struct WorkContractTag +{ +}; - /** - * @brief Result of schedule/unschedule operations - */ - enum class ScheduleResult { - Scheduled, ///< Contract is now scheduled (successful schedule operation) - AlreadyScheduled, ///< Contract 
was already scheduled (schedule operation failed) - NotScheduled, ///< Contract is not scheduled (successful unschedule operation) - Executing, ///< Cannot modify - currently executing - Invalid ///< Invalid handle provided - }; +/** + * @brief States that a work contract can be in during its lifecycle + */ +enum class ContractState : uint32_t +{ + Free = 0, ///< Contract slot is available for allocation + Allocated = 1, ///< Contract has been allocated but not scheduled + Scheduled = 2, ///< Contract is scheduled and ready for execution + Executing = 3, ///< Contract is currently being executed + Completed = 4 ///< Contract has completed execution +}; - /** - * @class WorkContractHandle - * @brief EntropyObject-stamped handle for work contracts - * - * This handle derives from EntropyObject and carries a stamped identity: - * owner (WorkContractGroup*), slot index, and generation. The group is the - * source of truth; validation compares the stamp against the group's slot. - * - * Copy semantics: - * - Copying a handle copies only its stamped identity (no ownership transfer). - * - The group owns lifetime; when a slot is freed, the handle becomes invalid. - * - * Typical workflow: - * 1. Create via WorkContractGroup::createContract() - * 2. Call schedule(), optionally unschedule() - * 3. 
After execution starts or release(), valid() becomes false - * - * @code - * WorkContractGroup group(1024); - * auto h = group.createContract([]{ doWork(); }); - * if (h.schedule() == ScheduleResult::Scheduled) { // queued } - * if (h.valid()) { // still schedulable } - * @endcode - */ - class WorkContractHandle : public EntropyEngine::Core::EntropyObject { - private: - friend class WorkContractGroup; +/** + * @brief Result of schedule/unschedule operations + */ +enum class ScheduleResult +{ + Scheduled, ///< Contract is now scheduled (successful schedule operation) + AlreadyScheduled, ///< Contract was already scheduled (schedule operation failed) + NotScheduled, ///< Contract is not scheduled (successful unschedule operation) + Executing, ///< Cannot modify - currently executing + Invalid ///< Invalid handle provided +}; - // Private constructor for group to stamp identity - WorkContractHandle(WorkContractGroup* group, uint32_t index, uint32_t generation) { - HandleAccess::set(*this, group, index, generation); +/** + * @class WorkContractHandle + * @brief EntropyObject-stamped handle for work contracts + * + * This handle derives from EntropyObject and carries a stamped identity: + * owner (WorkContractGroup*), slot index, and generation. The group is the + * source of truth; validation compares the stamp against the group's slot. + * + * Copy semantics: + * - Copying a handle copies only its stamped identity (no ownership transfer). + * - The group owns lifetime; when a slot is freed, the handle becomes invalid. + * + * Typical workflow: + * 1. Create via WorkContractGroup::createContract() + * 2. Call schedule(), optionally unschedule() + * 3. 
After execution starts or release(), valid() becomes false + * + * @code + * WorkContractGroup group(1024); + * auto h = group.createContract([]{ doWork(); }); + * if (h.schedule() == ScheduleResult::Scheduled) { // queued } + * if (h.valid()) { // still schedulable } + * @endcode + */ +class WorkContractHandle : public EntropyEngine::Core::EntropyObject +{ +private: + friend class WorkContractGroup; + + // Private constructor for group to stamp identity + WorkContractHandle(WorkContractGroup* group, uint32_t index, uint32_t generation) { + HandleAccess::set(*this, group, index, generation); + } + +public: + // Default: invalid (no stamped identity) + WorkContractHandle() = default; + + // Copy constructor: create a new handle object stamped with the same identity + WorkContractHandle(const WorkContractHandle& other) noexcept { + if (other.hasHandle()) { + HandleAccess::set(*this, const_cast(other.handleOwner()), other.handleIndex(), + other.handleGeneration()); } - - public: - // Default: invalid (no stamped identity) - WorkContractHandle() = default; - - // Copy constructor: create a new handle object stamped with the same identity - WorkContractHandle(const WorkContractHandle& other) noexcept { + } + // Copy assignment + WorkContractHandle& operator=(const WorkContractHandle& other) noexcept { + if (this != &other) { if (other.hasHandle()) { - HandleAccess::set(*this, - const_cast(other.handleOwner()), - other.handleIndex(), + HandleAccess::set(*this, const_cast(other.handleOwner()), other.handleIndex(), other.handleGeneration()); + } else { + HandleAccess::clear(*this); } } - // Copy assignment - WorkContractHandle& operator=(const WorkContractHandle& other) noexcept { - if (this != &other) { - if (other.hasHandle()) { - HandleAccess::set(*this, - const_cast(other.handleOwner()), - other.handleIndex(), - other.handleGeneration()); - } else { - HandleAccess::clear(*this); - } - } - return *this; + return *this; + } + // Move constructor + 
WorkContractHandle(WorkContractHandle&& other) noexcept { + if (other.hasHandle()) { + HandleAccess::set(*this, const_cast(other.handleOwner()), other.handleIndex(), + other.handleGeneration()); } - // Move constructor - WorkContractHandle(WorkContractHandle&& other) noexcept { + } + // Move assignment + WorkContractHandle& operator=(WorkContractHandle&& other) noexcept { + if (this != &other) { if (other.hasHandle()) { - HandleAccess::set(*this, - const_cast(other.handleOwner()), - other.handleIndex(), + HandleAccess::set(*this, const_cast(other.handleOwner()), other.handleIndex(), other.handleGeneration()); + } else { + HandleAccess::clear(*this); } } - // Move assignment - WorkContractHandle& operator=(WorkContractHandle&& other) noexcept { - if (this != &other) { - if (other.hasHandle()) { - HandleAccess::set(*this, - const_cast(other.handleOwner()), - other.handleIndex(), - other.handleGeneration()); - } else { - HandleAccess::clear(*this); - } - } - return *this; - } + return *this; + } - // Maintain the same public API - - /** - * @brief Schedules this contract for execution - * - * Transitions Allocated -> Scheduled. No-op if already scheduled. - * @return Scheduled, AlreadyScheduled, Executing, or Invalid - * - * @code - * auto h = group.createContract([]{}); - * if (h.schedule() == ScheduleResult::Scheduled) { // scheduled } - * @endcode - */ - ScheduleResult schedule(); - - /** - * @brief Attempts to remove this contract from the ready set - * - * Succeeds only when in Scheduled state; cannot cancel while Executing. - * @return NotScheduled on success, Executing if too late, or Invalid - */ - ScheduleResult unschedule(); - - /** - * @brief Checks whether this handle still refers to a live slot - * @return true if owner, index, and generation match a live slot - */ - bool valid() const; - - /** - * @brief Immediately frees this contract's slot - * - * Clears scheduling state and returns the slot to the free list. After this, valid() is false. 
- */ - void release(); - - /** - * @brief Reports whether the contract is currently Scheduled - * @return true if scheduled and waiting for execution - */ - bool isScheduled() const; - - /** - * @brief Reports whether the contract is currently Executing - * @return true if actively running - */ - bool isExecuting() const; - - const char* className() const noexcept override { return "WorkContractHandle"; } - uint64_t classHash() const noexcept override; - std::string toString() const override; - }; - -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine + // Maintain the same public API + + /** + * @brief Schedules this contract for execution + * + * Transitions Allocated -> Scheduled. No-op if already scheduled. + * @return Scheduled, AlreadyScheduled, Executing, or Invalid + * + * @code + * auto h = group.createContract([]{}); + * if (h.schedule() == ScheduleResult::Scheduled) { // scheduled } + * @endcode + */ + ScheduleResult schedule(); + + /** + * @brief Attempts to remove this contract from the ready set + * + * Succeeds only when in Scheduled state; cannot cancel while Executing. + * @return NotScheduled on success, Executing if too late, or Invalid + */ + ScheduleResult unschedule(); + /** + * @brief Checks whether this handle still refers to a live slot + * @return true if owner, index, and generation match a live slot + */ + bool valid() const; + + /** + * @brief Immediately frees this contract's slot + * + * Clears scheduling state and returns the slot to the free list. After this, valid() is false. 
+ */ + void release(); + + /** + * @brief Reports whether the contract is currently Scheduled + * @return true if scheduled and waiting for execution + */ + bool isScheduled() const; + + /** + * @brief Reports whether the contract is currently Executing + * @return true if actively running + */ + bool isExecuting() const; + + const char* className() const noexcept override { + return "WorkContractHandle"; + } + uint64_t classHash() const noexcept override; + std::string toString() const override; +}; + +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkGraph.cpp b/src/Concurrency/WorkGraph.cpp index 6459790..139fcd7 100644 --- a/src/Concurrency/WorkGraph.cpp +++ b/src/Concurrency/WorkGraph.cpp @@ -8,34 +8,35 @@ */ #include "WorkGraph.h" -#include "NodeStateManager.h" -#include "NodeScheduler.h" -#include "WorkGraphEvents.h" + #include -#include #include #include +#include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "NodeScheduler.h" +#include "NodeStateManager.h" +#include "WorkGraphEvents.h" -WorkGraph::WorkGraph(WorkContractGroup* workContractGroup) - : WorkGraph(workContractGroup, WorkGraphConfig{}) { -} +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ + +WorkGraph::WorkGraph(WorkContractGroup* workContractGroup) : WorkGraph(workContractGroup, WorkGraphConfig{}) {} WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig& config) - : Debug::Named("WorkGraph") - , _workContractGroup(workContractGroup) - , _config(config) { + : Debug::Named("WorkGraph"), _workContractGroup(workContractGroup), _config(config) { if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph constructor called"); } - + if (!_workContractGroup) { throw std::invalid_argument("WorkGraph requires a valid WorkContractGroup"); } - + // Create event bus if configured if (_config.enableEvents) { if (_config.sharedEventBus) { @@ 
-46,30 +47,23 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig _eventBus = std::make_unique(); } } - + // Always create state manager (it's fundamental to correct operation) auto* eventBusPtr = _config.enableEvents ? getEventBus() : nullptr; if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", eventBusPtr ? "WorkGraph: Creating state manager with event bus" : "WorkGraph: Creating state manager WITHOUT event bus"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", eventBusPtr ? "WorkGraph: Creating state manager with event bus" + : "WorkGraph: Creating state manager WITHOUT event bus"); } - _stateManager = std::make_unique( - this, - eventBusPtr - ); - + _stateManager = std::make_unique(this, eventBusPtr); + // Always create scheduler NodeScheduler::Config schedulerConfig; schedulerConfig.maxDeferredNodes = _config.maxDeferredNodes; schedulerConfig.enableBatchScheduling = _config.enableAdvancedScheduling; schedulerConfig.enableDebugLogging = _config.enableDebugLogging; - _scheduler = std::make_unique( - _workContractGroup, - this, - &_graphMutex, - _config.enableEvents ? getEventBus() : nullptr, - schedulerConfig - ); - + _scheduler = std::make_unique(_workContractGroup, this, &_graphMutex, + _config.enableEvents ? 
getEventBus() : nullptr, schedulerConfig); + // Set up safe scheduler callbacks with proper lifetime tracking NodeScheduler::Callbacks callbacks; callbacks.onNodeExecuting = [this](NodeHandle node) { @@ -78,7 +72,8 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig if (_config.enableDebugLogging) { auto* nodeData = _graph.getNodeData(node); if (nodeData) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Node transitioning to Executing, current state: " + std::to_string(static_cast(nodeData->state.load()))); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Node transitioning to Executing, current state: " + + std::to_string(static_cast(nodeData->state.load()))); } else { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Node transitioning to Executing"); } @@ -119,24 +114,23 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig if (nodeData) { // Prevent double-processing bool expected = false; - if (nodeData->completionProcessed.compare_exchange_strong(expected, true, - std::memory_order_acq_rel)) { + if (nodeData->completionProcessed.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) { // Transition to failed state _stateManager->transitionState(node, nodeData->state.load(), NodeState::Failed); - + // Increment dropped count and decrement pending count _droppedNodes.fetch_add(1, std::memory_order_relaxed); uint32_t pending = _pendingNodes.fetch_sub(1, std::memory_order_acq_rel) - 1; - + // Cancel all dependent nodes (like a failed node would) cancelDependents(node); - + // If all nodes are "done" (completed/failed/dropped), notify waiters if (pending == 0) { std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - + ENTROPY_LOG_ERROR_CAT("WorkGraph", "Node dropped due to deferred queue overflow!"); } } @@ -161,7 +155,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig } }; _scheduler->setCallbacks(callbacks); - + // Register callback for 
when contract capacity becomes available // This allows us to process deferred nodes at the right time // We process multiple rounds to keep the pipeline full @@ -175,25 +169,25 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig // First, check timed deferrals - wake up any timers/delayed work that's ready size_t timedProcessed = _scheduler->processTimedDeferredNodes(); if (timedProcessed > 0 && _config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Processed " + std::to_string(timedProcessed) + " timed deferred nodes"); + ENTROPY_LOG_DEBUG_CAT( + "Concurrency", "WorkGraph: Processed " + std::to_string(timedProcessed) + " timed deferred nodes"); } // Then process regular deferred nodes multiple times to fill capacity // This is important when we have many deferred nodes for (size_t i = 0; i < _config.maxDeferredProcessingIterations; i++) { size_t processed = _scheduler->processDeferredNodes(); - if (processed == 0) break; // No more capacity or no more deferred nodes + if (processed == 0) break; // No more capacity or no more deferred nodes if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Capacity callback processed " + std::to_string(processed) + " deferred nodes"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Capacity callback processed " + + std::to_string(processed) + " deferred nodes"); } } } }); // Set up timed deferral callback to avoid dynamic_cast in WorkService - _workContractGroup->setTimedDeferralCallback([this]() { - return checkTimedDeferrals(); - }); + _workContractGroup->setTimedDeferralCallback([this]() { return checkTimedDeferrals(); }); // Register with debug system (can be disabled via config) if (_config.enableDebugRegistration) { @@ -205,30 +199,30 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig WorkGraph::~WorkGraph() { if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph 
destructor starting, pending nodes: " + std::to_string(_pendingNodes.load())); + ENTROPY_LOG_DEBUG_CAT("Concurrency", + "WorkGraph destructor starting, pending nodes: " + std::to_string(_pendingNodes.load())); } - + // Set destroyed flag to prevent new callbacks _destroyed.store(true, std::memory_order_release); - + // Unregister callbacks from WorkContractGroup first // This prevents new callbacks from being scheduled if (_workContractGroup) { _workContractGroup->removeOnCapacityAvailable(_capacityCallbackIt); - _workContractGroup->setTimedDeferralCallback(nullptr); // Clear timed deferral callback + _workContractGroup->setTimedDeferralCallback(nullptr); // Clear timed deferral callback } - + // Wait for all active callbacks to complete if (_activeCallbacks.load(std::memory_order_acquire) > 0) { if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph destructor waiting for callbacks: " + std::to_string(_activeCallbacks.load())); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph destructor waiting for callbacks: " + + std::to_string(_activeCallbacks.load())); } std::unique_lock lock(_waitMutex); - _shutdownCondition.wait(lock, [this]() { - return _activeCallbacks.load(std::memory_order_acquire) == 0; - }); + _shutdownCondition.wait(lock, [this]() { return _activeCallbacks.load(std::memory_order_acquire) == 0; }); } - + // Now safe to proceed with cleanup // Unregister from debug system (if registered) if (_config.enableDebugRegistration) { @@ -238,31 +232,29 @@ WorkGraph::~WorkGraph() { } } -WorkGraph::NodeHandle WorkGraph::addNode(std::function work, - const std::string& name, - void* userData, - ExecutionType executionType) { +WorkGraph::NodeHandle WorkGraph::addNode(std::function work, const std::string& name, void* userData, + ExecutionType executionType) { std::unique_lock lock(_graphMutex); - + // Create node with the work and execution type WorkGraphNode node(std::move(work), name, executionType); node.userData = userData; - + // 
Add to graph and track as pending auto handle = _graph.addNode(std::move(node)); _pendingNodes.fetch_add(1, std::memory_order_relaxed); - + // Cache the handle for access later _nodeHandles.push_back(handle); - + // Register with state manager _stateManager->registerNode(handle, NodeState::Pending); - + // Publish event if enabled if (auto* eventBus = getEventBus()) { eventBus->publish(NodeAddedEvent(this, handle)); } - + // If execution has already started, check if this node can execute immediately if (_executionStarted.load(std::memory_order_acquire)) { auto* nodeData = _graph.getNodeData(handle); @@ -277,44 +269,40 @@ WorkGraph::NodeHandle WorkGraph::addNode(std::function work, } } } - + return handle; } -WorkGraph::NodeHandle WorkGraph::addYieldableNode(YieldableWorkFunction work, - const std::string& name, - void* userData, - ExecutionType executionType, - std::optional maxReschedules) { +WorkGraph::NodeHandle WorkGraph::addYieldableNode(YieldableWorkFunction work, const std::string& name, void* userData, + ExecutionType executionType, std::optional maxReschedules) { std::unique_lock lock(_graphMutex); - + // Create node with yieldable work function WorkGraphNode node(std::move(work), name, executionType); node.userData = userData; node.maxReschedules = maxReschedules; - + // Add to graph and track as pending auto handle = _graph.addNode(std::move(node)); _pendingNodes.fetch_add(1, std::memory_order_relaxed); - + // Cache the handle for access later _nodeHandles.push_back(handle); - + // Register with state manager _stateManager->registerNode(handle, NodeState::Pending); - + // Publish event if enabled if (auto* eventBus = getEventBus()) { eventBus->publish(NodeAddedEvent(this, handle)); } - + if (_config.enableDebugLogging) { - auto msg = std::format("Added yieldable node '{}' with max reschedules: {}", - name, - maxReschedules.has_value() ? 
std::to_string(*maxReschedules) : "unlimited"); + auto msg = std::format("Added yieldable node '{}' with max reschedules: {}", name, + maxReschedules.has_value() ? std::to_string(*maxReschedules) : "unlimited"); ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); } - + // If execution has already started, check if this node can execute immediately if (_executionStarted.load(std::memory_order_acquire)) { auto* nodeData = _graph.getNodeData(handle); @@ -329,7 +317,7 @@ WorkGraph::NodeHandle WorkGraph::addYieldableNode(YieldableWorkFunction work, } } } - + return handle; } @@ -354,7 +342,7 @@ void WorkGraph::reset() { if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::reset() - resetting execution state for " + - std::to_string(_nodeHandles.size()) + " nodes"); + std::to_string(_nodeHandles.size()) + " nodes"); } // Reset execution flag @@ -406,8 +394,8 @@ void WorkGraph::clear() { std::unique_lock lock(_graphMutex); if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::clear() - removing all " + - std::to_string(_nodeHandles.size()) + " nodes"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", + "WorkGraph::clear() - removing all " + std::to_string(_nodeHandles.size()) + " nodes"); } // Reset execution flag @@ -450,11 +438,12 @@ size_t WorkGraph::scheduleRoots() { size_t WorkGraph::scheduleRootsLocked() { // Assumes caller already holds a lock on _graphMutex size_t rootCount = 0; - + if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Checking " + std::to_string(_nodeHandles.size()) + " nodes for roots"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", + "WorkGraph: Checking " + std::to_string(_nodeHandles.size()) + " nodes for roots"); } - + // Check all cached handles to find roots (nodes ready to execute) size_t nodeIndex = 0; for (auto& handle : _nodeHandles) { @@ -463,7 +452,7 @@ size_t WorkGraph::scheduleRootsLocked() { } if (isHandleValid(handle)) { auto* nodeData = _graph.getNodeData(handle); - 
+ // Check if this node is ready to execute (no pending dependencies) if (nodeData && nodeData->pendingDependencies.load() == 0) { if (_config.enableDebugLogging) { @@ -495,17 +484,18 @@ size_t WorkGraph::scheduleRootsLocked() { } } } - + if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Root node scheduling complete, scheduled " + std::to_string(rootCount) + " roots"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Root node scheduling complete, scheduled " + + std::to_string(rootCount) + " roots"); } - + return rootCount; } void WorkGraph::suspend() { _suspended.store(true, std::memory_order_release); - + if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("WorkGraph", "Graph suspended - no new nodes will be scheduled"); } @@ -513,20 +503,21 @@ void WorkGraph::suspend() { void WorkGraph::resume() { bool wasSuspended = _suspended.exchange(false, std::memory_order_acq_rel); - + if (wasSuspended) { if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("WorkGraph", "Graph resumed - checking for ready nodes"); } - + // Process any deferred nodes that accumulated while suspended if (_scheduler) { size_t processed = _scheduler->processDeferredNodes(); if (processed > 0 && _config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("WorkGraph", "Processed " + std::to_string(processed) + " deferred nodes after resume"); + ENTROPY_LOG_DEBUG_CAT("WorkGraph", + "Processed " + std::to_string(processed) + " deferred nodes after resume"); } } - + // Check if any nodes became ready while we were suspended and schedule them std::shared_lock lock(_graphMutex); for (const auto& handle : _nodeHandles) { @@ -545,37 +536,39 @@ void WorkGraph::execute() { if (_config.enableDebugLogging) { ENTROPY_LOG_INFO_CAT("Concurrency", "WorkGraph::execute() starting"); } - + // Need exclusive lock to prevent nodes being added during execution startup std::unique_lock lock(_graphMutex); - + bool expected = false; if 
(!_executionStarted.compare_exchange_strong(expected, true)) { throw std::runtime_error("WorkGraph execution already started"); } - + // Schedule all root nodes to start execution while holding the lock // This eliminates the race window between setting _executionStarted and scheduling roots size_t roots = scheduleRootsLocked(); - + if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::execute() scheduled " + std::to_string(roots) + " root nodes"); if (_scheduler) { size_t deferred = _scheduler->getDeferredCount(); if (deferred > 0) { - ENTROPY_LOG_WARNING_CAT("Concurrency", "WorkGraph::execute() deferred " + std::to_string(deferred) + " nodes during startup"); + ENTROPY_LOG_WARNING_CAT("Concurrency", "WorkGraph::execute() deferred " + std::to_string(deferred) + + " nodes during startup"); } } } - + // Now safe to unlock lock.unlock(); - + // After unlocking, process any deferred nodes if (_scheduler) { size_t deferred = _scheduler->getDeferredCount(); if (_config.enableDebugLogging && deferred > 0) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::execute() has " + std::to_string(deferred) + " deferred nodes after initial scheduling"); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::execute() has " + std::to_string(deferred) + + " deferred nodes after initial scheduling"); } if (deferred > 0) { if (_config.enableDebugLogging) { @@ -583,18 +576,19 @@ void WorkGraph::execute() { } size_t processed = _scheduler->processDeferredNodes(); if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::execute() processDeferredNodes returned " + std::to_string(processed)); + ENTROPY_LOG_DEBUG_CAT( + "Concurrency", "WorkGraph::execute() processDeferredNodes returned " + std::to_string(processed)); } } } - + if (roots == 0 && getPendingCount() > 0) { - auto msg = std::format("ERROR: No roots found. 
Pending count: {}, node count: {}", - getPendingCount(), _nodeHandles.size()); + auto msg = std::format("ERROR: No roots found. Pending count: {}, node count: {}", getPendingCount(), + _nodeHandles.size()); ENTROPY_LOG_ERROR_CAT("WorkGraph", msg); throw std::runtime_error("WorkGraph has no root nodes but has pending work - possible cycle?"); } - + if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::execute() completed"); } @@ -609,7 +603,7 @@ bool WorkGraph::scheduleNode(NodeHandle node) { // Don't schedule while suspended return false; } - + if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::scheduleNode() called"); } @@ -624,48 +618,47 @@ bool WorkGraph::scheduleNode(NodeHandle node) { void WorkGraph::onNodeComplete(NodeHandle node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; - + // Prevent double-processing using atomic flag bool expected = false; - if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, - std::memory_order_acq_rel)) { + if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) { if (_config.enableDebugLogging) { ENTROPY_LOG_WARNING_CAT("Concurrency", "WorkGraph: Node already processed completion"); } - return; // Already processed + return; // Already processed } - + // Transition state through state manager _stateManager->transitionState(node, NodeState::Executing, NodeState::Completed); - + // Update counters _completedNodes.fetch_add(1, std::memory_order_relaxed); uint32_t pending = _pendingNodes.fetch_sub(1, std::memory_order_acq_rel) - 1; - + // If all nodes are complete, notify waiters if (pending == 0) { std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - + // Call completion callback if set if (_onNodeComplete) { _onNodeComplete(node); } - + // Schedule children whose dependencies are now satisfied // First, copy the children list while holding the lock (minimize critical 
section) std::vector children; { std::shared_lock lock(_graphMutex); children = this->getChildren(node); - } // Release lock immediately - + } // Release lock immediately + // Process children outside the lock to minimize contention for (auto& child : children) { auto* childData = _graph.getNodeData(child); if (!childData) continue; - + // Skip if child is cancelled if (childData->state.load(std::memory_order_acquire) == NodeState::Cancelled) { if (_config.enableDebugLogging) { @@ -673,13 +666,13 @@ void WorkGraph::onNodeComplete(NodeHandle node) { } continue; } - + // Decrement dependency count uint32_t remaining = childData->pendingDependencies.fetch_sub(1, std::memory_order_acq_rel) - 1; if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Child node dependencies decremented"); } - + // If all dependencies satisfied, try to transition to ready and schedule if (remaining == 0 && childData->failedParentCount.load(std::memory_order_acquire) == 0) { if (_config.enableDebugLogging) { @@ -701,8 +694,8 @@ void WorkGraph::onNodeComplete(NodeHandle node) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph: Child node still has dependencies"); } } - - // Note: Processing of deferred nodes is now handled via the + + // Note: Processing of deferred nodes is now handled via the // onCapacityAvailable callback from WorkContractGroup, which // is called after contracts are actually freed. 
} @@ -711,7 +704,7 @@ WorkGraph::WaitResult WorkGraph::wait() { if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::wait() called"); } - + // Check if already complete before waiting if (_pendingNodes.load(std::memory_order_acquire) == 0) { if (_config.enableDebugLogging) { @@ -725,47 +718,48 @@ WorkGraph::WaitResult WorkGraph::wait() { result.allCompleted = (result.failedCount == 0 && result.droppedCount == 0); return result; } - + // Use condition variable for waiting instead of busy-wait std::unique_lock lock(_waitMutex); if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::wait() waiting for pending nodes: " + std::to_string(_pendingNodes.load())); + ENTROPY_LOG_DEBUG_CAT("Concurrency", + "WorkGraph::wait() waiting for pending nodes: " + std::to_string(_pendingNodes.load())); } _waitCondition.wait(lock, [this]() { auto pending = _pendingNodes.load(std::memory_order_acquire); if (_config.enableDebugLogging && pending > 0) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::wait() still waiting, pending: " + std::to_string(pending)); + ENTROPY_LOG_DEBUG_CAT("Concurrency", + "WorkGraph::wait() still waiting, pending: " + std::to_string(pending)); } return pending == 0; }); - + // Prepare result WaitResult result; result.completedCount = _completedNodes.load(std::memory_order_acquire); result.failedCount = _failedNodes.load(std::memory_order_acquire); result.droppedCount = _droppedNodes.load(std::memory_order_acquire); result.allCompleted = (result.failedCount == 0 && result.droppedCount == 0); - + // Log warning if nodes were dropped if (result.droppedCount > 0) { - auto msg = std::format("WorkGraph::wait() - {} nodes were dropped due to deferred queue overflow!", result.droppedCount); + auto msg = std::format("WorkGraph::wait() - {} nodes were dropped due to deferred queue overflow!", + result.droppedCount); ENTROPY_LOG_WARNING_CAT("WorkGraph", msg); } - + return result; } - bool 
WorkGraph::isComplete() const { auto pending = _pendingNodes.load(std::memory_order_acquire); if (_config.enableDebugLogging && pending > 0) { auto completed = _completedNodes.load(std::memory_order_acquire); auto failed = _failedNodes.load(std::memory_order_acquire); auto dropped = _droppedNodes.load(std::memory_order_acquire); - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::isComplete() - pending: " + std::to_string(pending) + - ", completed: " + std::to_string(completed) + - ", failed: " + std::to_string(failed) + - ", dropped: " + std::to_string(dropped)); + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::isComplete() - pending: " + std::to_string(pending) + + ", completed: " + std::to_string(completed) + ", failed: " + + std::to_string(failed) + ", dropped: " + std::to_string(dropped)); } return pending == 0; } @@ -786,48 +780,44 @@ size_t WorkGraph::checkTimedDeferrals() { return 0; } -WorkGraph::NodeHandle WorkGraph::addContinuation(const std::vector& parents, - std::function work, - const std::string& name, - ExecutionType executionType) { +WorkGraph::NodeHandle WorkGraph::addContinuation(const std::vector& parents, std::function work, + const std::string& name, ExecutionType executionType) { // Create the continuation node with specified execution type auto continuation = addNode(std::move(work), name, nullptr, executionType); - + // Add dependencies from all parents for (const auto& parent : parents) { addDependency(parent, continuation); } - + return continuation; } void WorkGraph::onNodeFailed(NodeHandle node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; - + // Prevent double-processing bool expected = false; - if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, - std::memory_order_acq_rel)) { - return; // Already processed + if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) { + return; // Already processed } - + // Transition state through state 
manager _stateManager->transitionState(node, NodeState::Executing, NodeState::Failed); - + // Update counters _failedNodes.fetch_add(1, std::memory_order_relaxed); uint32_t pending = _pendingNodes.fetch_sub(1, std::memory_order_acq_rel) - 1; - + // If all nodes are complete, notify waiters if (pending == 0) { std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - + // Cancel all dependent nodes cancelDependents(node); - } void WorkGraph::onNodeYielded(NodeHandle node) { @@ -838,16 +828,15 @@ void WorkGraph::onNodeYielded(NodeHandle node) { uint32_t rescheduleCount = nodeData->rescheduleCount.fetch_add(1, std::memory_order_relaxed); if (_config.enableDebugLogging) { - auto msg = std::format("Node '{}' yielded (reschedule count: {})", - nodeData->name, rescheduleCount + 1); + auto msg = std::format("Node '{}' yielded (reschedule count: {})", nodeData->name, rescheduleCount + 1); ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); } // Check reschedule limit if (nodeData->maxReschedules && rescheduleCount >= *nodeData->maxReschedules) { if (_config.enableDebugLogging) { - auto msg = std::format("Node '{}' reached max reschedule limit ({}), completing", - nodeData->name, *nodeData->maxReschedules); + auto msg = std::format("Node '{}' reached max reschedule limit ({}), completing", nodeData->name, + *nodeData->maxReschedules); ENTROPY_LOG_WARNING_CAT("WorkGraph", msg); } @@ -875,16 +864,16 @@ void WorkGraph::onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::t if (_config.enableDebugLogging) { auto now = std::chrono::steady_clock::now(); auto delay = std::chrono::duration_cast(wakeTime - now); - auto msg = std::format("Node '{}' yielded until wake time (delay: {}ms, reschedule count: {})", - nodeData->name, delay.count(), rescheduleCount + 1); + auto msg = std::format("Node '{}' yielded until wake time (delay: {}ms, reschedule count: {})", nodeData->name, + delay.count(), rescheduleCount + 1); ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); } // Check reschedule 
limit if (nodeData->maxReschedules && rescheduleCount >= *nodeData->maxReschedules) { if (_config.enableDebugLogging) { - auto msg = std::format("Node '{}' reached max reschedule limit ({}), completing", - nodeData->name, *nodeData->maxReschedules); + auto msg = std::format("Node '{}' reached max reschedule limit ({}), completing", nodeData->name, + *nodeData->maxReschedules); ENTROPY_LOG_WARNING_CAT("WorkGraph", msg); } @@ -907,15 +896,15 @@ void WorkGraph::onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::t void WorkGraph::rescheduleYieldedNode(NodeHandle node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; - + if (_config.enableDebugLogging) { auto msg = std::format("Rescheduling yielded node '{}'", nodeData->name); ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); } - + // Clear the completion processed flag so it can run again nodeData->completionProcessed.store(false, std::memory_order_release); - + // Transition from Yielded to Ready if (_stateManager) { if (_stateManager->transitionState(node, NodeState::Yielded, NodeState::Ready)) { @@ -926,7 +915,7 @@ void WorkGraph::rescheduleYieldedNode(NodeHandle node) { } return; } - + // Transition to scheduled if (_stateManager->transitionState(node, NodeState::Ready, NodeState::Scheduled)) { // Schedule with the scheduler @@ -944,47 +933,46 @@ void WorkGraph::rescheduleYieldedNode(NodeHandle node) { void WorkGraph::onNodeCancelled(NodeHandle node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; - + // Prevent double-processing bool expected = false; - if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, - std::memory_order_acq_rel)) { - return; // Already processed + if (!nodeData->completionProcessed.compare_exchange_strong(expected, true, std::memory_order_acq_rel)) { + return; // Already processed } - + // Transition state through state manager (from whatever state to cancelled) NodeState currentState = 
nodeData->state.load(std::memory_order_acquire); _stateManager->transitionState(node, currentState, NodeState::Cancelled); - + // CRITICAL FIX: Decrement pending count for cancelled nodes uint32_t pending = _pendingNodes.fetch_sub(1, std::memory_order_acq_rel) - 1; - + // If all nodes are complete, notify waiters if (pending == 0) { std::lock_guard lock(_waitMutex); _waitCondition.notify_all(); } - + // Cancel all dependent nodes cancelDependents(node); } void WorkGraph::cancelDependents(NodeHandle failedNode) { std::vector nodesToCancel; - + { std::shared_lock lock(_graphMutex); - + // Get all children of the failed node auto children = this->getChildren(failedNode); - + for (auto& child : children) { auto* childData = _graph.getNodeData(child); if (!childData) continue; - + // Increment failed parent count childData->failedParentCount.fetch_add(1, std::memory_order_acq_rel); - + // If not already in terminal state, add to cancellation list NodeState childState = childData->state.load(std::memory_order_acquire); if (!isTerminalState(childState)) { @@ -992,7 +980,7 @@ void WorkGraph::cancelDependents(NodeHandle failedNode) { } } } - + // Cancel nodes outside the lock for (auto& node : nodesToCancel) { if (_config.enableDebugLogging) { @@ -1007,12 +995,12 @@ Core::EventBus* WorkGraph::getEventBus() { if (_config.enableEvents && !_eventBus && !_config.sharedEventBus) { _eventBus = std::make_unique(); } - + // Return shared event bus if configured if (_config.sharedEventBus) { return _config.sharedEventBus.get(); } - + return _eventBus.get(); } @@ -1022,6 +1010,6 @@ WorkGraphStats::Snapshot WorkGraph::getStats() const { return stats.toSnapshot(); } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkGraph.h b/src/Concurrency/WorkGraph.h index 5fa511f..1bdc655 100644 --- a/src/Concurrency/WorkGraph.h +++ 
b/src/Concurrency/WorkGraph.h @@ -10,7 +10,7 @@ /** * @file WorkGraph.h * @brief Dependency-based work execution system with automatic scheduling - * + * * This file contains the WorkGraph class, which manages work nodes with dependency * relationships. It automatically schedules work when dependencies are satisfied * and provides synchronization primitives for complex parallel workflows. @@ -18,1012 +18,1017 @@ #pragma once -#include -#include -#include #include +#include +#include #include #include +#include #include -#include +#include #include -#include +#include + +#include "../Core/EventBus.h" +#include "../Debug/Debug.h" +#include "../Graph/AcyclicNodeHandle.h" +#include "../Graph/DirectedAcyclicGraph.h" #include "WorkContractGroup.h" #include "WorkContractHandle.h" -#include "../Graph/DirectedAcyclicGraph.h" -#include "../Graph/AcyclicNodeHandle.h" -#include "../Debug/Debug.h" #include "WorkGraphTypes.h" -#include "../Core/EventBus.h" -#include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ - // Forward declarations - class NodeStateManager; - class NodeScheduler; +// Forward declarations +class NodeStateManager; +class NodeScheduler; - /** - * @brief The atomic unit of work in a dependency graph with self-managing execution timing - * - * WorkGraphNode represents a single task within a dependency graph. Each node encapsulates - * a work function along with the necessary state management to ensure execution occurs - * precisely when all dependencies are satisfied. The node maintains complete awareness - * of its position within the execution hierarchy. - * - * The node tracks: - * - Its current state (pending, executing, completed, yielded, etc.) 
- * - How many parents need to finish before it can run - * - Whether any parent failed (so it should be cancelled) - * - Its work contract handle for execution - * - Reschedule count for yieldable nodes - * - * All state transitions use atomic operations to ensure thread-safe updates when multiple - * parent nodes complete concurrently. This design enables safe concurrent dependency - * count decrements without locks. - * - * @code - * // Nodes are created internally by WorkGraph::addNode() - * // You interact with them through NodeHandle, not directly - * auto node = graph.addNode([]{ - * processData(); - * }, "data-processor"); - * - * // Or create a yieldable node - * auto yieldNode = graph.addYieldableNode([]() -> WorkResultContext { - * if (!ready()) return WorkResultContext::yield(); - * process(); - * return WorkResultContext::complete(); - * }, "yielder"); - * @endcode - */ - struct WorkGraphNode { - /// Atomic state management (replaces completed/cancelled flags) - std::atomic state{NodeState::Pending}; - - /// Work function to execute - now supports both void and WorkResult returns - std::variant, YieldableWorkFunction> work; - - /// Handle to the work contract (when scheduled) - WorkContractHandle handle; - - /// Number of uncompleted dependencies - std::atomic pendingDependencies{0}; - - /// Number of failed parents (for optimized parent checking) - std::atomic failedParentCount{0}; - - /// Track if completion has been processed to prevent double processing - std::atomic completionProcessed{false}; - - /// Debug name for the node - std::string name; - - /// User data pointer for custom context - void* userData = nullptr; - - /// Execution type for this node (main thread or any thread) - ExecutionType executionType = ExecutionType::AnyThread; - - /// Reschedule tracking for yieldable nodes - std::atomic rescheduleCount{0}; - - /// Optional maximum reschedule limit - std::optional maxReschedules; - - /// Is this a yieldable node? 
- bool isYieldable = false; - - WorkGraphNode() = default; - - // Constructor for legacy void() work functions - WorkGraphNode(std::function w, const std::string& n, ExecutionType execType = ExecutionType::AnyThread) - : work(std::move(w)), name(n), executionType(execType), isYieldable(false) {} - - // Constructor for yieldable work functions - WorkGraphNode(YieldableWorkFunction w, const std::string& n, ExecutionType execType = ExecutionType::AnyThread) - : work(std::move(w)), name(n), executionType(execType), isYieldable(true) {} - - // Move constructor - WorkGraphNode(WorkGraphNode&& other) noexcept - : state(other.state.load()) - , work(std::move(other.work)) - , handle(std::move(other.handle)) - , pendingDependencies(other.pendingDependencies.load()) - , failedParentCount(other.failedParentCount.load()) - , completionProcessed(other.completionProcessed.load()) - , name(std::move(other.name)) - , userData(other.userData) - , executionType(other.executionType) - , rescheduleCount(other.rescheduleCount.load()) - , maxReschedules(other.maxReschedules) - , isYieldable(other.isYieldable) { +/** + * @brief The atomic unit of work in a dependency graph with self-managing execution timing + * + * WorkGraphNode represents a single task within a dependency graph. Each node encapsulates + * a work function along with the necessary state management to ensure execution occurs + * precisely when all dependencies are satisfied. The node maintains complete awareness + * of its position within the execution hierarchy. + * + * The node tracks: + * - Its current state (pending, executing, completed, yielded, etc.) + * - How many parents need to finish before it can run + * - Whether any parent failed (so it should be cancelled) + * - Its work contract handle for execution + * - Reschedule count for yieldable nodes + * + * All state transitions use atomic operations to ensure thread-safe updates when multiple + * parent nodes complete concurrently. 
This design enables safe concurrent dependency + * count decrements without locks. + * + * @code + * // Nodes are created internally by WorkGraph::addNode() + * // You interact with them through NodeHandle, not directly + * auto node = graph.addNode([]{ + * processData(); + * }, "data-processor"); + * + * // Or create a yieldable node + * auto yieldNode = graph.addYieldableNode([]() -> WorkResultContext { + * if (!ready()) return WorkResultContext::yield(); + * process(); + * return WorkResultContext::complete(); + * }, "yielder"); + * @endcode + */ +struct WorkGraphNode +{ + /// Atomic state management (replaces completed/cancelled flags) + std::atomic state{NodeState::Pending}; + + /// Work function to execute - now supports both void and WorkResult returns + std::variant, YieldableWorkFunction> work; + + /// Handle to the work contract (when scheduled) + WorkContractHandle handle; + + /// Number of uncompleted dependencies + std::atomic pendingDependencies{0}; + + /// Number of failed parents (for optimized parent checking) + std::atomic failedParentCount{0}; + + /// Track if completion has been processed to prevent double processing + std::atomic completionProcessed{false}; + + /// Debug name for the node + std::string name; + + /// User data pointer for custom context + void* userData = nullptr; + + /// Execution type for this node (main thread or any thread) + ExecutionType executionType = ExecutionType::AnyThread; + + /// Reschedule tracking for yieldable nodes + std::atomic rescheduleCount{0}; + + /// Optional maximum reschedule limit + std::optional maxReschedules; + + /// Is this a yieldable node? 
+ bool isYieldable = false; + + WorkGraphNode() = default; + + // Constructor for legacy void() work functions + WorkGraphNode(std::function w, const std::string& n, ExecutionType execType = ExecutionType::AnyThread) + : work(std::move(w)), name(n), executionType(execType), isYieldable(false) {} + + // Constructor for yieldable work functions + WorkGraphNode(YieldableWorkFunction w, const std::string& n, ExecutionType execType = ExecutionType::AnyThread) + : work(std::move(w)), name(n), executionType(execType), isYieldable(true) {} + + // Move constructor + WorkGraphNode(WorkGraphNode&& other) noexcept + : state(other.state.load()), + work(std::move(other.work)), + handle(std::move(other.handle)), + pendingDependencies(other.pendingDependencies.load()), + failedParentCount(other.failedParentCount.load()), + completionProcessed(other.completionProcessed.load()), + name(std::move(other.name)), + userData(other.userData), + executionType(other.executionType), + rescheduleCount(other.rescheduleCount.load()), + maxReschedules(other.maxReschedules), + isYieldable(other.isYieldable) { + other.userData = nullptr; + } + + // Move assignment + WorkGraphNode& operator=(WorkGraphNode&& other) noexcept { + if (this != &other) { + state.store(other.state.load()); + work = std::move(other.work); + handle = std::move(other.handle); + pendingDependencies.store(other.pendingDependencies.load()); + failedParentCount.store(other.failedParentCount.load()); + completionProcessed.store(other.completionProcessed.load()); + name = std::move(other.name); + userData = other.userData; + executionType = other.executionType; + rescheduleCount.store(other.rescheduleCount.load()); + maxReschedules = other.maxReschedules; + isYieldable = other.isYieldable; other.userData = nullptr; } - - // Move assignment - WorkGraphNode& operator=(WorkGraphNode&& other) noexcept { - if (this != &other) { - state.store(other.state.load()); - work = std::move(other.work); - handle = std::move(other.handle); - 
pendingDependencies.store(other.pendingDependencies.load()); - failedParentCount.store(other.failedParentCount.load()); - completionProcessed.store(other.completionProcessed.load()); - name = std::move(other.name); - userData = other.userData; - executionType = other.executionType; - rescheduleCount.store(other.rescheduleCount.load()); - maxReschedules = other.maxReschedules; - isYieldable = other.isYieldable; - other.userData = nullptr; + return *this; + } + + // Delete copy operations + WorkGraphNode(const WorkGraphNode&) = delete; + WorkGraphNode& operator=(const WorkGraphNode&) = delete; +}; + +/** + * @brief Orchestrates complex parallel workflows with automatic dependency management + * + * WorkGraph provides high-level workflow management for parallel task execution. It accepts + * task definitions with dependency relationships and automatically determines optimal + * execution order. As tasks complete, the system triggers dependent tasks in cascade, + * ensuring correct execution flow through complex dependency chains. + * + * Bridges the gap between low-level work execution (WorkContractGroup) and + * high-level workflow requirements. While WorkContractGroup provides raw execution + * capabilities, WorkGraph adds intelligent scheduling based on dependency relationships. 
+ * + * Key features: + * - Automatic dependency resolution without manual scheduling + * - Dynamic graph construction during execution + * - Failure propagation to cancel dependent tasks + * - Thread-safe operations for concurrent modifications + * - Zero-copy integration with existing WorkContractGroup + * - Main thread execution support for UI and render operations + * + * Common applications: + * - Build systems (compile → link → package) + * - Data pipelines (load → transform → analyze → save) + * - Game asset processing (texture → compress → pack) + * - Mixed UI/background workflows (process → update UI → save) + * + * Complexity characteristics: + * - Dependency tracking: O(1) atomic operations + * - Completion cascade: O(children) per completed node + * + * @code + * // Mixed execution pipeline with UI updates + * WorkContractGroup group(1024); + * WorkService service(2); // 2 worker threads + * service.addWorkContractGroup(&group); + * + * WorkGraph graph(&group); + * + * // Background data processing + * auto load = graph.addNode([]{ + * auto data = loadFromDisk(); + * processData(data); + * }, "loader"); + * + * // Main thread UI update + * auto updateUI = graph.addNode([]{ + * progressBar.setValue(50); + * statusLabel.setText("Processing..."); + * }, "ui-update", nullptr, ExecutionType::MainThread); + * + * // More background work + * auto save = graph.addNode([]{ + * auto data = getProcessedData(); + * saveToDisk(data); + * }, "saver"); + * + * // Wire up dependencies - UI update after load, save after UI + * graph.addDependency(load, updateUI); + * graph.addDependency(updateUI, save); + * + * // Start execution + * graph.execute(); + * service.start(); + * + * // Main thread pump (in your event loop) + * while (!graph.isComplete()) { + * // Process main thread work + * group.executeMainThreadWork(5); // Max 5 per frame + * + * // Handle UI events + * processEvents(); + * renderFrame(); + * } + * @endcode + */ +class WorkGraph : public Debug::Named +{ 
+private: + /// RAII guard for tracking active callbacks + struct CallbackGuard + { + WorkGraph* graph; + CallbackGuard(WorkGraph* g) : graph(g) { + graph->_activeCallbacks.fetch_add(1, std::memory_order_acq_rel); + } + ~CallbackGuard() { + if (graph->_activeCallbacks.fetch_sub(1, std::memory_order_acq_rel) == 1) { + std::lock_guard lock(graph->_waitMutex); + graph->_shutdownCondition.notify_all(); } - return *this; } - - // Delete copy operations - WorkGraphNode(const WorkGraphNode&) = delete; - WorkGraphNode& operator=(const WorkGraphNode&) = delete; }; + Graph::DirectedAcyclicGraph _graph; ///< Internal DAG implementation + + WorkContractGroup* _workContractGroup; ///< External work executor + + WorkContractGroup::CapacityCallback _capacityCallbackIt; ///< Capacity callback handle + + WorkGraphConfig _config; ///< Graph configuration + + std::unique_ptr _eventBus; ///< Event system + std::unique_ptr _stateManager; ///< State tracker + std::unique_ptr _scheduler; ///< Node scheduler + + mutable std::shared_mutex _graphMutex; ///< Graph structure protection + + std::atomic _executionStarted{false}; ///< Execution started flag + + std::atomic _pendingNodes{0}; ///< Pending node count + + std::atomic _droppedNodes{0}; ///< Dropped node count + + std::atomic _completedNodes{0}; ///< Completed node count + + std::atomic _failedNodes{0}; ///< Failed node count + + /// Cache of valid node handles for efficient access + std::vector> _nodeHandles; ///< Pre-allocated for performance + + /// Callback for node completion (for testing/debugging) + std::function)> _onNodeComplete; ///< Optional completion hook + + /// Synchronization for wait() - more efficient than busy-waiting + mutable std::mutex _waitMutex; ///< Protects wait condition + mutable std::condition_variable _waitCondition; ///< Signaled when nodes complete + + /// Safety flag to prevent callbacks after destruction + mutable std::atomic _destroyed{false}; ///< Set true in destructor + + /// Number of active 
callbacks (for safe destruction) + mutable std::atomic _activeCallbacks{0}; ///< Tracked by CallbackGuard + + /// Condition variable for waiting on active callbacks + mutable std::condition_variable _shutdownCondition; ///< Destructor waits on this + + /// Suspension state - prevents scheduling new nodes + std::atomic _suspended{false}; ///< True when graph is suspended + +public: + using NodeHandle = Graph::AcyclicNodeHandle; + + /** + * @brief What you get back from wait() - the final score of your graph execution + * + * This tells you how your workflow went: did everything finish? Did some tasks + * fail? Were any dropped due to capacity issues? It's like a report card for + * your parallel execution. + * + * @code + * auto result = graph.wait(); + * if (result.allCompleted) { + * LOG_INFO("Perfect run! All {} nodes completed", result.completedCount); + * } else { + * LOG_WARN("Issues detected: {} failed, {} dropped", + * result.failedCount, result.droppedCount); + * } + * @endcode + */ + struct WaitResult + { + bool allCompleted = false; ///< True only if every single node succeeded + uint32_t droppedCount = 0; ///< Nodes we couldn't schedule (queue overflow) + uint32_t failedCount = 0; ///< Nodes that threw exceptions + uint32_t completedCount = 0; ///< Nodes that ran successfully + }; + + /** + * @brief Creates a work graph backed by your thread pool + * + * The graph doesn't own the WorkContractGroup - just uses it to schedule work. + * Multiple graphs can share the same thread pool. + * + * @param workContractGroup Your thread pool for executing work (must outlive the graph) + * + * @code + * // Typical setup + * WorkContractGroup threadPool(1024); // Shared thread pool + * WorkGraph pipeline1(&threadPool); // Asset processing pipeline + * WorkGraph pipeline2(&threadPool); // Data analysis pipeline + * // Both pipelines share the same threads! 
+ * @endcode + */ + explicit WorkGraph(WorkContractGroup* workContractGroup); + + /** + * @brief Creates a work graph with custom behavior options + * + * Use for advanced features like events, state management, or custom allocation. + * + * @param workContractGroup Your thread pool for executing work + * @param config Tuning knobs and feature flags + * + * @code + * // Example: Graph with event notifications for monitoring + * WorkGraphConfig config; + * config.enableEvents = true; + * config.expectedNodeCount = 1000; // Pre-allocate for performance + * config.maxDeferredNodes = 100; // Limit queue size + * + * WorkGraph monitoredGraph(&threadPool, config); + * + * // Now you can subscribe to events + * monitoredGraph.getEventBus()->subscribe( + * [](const NodeCompletedEvent& e) { + * LOG_INFO("Node completed in {}ms", + * chrono::duration_cast(e.executionTime).count()); + * }); + * @endcode + */ + WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig& config); + + /** + * @brief Cleans up the graph and ensures all callbacks complete + * + * Waits for active callbacks before destroying. Safe to destroy with pending + * work - it continues executing in the WorkContractGroup. + */ + ~WorkGraph(); + + /** + * @brief Adds a yieldable task that can suspend and resume execution + * + * Creates a node that can yield control back to the scheduler and be + * rescheduled later. Perfect for polling operations, staged processing, + * or any task that needs to wait without blocking a thread. 
+ * + * @param work Yieldable function returning WorkResult + * @param name Human-readable name for debugging + * @param userData Your own context pointer + * @param executionType Where to run: AnyThread or MainThread + * @param maxReschedules Optional limit on reschedules (prevent infinite loops) + * @return Handle to reference this node + * + * @code + * // Polling task that yields until ready + * auto poller = graph.addYieldableNode([]() -> WorkResultContext { + * if (!dataReady()) { + * return WorkResultContext::yield(); // Try again later + * } + * processData(); + * return WorkResultContext::complete(); + * }, "data-poller"); + * + * // Staged processing with yield between stages + * int stage = 0; + * auto staged = graph.addYieldableNode([&stage]() -> WorkResultContext { + * switch (stage++) { + * case 0: doStage1(); return WorkResultContext::yield(); + * case 1: doStage2(); return WorkResultContext::yield(); + * case 2: doStage3(); return WorkResultContext::complete(); + * default: return WorkResultContext::complete(); + * } + * }, "staged-processor", nullptr, ExecutionType::AnyThread, 10); + * @endcode + */ + NodeHandle addYieldableNode(YieldableWorkFunction work, const std::string& name = "", void* userData = nullptr, + ExecutionType executionType = ExecutionType::AnyThread, + std::optional maxReschedules = std::nullopt); + + /** + * @brief Adds a task to your workflow - it won't run until its time comes + * + * Creates a node that waits for dependencies before running. Thread-safe - + * can add nodes while graph executes. Use ExecutionType::MainThread for + * UI updates or other main-thread-only operations. 
+ * + * @param work Your task - lambda, function, or any callable + * @param name Human-readable name for debugging + * @param userData Your own context pointer + * @param executionType Where to run: AnyThread (worker pool) or MainThread + * @return Handle to reference this node + * + * @code + * // Simple task + * auto task = graph.addNode([]{ + * doSomeWork(); + * }, "worker-1"); + * + * // Main thread task + * auto uiUpdate = graph.addNode([]{ + * updateUI(); + * }, "ui-updater", nullptr, ExecutionType::MainThread); + * + * // Task with captures + * std::string filename = "data.txt"; + * auto loader = graph.addNode([filename]{ + * loadFile(filename); + * }, "file-loader"); + * + * // Task with user data + * auto* context = new ProcessContext(); + * auto processor = graph.addNode( + * [context]{ context->process(); }, + * "processor", + * context // Attach as user data + * ); + * @endcode + */ + NodeHandle addNode(std::function work, const std::string& name = "", void* userData = nullptr, + ExecutionType executionType = ExecutionType::AnyThread); + + /** + * @brief Wire up your workflow - tell nodes who they're waiting for + * + * Defines execution order: "to" waits for "from" to finish. If "from" fails, + * "to" is cancelled. Prevents cycles. Thread-safe. 
+ * + * @param from The prerequisite task + * @param to The dependent task + * @throws std::invalid_argument if this would create a cycle + * @throws std::runtime_error if nodes invalid or completed + * + * @code + * // Linear pipeline: A → B → C + * auto A = graph.addNode([]{ stepA(); }, "A"); + * auto B = graph.addNode([]{ stepB(); }, "B"); + * auto C = graph.addNode([]{ stepC(); }, "C"); + * graph.addDependency(A, B); // B waits for A + * graph.addDependency(B, C); // C waits for B + * + * // Fan-out: A → {B, C, D} + * auto A = graph.addNode([]{ generateData(); }, "generator"); + * auto B = graph.addNode([]{ process1(); }, "proc1"); + * auto C = graph.addNode([]{ process2(); }, "proc2"); + * auto D = graph.addNode([]{ process3(); }, "proc3"); + * graph.addDependency(A, B); // All three process + * graph.addDependency(A, C); // the same data + * graph.addDependency(A, D); // in parallel + * + * // Fan-in: {A, B, C} → D + * auto D = graph.addNode([]{ mergeResults(); }, "merger"); + * graph.addDependency(A, D); // D waits for + * graph.addDependency(B, D); // all three + * graph.addDependency(C, D); // to complete + * @endcode + */ + void addDependency(NodeHandle from, NodeHandle to); + + /** + * @brief Resets execution state so the graph can be re-executed + * + * Keeps all nodes and dependencies intact, but resets: + * - Execution started flag + * - Pending/completed/failed/dropped counters + * - Node states back to Pending + * - Node completion processed flags + * - Pending dependency counts (restored from edge structure) + * + * After reset(), you can call execute() again to re-run the same workflow. + * This is much faster than destroying and recreating the graph. 
+ * + * @note Not thread-safe with concurrent execute()/wait() calls + * + * @code + * // Reusable graph pattern + * WorkGraph graph(&threadPool); + * auto nodeA = graph.addNode([&data]{ process(data); }, "processor"); + * + * for (int frame = 0; frame < 100; ++frame) { + * updateData(data); // Update what the node operates on + * graph.reset(); // Reset execution state + * graph.execute(); // Re-run the workflow + * graph.wait(); + * } + * @endcode + */ + void reset(); + + /** + * @brief Removes all nodes and dependencies from the graph + * + * Clears the entire graph structure. After clear(), the graph is empty + * and ready for new nodes to be added. Use when the workflow structure + * changes (different number of nodes, different dependencies). + * + * @note Not thread-safe with concurrent operations + * + * @code + * // Dynamic workflow that changes structure + * if (configurationChanged) { + * graph.clear(); // Remove old structure + * // Add new nodes based on new configuration + * for (auto& task : newTasks) { + * graph.addNode(task.work, task.name); + * } + * } else { + * graph.reset(); // Just reset execution state + * } + * graph.execute(); + * @endcode + */ + void clear(); + + /** + * @brief Kicks off your workflow by scheduling all nodes that have no dependencies + * + * Finds root nodes and schedules them. Called automatically by execute(). + * + * @return Number of root nodes that were scheduled + * + * @code + * // Manual execution control + * graph.addNode([]{ step1(); }, "step1"); + * graph.addNode([]{ step2(); }, "step2"); + * // Both are roots since no dependencies were added + * + * size_t roots = graph.scheduleRoots(); // Returns 2 + * LOG_INFO("Started {} independent tasks", roots); + * @endcode + */ + size_t scheduleRoots(); + + /** + * @brief Lights the fuse on your workflow - starts the cascade of execution + * + * Finds and schedules root nodes. Safe to call multiple times. Thread-safe + * with dynamic modifications. 
+ * + * @code + * // Fire and forget + * graph.execute(); + * // Graph is now running in the background + * + * // You can even add more work while it runs! + * auto newNode = graph.addNode([]{ lateWork(); }); + * graph.addDependency(existingNode, newNode); + * // newNode will execute when existingNode completes + * @endcode + */ + void execute(); + /** - * @brief Orchestrates complex parallel workflows with automatic dependency management - * - * WorkGraph provides high-level workflow management for parallel task execution. It accepts - * task definitions with dependency relationships and automatically determines optimal - * execution order. As tasks complete, the system triggers dependent tasks in cascade, - * ensuring correct execution flow through complex dependency chains. - * - * Bridges the gap between low-level work execution (WorkContractGroup) and - * high-level workflow requirements. While WorkContractGroup provides raw execution - * capabilities, WorkGraph adds intelligent scheduling based on dependency relationships. 
- * - * Key features: - * - Automatic dependency resolution without manual scheduling - * - Dynamic graph construction during execution - * - Failure propagation to cancel dependent tasks - * - Thread-safe operations for concurrent modifications - * - Zero-copy integration with existing WorkContractGroup - * - Main thread execution support for UI and render operations - * - * Common applications: - * - Build systems (compile → link → package) - * - Data pipelines (load → transform → analyze → save) - * - Game asset processing (texture → compress → pack) - * - Mixed UI/background workflows (process → update UI → save) - * - * Complexity characteristics: - * - Dependency tracking: O(1) atomic operations - * - Completion cascade: O(children) per completed node - * + * @brief Suspends graph execution - no new nodes will be scheduled + * + * Currently executing nodes will complete, but no new nodes will be + * scheduled (including yielded nodes trying to reschedule). The graph + * remains suspended until resume() is called. + * + * Thread-safe. Can be called while graph is executing. 
+ * * @code - * // Mixed execution pipeline with UI updates - * WorkContractGroup group(1024); - * WorkService service(2); // 2 worker threads - * service.addWorkContractGroup(&group); - * - * WorkGraph graph(&group); - * - * // Background data processing - * auto load = graph.addNode([]{ - * auto data = loadFromDisk(); - * processData(data); - * }, "loader"); - * - * // Main thread UI update - * auto updateUI = graph.addNode([]{ - * progressBar.setValue(50); - * statusLabel.setText("Processing..."); - * }, "ui-update", nullptr, ExecutionType::MainThread); - * - * // More background work - * auto save = graph.addNode([]{ - * auto data = getProcessedData(); - * saveToDisk(data); - * }, "saver"); - * - * // Wire up dependencies - UI update after load, save after UI - * graph.addDependency(load, updateUI); - * graph.addDependency(updateUI, save); - * - * // Start execution * graph.execute(); - * service.start(); - * - * // Main thread pump (in your event loop) - * while (!graph.isComplete()) { - * // Process main thread work - * group.executeMainThreadWork(5); // Max 5 per frame - * - * // Handle UI events - * processEvents(); - * renderFrame(); + * // ... some time later + * graph.suspend(); // Pause execution + * // ... do something else + * graph.resume(); // Continue where we left off + * @endcode + */ + void suspend(); + + /** + * @brief Resumes graph execution after suspension + * + * Allows scheduling to continue. Any nodes that became ready while + * suspended will be scheduled. Yielded nodes waiting to reschedule + * will also continue. + * + * Thread-safe. Safe to call even if not suspended. 
+ * + * @code + * if (needToPause) { + * graph.suspend(); + * handleHighPriorityWork(); + * graph.resume(); * } * @endcode */ - class WorkGraph : public Debug::Named { - private: - /// RAII guard for tracking active callbacks - struct CallbackGuard { - WorkGraph* graph; - CallbackGuard(WorkGraph* g) : graph(g) { - graph->_activeCallbacks.fetch_add(1, std::memory_order_acq_rel); - } - ~CallbackGuard() { - if (graph->_activeCallbacks.fetch_sub(1, std::memory_order_acq_rel) == 1) { - std::lock_guard lock(graph->_waitMutex); - graph->_shutdownCondition.notify_all(); - } - } - }; - - Graph::DirectedAcyclicGraph _graph; ///< Internal DAG implementation - - WorkContractGroup* _workContractGroup; ///< External work executor - - WorkContractGroup::CapacityCallback _capacityCallbackIt; ///< Capacity callback handle - - WorkGraphConfig _config; ///< Graph configuration - - std::unique_ptr _eventBus; ///< Event system - std::unique_ptr _stateManager; ///< State tracker - std::unique_ptr _scheduler; ///< Node scheduler - - mutable std::shared_mutex _graphMutex; ///< Graph structure protection - - std::atomic _executionStarted{false}; ///< Execution started flag - - std::atomic _pendingNodes{0}; ///< Pending node count - - std::atomic _droppedNodes{0}; ///< Dropped node count - - std::atomic _completedNodes{0}; ///< Completed node count - - std::atomic _failedNodes{0}; ///< Failed node count - - /// Cache of valid node handles for efficient access - std::vector> _nodeHandles; ///< Pre-allocated for performance - - /// Callback for node completion (for testing/debugging) - std::function)> _onNodeComplete; ///< Optional completion hook - - /// Synchronization for wait() - more efficient than busy-waiting - mutable std::mutex _waitMutex; ///< Protects wait condition - mutable std::condition_variable _waitCondition; ///< Signaled when nodes complete - - /// Safety flag to prevent callbacks after destruction - mutable std::atomic _destroyed{false}; ///< Set true in destructor - - /// 
Number of active callbacks (for safe destruction) - mutable std::atomic _activeCallbacks{0}; ///< Tracked by CallbackGuard - - /// Condition variable for waiting on active callbacks - mutable std::condition_variable _shutdownCondition; ///< Destructor waits on this - - /// Suspension state - prevents scheduling new nodes - std::atomic _suspended{false}; ///< True when graph is suspended - - public: - using NodeHandle = Graph::AcyclicNodeHandle; - - /** - * @brief What you get back from wait() - the final score of your graph execution - * - * This tells you how your workflow went: did everything finish? Did some tasks - * fail? Were any dropped due to capacity issues? It's like a report card for - * your parallel execution. - * - * @code - * auto result = graph.wait(); - * if (result.allCompleted) { - * LOG_INFO("Perfect run! All {} nodes completed", result.completedCount); - * } else { - * LOG_WARN("Issues detected: {} failed, {} dropped", - * result.failedCount, result.droppedCount); - * } - * @endcode - */ - struct WaitResult { - bool allCompleted = false; ///< True only if every single node succeeded - uint32_t droppedCount = 0; ///< Nodes we couldn't schedule (queue overflow) - uint32_t failedCount = 0; ///< Nodes that threw exceptions - uint32_t completedCount = 0; ///< Nodes that ran successfully - }; - - /** - * @brief Creates a work graph backed by your thread pool - * - * The graph doesn't own the WorkContractGroup - just uses it to schedule work. - * Multiple graphs can share the same thread pool. - * - * @param workContractGroup Your thread pool for executing work (must outlive the graph) - * - * @code - * // Typical setup - * WorkContractGroup threadPool(1024); // Shared thread pool - * WorkGraph pipeline1(&threadPool); // Asset processing pipeline - * WorkGraph pipeline2(&threadPool); // Data analysis pipeline - * // Both pipelines share the same threads! 
- * @endcode - */ - explicit WorkGraph(WorkContractGroup* workContractGroup); - - /** - * @brief Creates a work graph with custom behavior options - * - * Use for advanced features like events, state management, or custom allocation. - * - * @param workContractGroup Your thread pool for executing work - * @param config Tuning knobs and feature flags - * - * @code - * // Example: Graph with event notifications for monitoring - * WorkGraphConfig config; - * config.enableEvents = true; - * config.expectedNodeCount = 1000; // Pre-allocate for performance - * config.maxDeferredNodes = 100; // Limit queue size - * - * WorkGraph monitoredGraph(&threadPool, config); - * - * // Now you can subscribe to events - * monitoredGraph.getEventBus()->subscribe( - * [](const NodeCompletedEvent& e) { - * LOG_INFO("Node completed in {}ms", - * chrono::duration_cast(e.executionTime).count()); - * }); - * @endcode - */ - WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig& config); - - /** - * @brief Cleans up the graph and ensures all callbacks complete - * - * Waits for active callbacks before destroying. Safe to destroy with pending - * work - it continues executing in the WorkContractGroup. - */ - ~WorkGraph(); - - /** - * @brief Adds a yieldable task that can suspend and resume execution - * - * Creates a node that can yield control back to the scheduler and be - * rescheduled later. Perfect for polling operations, staged processing, - * or any task that needs to wait without blocking a thread. 
- * - * @param work Yieldable function returning WorkResult - * @param name Human-readable name for debugging - * @param userData Your own context pointer - * @param executionType Where to run: AnyThread or MainThread - * @param maxReschedules Optional limit on reschedules (prevent infinite loops) - * @return Handle to reference this node - * - * @code - * // Polling task that yields until ready - * auto poller = graph.addYieldableNode([]() -> WorkResultContext { - * if (!dataReady()) { - * return WorkResultContext::yield(); // Try again later - * } - * processData(); - * return WorkResultContext::complete(); - * }, "data-poller"); - * - * // Staged processing with yield between stages - * int stage = 0; - * auto staged = graph.addYieldableNode([&stage]() -> WorkResultContext { - * switch (stage++) { - * case 0: doStage1(); return WorkResultContext::yield(); - * case 1: doStage2(); return WorkResultContext::yield(); - * case 2: doStage3(); return WorkResultContext::complete(); - * default: return WorkResultContext::complete(); - * } - * }, "staged-processor", nullptr, ExecutionType::AnyThread, 10); - * @endcode - */ - NodeHandle addYieldableNode(YieldableWorkFunction work, - const std::string& name = "", - void* userData = nullptr, - ExecutionType executionType = ExecutionType::AnyThread, - std::optional maxReschedules = std::nullopt); - - /** - * @brief Adds a task to your workflow - it won't run until its time comes - * - * Creates a node that waits for dependencies before running. Thread-safe - - * can add nodes while graph executes. Use ExecutionType::MainThread for - * UI updates or other main-thread-only operations. 
- * - * @param work Your task - lambda, function, or any callable - * @param name Human-readable name for debugging - * @param userData Your own context pointer - * @param executionType Where to run: AnyThread (worker pool) or MainThread - * @return Handle to reference this node - * - * @code - * // Simple task - * auto task = graph.addNode([]{ - * doSomeWork(); - * }, "worker-1"); - * - * // Main thread task - * auto uiUpdate = graph.addNode([]{ - * updateUI(); - * }, "ui-updater", nullptr, ExecutionType::MainThread); - * - * // Task with captures - * std::string filename = "data.txt"; - * auto loader = graph.addNode([filename]{ - * loadFile(filename); - * }, "file-loader"); - * - * // Task with user data - * auto* context = new ProcessContext(); - * auto processor = graph.addNode( - * [context]{ context->process(); }, - * "processor", - * context // Attach as user data - * ); - * @endcode - */ - NodeHandle addNode(std::function work, - const std::string& name = "", - void* userData = nullptr, - ExecutionType executionType = ExecutionType::AnyThread); - - /** - * @brief Wire up your workflow - tell nodes who they're waiting for - * - * Defines execution order: "to" waits for "from" to finish. If "from" fails, - * "to" is cancelled. Prevents cycles. Thread-safe. 
- * - * @param from The prerequisite task - * @param to The dependent task - * @throws std::invalid_argument if this would create a cycle - * @throws std::runtime_error if nodes invalid or completed - * - * @code - * // Linear pipeline: A → B → C - * auto A = graph.addNode([]{ stepA(); }, "A"); - * auto B = graph.addNode([]{ stepB(); }, "B"); - * auto C = graph.addNode([]{ stepC(); }, "C"); - * graph.addDependency(A, B); // B waits for A - * graph.addDependency(B, C); // C waits for B - * - * // Fan-out: A → {B, C, D} - * auto A = graph.addNode([]{ generateData(); }, "generator"); - * auto B = graph.addNode([]{ process1(); }, "proc1"); - * auto C = graph.addNode([]{ process2(); }, "proc2"); - * auto D = graph.addNode([]{ process3(); }, "proc3"); - * graph.addDependency(A, B); // All three process - * graph.addDependency(A, C); // the same data - * graph.addDependency(A, D); // in parallel - * - * // Fan-in: {A, B, C} → D - * auto D = graph.addNode([]{ mergeResults(); }, "merger"); - * graph.addDependency(A, D); // D waits for - * graph.addDependency(B, D); // all three - * graph.addDependency(C, D); // to complete - * @endcode - */ - void addDependency(NodeHandle from, NodeHandle to); - - /** - * @brief Resets execution state so the graph can be re-executed - * - * Keeps all nodes and dependencies intact, but resets: - * - Execution started flag - * - Pending/completed/failed/dropped counters - * - Node states back to Pending - * - Node completion processed flags - * - Pending dependency counts (restored from edge structure) - * - * After reset(), you can call execute() again to re-run the same workflow. - * This is much faster than destroying and recreating the graph. 
- * - * @note Not thread-safe with concurrent execute()/wait() calls - * - * @code - * // Reusable graph pattern - * WorkGraph graph(&threadPool); - * auto nodeA = graph.addNode([&data]{ process(data); }, "processor"); - * - * for (int frame = 0; frame < 100; ++frame) { - * updateData(data); // Update what the node operates on - * graph.reset(); // Reset execution state - * graph.execute(); // Re-run the workflow - * graph.wait(); - * } - * @endcode - */ - void reset(); - - /** - * @brief Removes all nodes and dependencies from the graph - * - * Clears the entire graph structure. After clear(), the graph is empty - * and ready for new nodes to be added. Use when the workflow structure - * changes (different number of nodes, different dependencies). - * - * @note Not thread-safe with concurrent operations - * - * @code - * // Dynamic workflow that changes structure - * if (configurationChanged) { - * graph.clear(); // Remove old structure - * // Add new nodes based on new configuration - * for (auto& task : newTasks) { - * graph.addNode(task.work, task.name); - * } - * } else { - * graph.reset(); // Just reset execution state - * } - * graph.execute(); - * @endcode - */ - void clear(); - - /** - * @brief Kicks off your workflow by scheduling all nodes that have no dependencies - * - * Finds root nodes and schedules them. Called automatically by execute(). - * - * @return Number of root nodes that were scheduled - * - * @code - * // Manual execution control - * graph.addNode([]{ step1(); }, "step1"); - * graph.addNode([]{ step2(); }, "step2"); - * // Both are roots since no dependencies were added - * - * size_t roots = graph.scheduleRoots(); // Returns 2 - * LOG_INFO("Started {} independent tasks", roots); - * @endcode - */ - size_t scheduleRoots(); - - /** - * @brief Lights the fuse on your workflow - starts the cascade of execution - * - * Finds and schedules root nodes. Safe to call multiple times. Thread-safe - * with dynamic modifications. 
- * - * @code - * // Fire and forget - * graph.execute(); - * // Graph is now running in the background - * - * // You can even add more work while it runs! - * auto newNode = graph.addNode([]{ lateWork(); }); - * graph.addDependency(existingNode, newNode); - * // newNode will execute when existingNode completes - * @endcode - */ - void execute(); - - /** - * @brief Suspends graph execution - no new nodes will be scheduled - * - * Currently executing nodes will complete, but no new nodes will be - * scheduled (including yielded nodes trying to reschedule). The graph - * remains suspended until resume() is called. - * - * Thread-safe. Can be called while graph is executing. - * - * @code - * graph.execute(); - * // ... some time later - * graph.suspend(); // Pause execution - * // ... do something else - * graph.resume(); // Continue where we left off - * @endcode - */ - void suspend(); - - /** - * @brief Resumes graph execution after suspension - * - * Allows scheduling to continue. Any nodes that became ready while - * suspended will be scheduled. Yielded nodes waiting to reschedule - * will also continue. - * - * Thread-safe. Safe to call even if not suspended. - * - * @code - * if (needToPause) { - * graph.suspend(); - * handleHighPriorityWork(); - * graph.resume(); - * } - * @endcode - */ - void resume(); - - /** - * @brief Checks if the graph is currently suspended - * - * @return true if suspend() was called and resume() hasn't been called yet - */ - bool isSuspended() const noexcept { return _suspended.load(std::memory_order_acquire); } - - /** - * @brief Blocks until your entire workflow finishes - success or failure - * - * Synchronization point using condition variables. Returns execution summary. - * Thread-safe. - * - * @return Execution summary with success/failure counts - * - * @code - * graph.execute(); - * // Do other work while graph runs... 
- * - * auto result = graph.wait(); - * if (result.allCompleted) { - * LOG_INFO("Workflow completed successfully!"); - * } else if (result.failedCount > 0) { - * LOG_ERROR("Workflow had {} failures", result.failedCount); - * // Check which nodes failed for debugging - * } else if (result.droppedCount > 0) { - * LOG_WARN("Dropped {} nodes due to capacity", result.droppedCount); - * // Maybe increase WorkContractGroup size? - * } - * @endcode - */ - WaitResult wait(); - - /** - * @brief Quick non-blocking check if your workflow is done - * - * Returns true when all nodes reached terminal state. Perfect for polling - * in game loops. - * - * @return true if all nodes are done, false if work remains - * - * @code - * // In your game loop - * if (!graph.isComplete()) { - * renderLoadingScreen(); - * } else { - * auto stats = graph.getStats(); - * if (stats.failedNodes == 0) { - * proceedToNextLevel(); - * } else { - * showErrorDialog(); - * } - * } - * @endcode - */ - bool isComplete() const; - - /** - * @brief Manually drain the deferred queue when capacity becomes available - * - * Schedules deferred nodes when capacity frees up. Usually automatic via - * callbacks. - * - * @return How many deferred nodes were successfully scheduled - * - * @code - * // After manually cancelling some work - * workGroup.cancelSomeContracts(); - * size_t scheduled = graph.processDeferredNodes(); - * LOG_INFO("Scheduled {} previously deferred nodes", scheduled); - * @endcode - */ - size_t processDeferredNodes(); - - /** - * @brief Checks timed deferrals and schedules nodes whose wake time has arrived - * - * Examines nodes that yielded with a specific wake time (e.g., timers) and - * schedules any whose scheduled time has passed. Call this periodically from - * your main loop or worker threads to ensure timers fire promptly. 
- * - * @return Number of timed nodes successfully scheduled - * - * @code - * // In main loop - * while (running) { - * graph.checkTimedDeferrals(); // Wake up any ready timers - * workService->executeMainThreadWork(10); - * std::this_thread::sleep_for(10ms); - * } - * @endcode - */ - size_t checkTimedDeferrals(); - - /** - * @brief Access the event system for monitoring graph execution - * - * Lazy-initialized event bus if events enabled. Returns nullptr otherwise. - * - * @return Event bus for subscriptions, or nullptr if events disabled - * - * @code - * // Subscribe to completion events - * if (auto* bus = graph.getEventBus()) { - * bus->subscribe([](const auto& event) { - * LOG_INFO("Node completed: {} in {}ms", - * event.node.getData()->name, - * duration_cast(event.executionTime).count()); - * }); - * - * bus->subscribe([](const auto& event) { - * LOG_ERROR("Node failed: {}", event.node.getData()->name); - * // Could rethrow the exception for debugging - * }); - * } - * @endcode - */ - Core::EventBus* getEventBus(); - - /** - * @brief Access the underlying node payload for a given handle - * - * Useful for debugging or custom instrumentation. Returns nullptr if the - * handle is invalid or the node has been removed. Prefer using public APIs - * for scheduling and state changes rather than mutating node data directly. - * - * @param node Handle to the node whose data you want - * @return Pointer to node data (mutable/const), or nullptr if not valid - * - * @code - * // Read node name for logging - * if (auto* n = graph.getNodeData(handle)) { - * LOG_INFO("Node: {}", n->name); - * } - * @endcode - */ - WorkGraphNode* getNodeData(const NodeHandle& node) { return _graph.getNodeData(node); } - const WorkGraphNode* getNodeData(const NodeHandle& node) const { return _graph.getNodeData(node); } - - /** - * @brief Snapshot of your workflow's current state - how's it doing? - * - * Returns consistent snapshot of all stats. Great for progress bars or - * monitoring. 
- * - * @return Complete statistics snapshot at this moment - * - * @code - * // Progress monitoring - * auto stats = graph.getStats(); - * float progress = (float)stats.completedNodes / stats.totalNodes * 100; - * LOG_INFO("Progress: {:.1f}% ({}/{} nodes)", - * progress, stats.completedNodes, stats.totalNodes); - * - * if (stats.failedNodes > 0) { - * LOG_WARN("Failures detected: {} nodes failed", stats.failedNodes); - * } - * @endcode - */ - WorkGraphStats::Snapshot getStats() const; - - /** - * @brief Access the configuration this graph was created with - * @return The config struct passed to constructor (or defaults) - */ - const WorkGraphConfig& getConfig() const { return _config; } - - /** - * @brief Quick check of how much work remains - * @return Nodes that haven't reached terminal state yet - */ - uint32_t getPendingCount() const { - return _pendingNodes.load(std::memory_order_acquire); - } + void resume(); - /** - * @brief Get the total number of nodes in the graph - * @return Total node count (useful for checking if graph needs rebuilding) - */ - size_t getNodeCount() const { - return _nodeHandles.size(); - } + /** + * @brief Checks if the graph is currently suspended + * + * @return true if suspend() was called and resume() hasn't been called yet + */ + bool isSuspended() const noexcept { + return _suspended.load(std::memory_order_acquire); + } - /** - * @brief Install a hook that fires whenever a node finishes - * - * Simple completion tracking. Called synchronously - keep it fast! 
- * - * @param callback Function to call on each node completion - * - * @code - * // Simple progress tracker - * std::atomic completed{0}; - * graph.setNodeCompleteCallback([&completed](NodeHandle node) { - * int count = ++completed; - * if (count % 100 == 0) { - * LOG_INFO("Completed {} nodes", count); - * } - * }); - * @endcode - */ - void setNodeCompleteCallback(std::function callback) { - _onNodeComplete = std::move(callback); - } - - /** - * @brief Create a "join" node that waits for multiple parents - perfect for fan-in patterns - * - * Convenience for creating a node that waits for multiple parents. Runs only - * after ALL parents complete successfully. - * - * @param parents All nodes that must complete first - * @param work What to do after all parents finish - * @param name Debug label for the continuation node - * @param executionType Where this node should execute (default: AnyThread) - * @return Handle to the newly created continuation node - * - * @code - * // Parallel processing with merge - * auto part1 = graph.addNode([]{ processPart1(); }); - * auto part2 = graph.addNode([]{ processPart2(); }); - * auto part3 = graph.addNode([]{ processPart3(); }); - * - * // Single merge point - * auto merge = graph.addContinuation( - * {part1, part2, part3}, - * []{ mergeResults(); }, - * "merger" - * ); - * - * // Main thread UI update after merge - * auto uiUpdate = graph.addContinuation( - * {merge}, - * []{ updateUI(); }, - * "ui-updater", - * ExecutionType::MainThread - * ); - * @endcode - */ - NodeHandle addContinuation(const std::vector& parents, - std::function work, - const std::string& name = "", - ExecutionType executionType = ExecutionType::AnyThread); - - - /** - * @brief Test if a node handle still points to a real node - * - * @param handle The handle to validate - * @return true if the handle points to a valid node - */ - bool isHandleValid(const NodeHandle& handle) const { - return _graph.isHandleValid(handle); - } - - /** - * @brief Find all 
nodes that depend on this one - * - * Returns direct children. Useful for debugging or visualization. - * - * @param node The parent whose children you want - * @return List of nodes that depend on the given node - * - * @code - * // Find what depends on a critical node - * auto children = graph.getChildren(criticalNode); - * LOG_INFO("Node has {} dependents", children.size()); - * for (auto& child : children) { - * LOG_INFO(" - {}", child.getData()->name); - * } - * @endcode - */ - std::vector getChildren(const NodeHandle& node) const { - return _graph.getChildren(node); - } - - private: - /** - * @brief The domino effect handler - when one task finishes, what happens next? - * - * Heart of dependency resolution. Decrements child dependency counts and - * schedules ready nodes. Uses atomics for thread-safe concurrent updates. - * Called automatically by work wrapper. - * - * @param node The node that just finished executing successfully - */ - void onNodeComplete(NodeHandle node); - - /** - * @brief Takes a ready node and gets it running in the thread pool - * - * Wraps work with dependency tracking and exception handling. Uses CAS - * for exactly-once execution. Called only by execute() and onNodeComplete(). - * - * @param node The ready node to submit for execution - * @return true if scheduled, false if already scheduled/completed - */ - bool scheduleNode(NodeHandle node); - - /** - * @brief Bumps up how many parents a node is waiting for - * - * Called by addDependency(). Atomic to handle concurrent graph construction. - * Private - users work through addDependency(). - * - * @param node The node that just got another parent to wait for - */ - void incrementDependencies(NodeHandle node); - - /** - * @brief Internal root scheduling - assumes you already hold the graph lock - * - * Called by execute() with lock held. Scans for zero-dependency nodes. - * Private - unsafe without proper locking. 
- * - * @return How many root nodes were found and scheduled - */ - size_t scheduleRootsLocked(); - - - /** - * @brief Propagates failure through the graph - if parent fails, children can't run - * - * Cascades cancellation transitively through dependent nodes. Private - only - * triggered by onNodeFailed(). - * - * @param failedNode The node whose failure triggers the cascade - */ - void cancelDependents(NodeHandle failedNode); - - /** - * @brief Handles the bookkeeping when a node gets cancelled - * - * Updates counters and fires events. Cancellation cascades to children. - * Private - only triggered internally. - * - * @param node The node that's being cancelled - */ - void onNodeCancelled(NodeHandle node); - - /** - * @brief Deals with the aftermath when a node's work function throws - * - * Updates stats, fires events with exception details, cancels dependents. - * Private - called by work wrapper. - * - * @param node The node whose work function threw an exception - */ - void onNodeFailed(NodeHandle node); - - /** - * @brief Handles a node that has yielded execution - * - * Transitions the node from Executing to Yielded state and reschedules it - * for later execution. Checks reschedule limits to prevent infinite loops. - * - * @param node The node that yielded - */ - void onNodeYielded(NodeHandle node); - - /** - * @brief Handles timed node yield - node suspended until specific time - * - * Transitions the node from Executing to Yielded state and defers it - * until the specified wake time. The node sleeps passively in a priority - * queue consuming no CPU until the wake time arrives. - * - * @param node The node that yielded - * @param wakeTime When the node should be reconsidered for scheduling - */ - void onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime); - - /** - * @brief Reschedules a yielded node for execution - * - * Transitions the node from Yielded to Ready state and schedules it - * again. 
Called after a node yields to give it another chance to run. - * - * @param node The yielded node to reschedule - */ - void rescheduleYieldedNode(NodeHandle node); - - }; + /** + * @brief Blocks until your entire workflow finishes - success or failure + * + * Synchronization point using condition variables. Returns execution summary. + * Thread-safe. + * + * @return Execution summary with success/failure counts + * + * @code + * graph.execute(); + * // Do other work while graph runs... + * + * auto result = graph.wait(); + * if (result.allCompleted) { + * LOG_INFO("Workflow completed successfully!"); + * } else if (result.failedCount > 0) { + * LOG_ERROR("Workflow had {} failures", result.failedCount); + * // Check which nodes failed for debugging + * } else if (result.droppedCount > 0) { + * LOG_WARN("Dropped {} nodes due to capacity", result.droppedCount); + * // Maybe increase WorkContractGroup size? + * } + * @endcode + */ + WaitResult wait(); + + /** + * @brief Quick non-blocking check if your workflow is done + * + * Returns true when all nodes reached terminal state. Perfect for polling + * in game loops. + * + * @return true if all nodes are done, false if work remains + * + * @code + * // In your game loop + * if (!graph.isComplete()) { + * renderLoadingScreen(); + * } else { + * auto stats = graph.getStats(); + * if (stats.failedNodes == 0) { + * proceedToNextLevel(); + * } else { + * showErrorDialog(); + * } + * } + * @endcode + */ + bool isComplete() const; + + /** + * @brief Manually drain the deferred queue when capacity becomes available + * + * Schedules deferred nodes when capacity frees up. Usually automatic via + * callbacks. 
+ * + * @return How many deferred nodes were successfully scheduled + * + * @code + * // After manually cancelling some work + * workGroup.cancelSomeContracts(); + * size_t scheduled = graph.processDeferredNodes(); + * LOG_INFO("Scheduled {} previously deferred nodes", scheduled); + * @endcode + */ + size_t processDeferredNodes(); + + /** + * @brief Checks timed deferrals and schedules nodes whose wake time has arrived + * + * Examines nodes that yielded with a specific wake time (e.g., timers) and + * schedules any whose scheduled time has passed. Call this periodically from + * your main loop or worker threads to ensure timers fire promptly. + * + * @return Number of timed nodes successfully scheduled + * + * @code + * // In main loop + * while (running) { + * graph.checkTimedDeferrals(); // Wake up any ready timers + * workService->executeMainThreadWork(10); + * std::this_thread::sleep_for(10ms); + * } + * @endcode + */ + size_t checkTimedDeferrals(); + + /** + * @brief Access the event system for monitoring graph execution + * + * Lazy-initialized event bus if events enabled. Returns nullptr otherwise. + * + * @return Event bus for subscriptions, or nullptr if events disabled + * + * @code + * // Subscribe to completion events + * if (auto* bus = graph.getEventBus()) { + * bus->subscribe([](const auto& event) { + * LOG_INFO("Node completed: {} in {}ms", + * event.node.getData()->name, + * duration_cast(event.executionTime).count()); + * }); + * + * bus->subscribe([](const auto& event) { + * LOG_ERROR("Node failed: {}", event.node.getData()->name); + * // Could rethrow the exception for debugging + * }); + * } + * @endcode + */ + Core::EventBus* getEventBus(); + + /** + * @brief Access the underlying node payload for a given handle + * + * Useful for debugging or custom instrumentation. Returns nullptr if the + * handle is invalid or the node has been removed. Prefer using public APIs + * for scheduling and state changes rather than mutating node data directly. 
+ * + * @param node Handle to the node whose data you want + * @return Pointer to node data (mutable/const), or nullptr if not valid + * + * @code + * // Read node name for logging + * if (auto* n = graph.getNodeData(handle)) { + * LOG_INFO("Node: {}", n->name); + * } + * @endcode + */ + WorkGraphNode* getNodeData(const NodeHandle& node) { + return _graph.getNodeData(node); + } + const WorkGraphNode* getNodeData(const NodeHandle& node) const { + return _graph.getNodeData(node); + } + + /** + * @brief Snapshot of your workflow's current state - how's it doing? + * + * Returns consistent snapshot of all stats. Great for progress bars or + * monitoring. + * + * @return Complete statistics snapshot at this moment + * + * @code + * // Progress monitoring + * auto stats = graph.getStats(); + * float progress = (float)stats.completedNodes / stats.totalNodes * 100; + * LOG_INFO("Progress: {:.1f}% ({}/{} nodes)", + * progress, stats.completedNodes, stats.totalNodes); + * + * if (stats.failedNodes > 0) { + * LOG_WARN("Failures detected: {} nodes failed", stats.failedNodes); + * } + * @endcode + */ + WorkGraphStats::Snapshot getStats() const; + + /** + * @brief Access the configuration this graph was created with + * @return The config struct passed to constructor (or defaults) + */ + const WorkGraphConfig& getConfig() const { + return _config; + } + + /** + * @brief Quick check of how much work remains + * @return Nodes that haven't reached terminal state yet + */ + uint32_t getPendingCount() const { + return _pendingNodes.load(std::memory_order_acquire); + } + + /** + * @brief Get the total number of nodes in the graph + * @return Total node count (useful for checking if graph needs rebuilding) + */ + size_t getNodeCount() const { + return _nodeHandles.size(); + } -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine + /** + * @brief Install a hook that fires whenever a node finishes + * + * Simple completion tracking. 
Called synchronously - keep it fast! + * + * @param callback Function to call on each node completion + * + * @code + * // Simple progress tracker + * std::atomic completed{0}; + * graph.setNodeCompleteCallback([&completed](NodeHandle node) { + * int count = ++completed; + * if (count % 100 == 0) { + * LOG_INFO("Completed {} nodes", count); + * } + * }); + * @endcode + */ + void setNodeCompleteCallback(std::function callback) { + _onNodeComplete = std::move(callback); + } + + /** + * @brief Create a "join" node that waits for multiple parents - perfect for fan-in patterns + * + * Convenience for creating a node that waits for multiple parents. Runs only + * after ALL parents complete successfully. + * + * @param parents All nodes that must complete first + * @param work What to do after all parents finish + * @param name Debug label for the continuation node + * @param executionType Where this node should execute (default: AnyThread) + * @return Handle to the newly created continuation node + * + * @code + * // Parallel processing with merge + * auto part1 = graph.addNode([]{ processPart1(); }); + * auto part2 = graph.addNode([]{ processPart2(); }); + * auto part3 = graph.addNode([]{ processPart3(); }); + * + * // Single merge point + * auto merge = graph.addContinuation( + * {part1, part2, part3}, + * []{ mergeResults(); }, + * "merger" + * ); + * + * // Main thread UI update after merge + * auto uiUpdate = graph.addContinuation( + * {merge}, + * []{ updateUI(); }, + * "ui-updater", + * ExecutionType::MainThread + * ); + * @endcode + */ + NodeHandle addContinuation(const std::vector& parents, std::function work, + const std::string& name = "", ExecutionType executionType = ExecutionType::AnyThread); + + /** + * @brief Test if a node handle still points to a real node + * + * @param handle The handle to validate + * @return true if the handle points to a valid node + */ + bool isHandleValid(const NodeHandle& handle) const { + return _graph.isHandleValid(handle); + 
} + + /** + * @brief Find all nodes that depend on this one + * + * Returns direct children. Useful for debugging or visualization. + * + * @param node The parent whose children you want + * @return List of nodes that depend on the given node + * + * @code + * // Find what depends on a critical node + * auto children = graph.getChildren(criticalNode); + * LOG_INFO("Node has {} dependents", children.size()); + * for (auto& child : children) { + * LOG_INFO(" - {}", child.getData()->name); + * } + * @endcode + */ + std::vector getChildren(const NodeHandle& node) const { + return _graph.getChildren(node); + } + +private: + /** + * @brief The domino effect handler - when one task finishes, what happens next? + * + * Heart of dependency resolution. Decrements child dependency counts and + * schedules ready nodes. Uses atomics for thread-safe concurrent updates. + * Called automatically by work wrapper. + * + * @param node The node that just finished executing successfully + */ + void onNodeComplete(NodeHandle node); + + /** + * @brief Takes a ready node and gets it running in the thread pool + * + * Wraps work with dependency tracking and exception handling. Uses CAS + * for exactly-once execution. Called only by execute() and onNodeComplete(). + * + * @param node The ready node to submit for execution + * @return true if scheduled, false if already scheduled/completed + */ + bool scheduleNode(NodeHandle node); + + /** + * @brief Bumps up how many parents a node is waiting for + * + * Called by addDependency(). Atomic to handle concurrent graph construction. + * Private - users work through addDependency(). + * + * @param node The node that just got another parent to wait for + */ + void incrementDependencies(NodeHandle node); + + /** + * @brief Internal root scheduling - assumes you already hold the graph lock + * + * Called by execute() with lock held. Scans for zero-dependency nodes. + * Private - unsafe without proper locking. 
+ * + * @return How many root nodes were found and scheduled + */ + size_t scheduleRootsLocked(); + + /** + * @brief Propagates failure through the graph - if parent fails, children can't run + * + * Cascades cancellation transitively through dependent nodes. Private - only + * triggered by onNodeFailed(). + * + * @param failedNode The node whose failure triggers the cascade + */ + void cancelDependents(NodeHandle failedNode); + + /** + * @brief Handles the bookkeeping when a node gets cancelled + * + * Updates counters and fires events. Cancellation cascades to children. + * Private - only triggered internally. + * + * @param node The node that's being cancelled + */ + void onNodeCancelled(NodeHandle node); + + /** + * @brief Deals with the aftermath when a node's work function throws + * + * Updates stats, fires events with exception details, cancels dependents. + * Private - called by work wrapper. + * + * @param node The node whose work function threw an exception + */ + void onNodeFailed(NodeHandle node); + + /** + * @brief Handles a node that has yielded execution + * + * Transitions the node from Executing to Yielded state and reschedules it + * for later execution. Checks reschedule limits to prevent infinite loops. + * + * @param node The node that yielded + */ + void onNodeYielded(NodeHandle node); + + /** + * @brief Handles timed node yield - node suspended until specific time + * + * Transitions the node from Executing to Yielded state and defers it + * until the specified wake time. The node sleeps passively in a priority + * queue consuming no CPU until the wake time arrives. + * + * @param node The node that yielded + * @param wakeTime When the node should be reconsidered for scheduling + */ + void onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime); + + /** + * @brief Reschedules a yielded node for execution + * + * Transitions the node from Yielded to Ready state and schedules it + * again. 
Called after a node yields to give it another chance to run. + * + * @param node The yielded node to reschedule + */ + void rescheduleYieldedNode(NodeHandle node); +}; +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkGraphEvents.h b/src/Concurrency/WorkGraphEvents.h index be7358d..28d8df6 100644 --- a/src/Concurrency/WorkGraphEvents.h +++ b/src/Concurrency/WorkGraphEvents.h @@ -10,7 +10,7 @@ /** * @file WorkGraphEvents.h * @brief Event definitions for monitoring WorkGraph execution - your window into the workflow - * + * * This file contains all the event types that WorkGraph can emit during execution. * Subscribe to these events to build monitoring tools, debuggers, visualizers, or * just to understand what's happening inside your parallel workflows. @@ -18,57 +18,60 @@ #pragma once -#include "WorkGraphTypes.h" #include -#include #include +#include -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +#include "WorkGraphTypes.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ /** * @brief The mother of all WorkGraph events - timestamp and source - * + * * Every event carries these two essential pieces of information: when it happened * and which graph it came from. The timestamp uses steady_clock for reliable * duration measurements even if the system clock changes. - * + * * Minimal overhead design. 
- * + * * @code * // All events inherit from this, so you can handle them generically * eventBus->subscribe([](const WorkGraphEvent& event) { * auto elapsed = std::chrono::steady_clock::now() - event.timestamp; - * LOG_DEBUG("Event from graph {} occurred {}ms ago", - * event.graph, + * LOG_DEBUG("Event from graph {} occurred {}ms ago", + * event.graph, * std::chrono::duration_cast(elapsed).count()); * }); * @endcode */ -struct WorkGraphEvent { +struct WorkGraphEvent +{ std::chrono::steady_clock::time_point timestamp; ///< When this event was created const WorkGraph* graph; ///< Which graph emitted this event - - explicit WorkGraphEvent(const WorkGraph* g) - : timestamp(std::chrono::steady_clock::now()) - , graph(g) {} + + explicit WorkGraphEvent(const WorkGraph* g) : timestamp(std::chrono::steady_clock::now()), graph(g) {} }; /** * @brief The workhorse event - tracks every state transition in your workflow - * + * * This is THE event for understanding execution flow. Every time a node changes * state (Pending→Ready, Ready→Scheduled, etc.), this event fires. Perfect for * building state diagrams, progress trackers, or debugging stuck workflows. 
- * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_INFO("Node {} transitioned from {} to {}", * event.node.getData()->name, * nodeStateToString(event.oldState), * nodeStateToString(event.newState)); - * + * * // Track progress * if (event.newState == NodeState::Completed) { * updateProgressBar(); @@ -76,22 +79,23 @@ struct WorkGraphEvent { * }); * @endcode */ -struct NodeStateChangedEvent : WorkGraphEvent { - NodeHandle node; ///< The node that changed state - NodeState oldState; ///< What it was - NodeState newState; ///< What it is now - +struct NodeStateChangedEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node that changed state + NodeState oldState; ///< What it was + NodeState newState; ///< What it is now + NodeStateChangedEvent(const WorkGraph* g, NodeHandle n, NodeState from, NodeState to) : WorkGraphEvent(g), node(n), oldState(from), newState(to) {} }; /** * @brief Fired when a new task joins your workflow - * + * * Useful for dynamic graph visualization or tracking graph construction. * Note this fires immediately when addNode() is called, before any dependencies * are established. - * + * * @code * eventBus->subscribe([&nodeCount](const auto& event) { * nodeCount++; @@ -99,20 +103,20 @@ struct NodeStateChangedEvent : WorkGraphEvent { * }); * @endcode */ -struct NodeAddedEvent : WorkGraphEvent { - NodeHandle node; ///< The newly added node - - NodeAddedEvent(const WorkGraph* g, NodeHandle n) - : WorkGraphEvent(g), node(n) {} +struct NodeAddedEvent : WorkGraphEvent +{ + NodeHandle node; ///< The newly added node + + NodeAddedEvent(const WorkGraph* g, NodeHandle n) : WorkGraphEvent(g), node(n) {} }; /** * @brief Fired when you wire two nodes together - * + * * This event captures the moment a dependency is established. The 'from' node * must complete before the 'to' node can run. Great for building visual * representations of your workflow structure. 
- * + * * @code * // Build a dependency graph visualization * eventBus->subscribe([&graphViz](const auto& event) { @@ -123,21 +127,21 @@ struct NodeAddedEvent : WorkGraphEvent { * }); * @endcode */ -struct DependencyAddedEvent : WorkGraphEvent { - NodeHandle from; ///< The prerequisite node (parent) - NodeHandle to; ///< The dependent node (child) - - DependencyAddedEvent(const WorkGraph* g, NodeHandle f, NodeHandle t) - : WorkGraphEvent(g), from(f), to(t) {} +struct DependencyAddedEvent : WorkGraphEvent +{ + NodeHandle from; ///< The prerequisite node (parent) + NodeHandle to; ///< The dependent node (child) + + DependencyAddedEvent(const WorkGraph* g, NodeHandle f, NodeHandle t) : WorkGraphEvent(g), from(f), to(t) {} }; /** * @brief Success! A node finished without throwing - * + * * This is your "mission accomplished" notification. The node ran to completion * without exceptions. Includes execution time for performance analysis - though * this might be zero if timing wasn't tracked. - * + * * @code * // Performance monitoring * eventBus->subscribe([](const auto& event) { @@ -145,34 +149,34 @@ struct DependencyAddedEvent : WorkGraphEvent { * event.executionTime).count(); * LOG_INFO("Node {} completed in {}ms", * event.node.getData()->name, ms); - * + * * if (ms > 1000) { * LOG_WARN("Slow node detected!"); * } * }); * @endcode */ -struct NodeCompletedEvent : WorkGraphEvent { - NodeHandle node; ///< The successful node - std::chrono::steady_clock::duration executionTime; ///< How long it took - - NodeCompletedEvent(const WorkGraph* g, NodeHandle n, - std::chrono::steady_clock::duration duration = {}) +struct NodeCompletedEvent : WorkGraphEvent +{ + NodeHandle node; ///< The successful node + std::chrono::steady_clock::duration executionTime; ///< How long it took + + NodeCompletedEvent(const WorkGraph* g, NodeHandle n, std::chrono::steady_clock::duration duration = {}) : WorkGraphEvent(g), node(n), executionTime(duration) {} }; /** * @brief Uh oh - a node threw an 
exception - * + * * When a node's work function throws, this event captures the failure. The * exception is preserved as exception_ptr so you can rethrow it for debugging * or log the details. Remember: node failure triggers cancellation of all * dependent nodes! - * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_ERROR("Node {} failed!", event.node.getData()->name); - * + * * if (event.exception) { * try { * std::rethrow_exception(event.exception); @@ -185,157 +189,159 @@ struct NodeCompletedEvent : WorkGraphEvent { * }); * @endcode */ -struct NodeFailedEvent : WorkGraphEvent { - NodeHandle node; ///< The node that failed - std::exception_ptr exception; ///< What was thrown (may be null) - +struct NodeFailedEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node that failed + std::exception_ptr exception; ///< What was thrown (may be null) + NodeFailedEvent(const WorkGraph* g, NodeHandle n, std::exception_ptr ex = nullptr) : WorkGraphEvent(g), node(n), exception(ex) {} }; /** * @brief A node was cancelled due to upstream failure - * + * * When a parent node fails, all its descendants get cancelled since their inputs * are invalid. This event tells you which nodes were skipped and why. The * failedParent tells you which upstream failure caused this cancellation. - * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_WARN("Node {} cancelled due to failure of {}", * event.node.getData()->name, - * event.failedParent.valid() ? + * event.failedParent.valid() ? 
* event.failedParent.getData()->name : "unknown"); * }); * @endcode */ -struct NodeCancelledEvent : WorkGraphEvent { - NodeHandle node; ///< The cancelled node - NodeHandle failedParent; ///< Which parent's failure caused this (may be invalid) - +struct NodeCancelledEvent : WorkGraphEvent +{ + NodeHandle node; ///< The cancelled node + NodeHandle failedParent; ///< Which parent's failure caused this (may be invalid) + NodeCancelledEvent(const WorkGraph* g, NodeHandle n, NodeHandle parent = NodeHandle()) : WorkGraphEvent(g), node(n), failedParent(parent) {} }; /** * @brief All dependencies satisfied - this node is ready to rock! - * + * * Fired when a node transitions from Pending to Ready. This means all its * parent nodes have completed successfully and it's eligible for scheduling. * The actual scheduling might be delayed if the work queue is full. - * + * * @code * // Track scheduling latency * std::unordered_map readyTimes; - * + * * eventBus->subscribe([&readyTimes](const auto& event) { * readyTimes[event.node] = std::chrono::steady_clock::now(); * }); - * + * * eventBus->subscribe([&readyTimes](const auto& event) { * auto it = readyTimes.find(event.node); * if (it != readyTimes.end()) { * auto delay = std::chrono::steady_clock::now() - it->second; - * LOG_DEBUG("Scheduling delay: {}μs", + * LOG_DEBUG("Scheduling delay: {}μs", * std::chrono::duration_cast(delay).count()); * } * }); * @endcode */ -struct NodeReadyEvent : WorkGraphEvent { - NodeHandle node; ///< The node that's ready to execute - - NodeReadyEvent(const WorkGraph* g, NodeHandle n) - : WorkGraphEvent(g), node(n) {} +struct NodeReadyEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node that's ready to execute + + NodeReadyEvent(const WorkGraph* g, NodeHandle n) : WorkGraphEvent(g), node(n) {} }; /** * @brief Node has been submitted to the thread pool - * + * * This fires when a ready node is successfully scheduled into the WorkContractGroup. 
* The node is now in the work queue waiting for a thread to pick it up. Next stop: * NodeExecutingEvent when a thread actually starts running it. - * + * * @code * // Monitor queue depth * std::atomic queuedNodes{0}; - * + * * eventBus->subscribe([&queuedNodes](const auto& event) { * queuedNodes++; * LOG_DEBUG("Work queue depth: {}", queuedNodes.load()); * }); - * + * * eventBus->subscribe([&queuedNodes](const auto& event) { * queuedNodes--; * }); * @endcode */ -struct NodeScheduledEvent : WorkGraphEvent { - NodeHandle node; ///< The node that was queued - - NodeScheduledEvent(const WorkGraph* g, NodeHandle n) - : WorkGraphEvent(g), node(n) {} +struct NodeScheduledEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node that was queued + + NodeScheduledEvent(const WorkGraph* g, NodeHandle n) : WorkGraphEvent(g), node(n) {} }; /** * @brief A thread has started running this node's work - * + * * The moment of truth - a worker thread has dequeued this node and is about to * call its work function. Includes the thread ID for tracking thread utilization * and debugging thread-related issues. 
- * + * * @code * // Thread utilization tracking * std::unordered_map threadWork; - * + * * eventBus->subscribe([&threadWork](const auto& event) { * threadWork[event.threadId] = event.node.getData()->name; - * LOG_DEBUG("Thread {} executing: {}", + * LOG_DEBUG("Thread {} executing: {}", * event.threadId, event.node.getData()->name); * }); * @endcode */ -struct NodeExecutingEvent : WorkGraphEvent { - NodeHandle node; ///< The node being executed - size_t threadId; ///< Which worker thread is running it - - NodeExecutingEvent(const WorkGraph* g, NodeHandle n, size_t tid = 0) - : WorkGraphEvent(g), node(n), threadId(tid) {} +struct NodeExecutingEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node being executed + size_t threadId; ///< Which worker thread is running it + + NodeExecutingEvent(const WorkGraph* g, NodeHandle n, size_t tid = 0) : WorkGraphEvent(g), node(n), threadId(tid) {} }; /** * @brief Work queue is full - this node has to wait - * + * * When the WorkContractGroup is at capacity, ready nodes get deferred instead * of dropped. This event lets you monitor back-pressure in your system. High * deferral rates suggest you need more worker threads or smaller work items. 
- * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_WARN("Node {} deferred - queue depth: {}", * event.node.getData()->name, event.queueDepth); - * + * * if (event.queueDepth > 100) { * LOG_ERROR("Severe backlog detected!"); * } * }); * @endcode */ -struct NodeDeferredEvent : WorkGraphEvent { - NodeHandle node; ///< The node that couldn't be scheduled - size_t queueDepth; ///< How many nodes are waiting in deferred queue - - NodeDeferredEvent(const WorkGraph* g, NodeHandle n, size_t depth) - : WorkGraphEvent(g), node(n), queueDepth(depth) {} +struct NodeDeferredEvent : WorkGraphEvent +{ + NodeHandle node; ///< The node that couldn't be scheduled + size_t queueDepth; ///< How many nodes are waiting in deferred queue + + NodeDeferredEvent(const WorkGraph* g, NodeHandle n, size_t depth) : WorkGraphEvent(g), node(n), queueDepth(depth) {} }; /** * @brief The starting gun - workflow execution begins! - * + * * Fired when execute() is called. Gives you the big picture: how many total * nodes need to run and how many root nodes (no dependencies) are kicking * things off. Perfect for initializing progress tracking. - * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_INFO("Starting workflow with {} nodes ({} roots)", @@ -344,102 +350,104 @@ struct NodeDeferredEvent : WorkGraphEvent { * }); * @endcode */ -struct GraphExecutionStartedEvent : WorkGraphEvent { - size_t totalNodes; ///< How many nodes in the entire graph - size_t rootNodes; ///< How many have no dependencies - +struct GraphExecutionStartedEvent : WorkGraphEvent +{ + size_t totalNodes; ///< How many nodes in the entire graph + size_t rootNodes; ///< How many have no dependencies + GraphExecutionStartedEvent(const WorkGraph* g, size_t total, size_t roots) : WorkGraphEvent(g), totalNodes(total), rootNodes(roots) {} }; /** * @brief The finish line - all nodes have reached terminal states - * + * * This fires when the entire graph is done, whether successfully or not. 
* The stats snapshot gives you the complete picture: successes, failures, * cancellations, and performance metrics. This is your post-mortem data. - * + * * @code * eventBus->subscribe([](const auto& event) { * const auto& stats = event.stats; * LOG_INFO("Workflow complete: {} succeeded, {} failed, {} cancelled", * stats.completedNodes, stats.failedNodes, stats.cancelledNodes); - * + * * auto seconds = std::chrono::duration_cast( * stats.totalExecutionTime).count(); * LOG_INFO("Total execution time: {}s", seconds); - * + * * if (stats.failedNodes > 0) { * LOG_ERROR("Workflow had failures!"); * } * }); * @endcode */ -struct GraphExecutionCompletedEvent : WorkGraphEvent { +struct GraphExecutionCompletedEvent : WorkGraphEvent +{ WorkGraphStats::Snapshot stats; ///< Final statistics for the run - - GraphExecutionCompletedEvent(const WorkGraph* g, const WorkGraphStats::Snapshot& s) - : WorkGraphEvent(g), stats(s) {} + + GraphExecutionCompletedEvent(const WorkGraph* g, const WorkGraphStats::Snapshot& s) : WorkGraphEvent(g), stats(s) {} }; /** * @brief One step closer - a parent completed and child's dependency count dropped - * + * * This granular event fires each time a parent node completes and decrements * a child's dependency counter. When remainingDependencies reaches zero, the * child becomes ready. Useful for understanding the cascade of execution. 
- * + * * @code * eventBus->subscribe([](const auto& event) { * LOG_DEBUG("{} completed, {} now has {} dependencies left", * event.from.getData()->name, * event.to.getData()->name, * event.remainingDependencies); - * + * * if (event.remainingDependencies == 0) { * LOG_INFO("{} is now ready!", event.to.getData()->name); * } * }); * @endcode */ -struct DependencyResolvedEvent : WorkGraphEvent { - NodeHandle from; ///< Parent that just completed - NodeHandle to; ///< Child being notified - uint32_t remainingDependencies; ///< How many more parents must complete - +struct DependencyResolvedEvent : WorkGraphEvent +{ + NodeHandle from; ///< Parent that just completed + NodeHandle to; ///< Child being notified + uint32_t remainingDependencies; ///< How many more parents must complete + DependencyResolvedEvent(const WorkGraph* g, NodeHandle parent, NodeHandle child, uint32_t remaining) : WorkGraphEvent(g), from(parent), to(child), remainingDependencies(remaining) {} }; /** * @brief Periodic health check - current graph statistics - * + * * Some implementations fire this periodically during execution to provide * real-time monitoring without needing to poll getStats(). The frequency * depends on the implementation. Useful for dashboards and progress bars. 
- * + * * @code * // Real-time progress monitoring * eventBus->subscribe([](const auto& event) { * const auto& stats = event.stats; - * float progress = (float)(stats.completedNodes + stats.failedNodes + + * float progress = (float)(stats.completedNodes + stats.failedNodes + * stats.cancelledNodes) / stats.totalNodes * 100; - * + * * updateProgressBar(progress); * updateStatusText("Running: {}, Queued: {}, Complete: {}", - * stats.executingNodes, + * stats.executingNodes, * stats.scheduledNodes, * stats.completedNodes); * }); * @endcode */ -struct GraphStatsEvent : WorkGraphEvent { +struct GraphStatsEvent : WorkGraphEvent +{ WorkGraphStats::Snapshot stats; ///< Current statistics snapshot - - GraphStatsEvent(const WorkGraph* g, const WorkGraphStats::Snapshot& s) - : WorkGraphEvent(g), stats(s) {} + + GraphStatsEvent(const WorkGraph* g, const WorkGraphStats::Snapshot& s) : WorkGraphEvent(g), stats(s) {} }; -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkGraphTypes.h b/src/Concurrency/WorkGraphTypes.h index 546bb04..0287c3b 100644 --- a/src/Concurrency/WorkGraphTypes.h +++ b/src/Concurrency/WorkGraphTypes.h @@ -10,7 +10,7 @@ /** * @file WorkGraphTypes.h * @brief Common types and enums for the WorkGraph system - the vocabulary of task orchestration - * + * * This file contains all the type definitions, enums, and configuration structures used * throughout the WorkGraph system. It's kept separate to avoid circular dependencies * and to provide a clean, central place for all the types you'll work with. 
@@ -18,411 +18,434 @@ #pragma once +#include #include #include #include -#include #include + #include "../Graph/AcyclicNodeHandle.h" -namespace EntropyEngine { -namespace Core { - // Forward declaration - class EventBus; - -namespace Concurrency { - - // Forward declarations - struct WorkGraphNode; - class WorkGraph; - class WorkContractGroup; - class NodeScheduler; - class NodeStateManager; +namespace EntropyEngine +{ +namespace Core +{ +// Forward declaration +class EventBus; - /** - * @brief Defines where a work contract can be executed - * - * Choose AnyThread for CPU-bound work that can run anywhere. Choose MainThread - * for UI updates, OpenGL calls, or other operations that must run on a specific - * thread. Dependencies work seamlessly across execution types. - * - * @code - * // Background processing - * auto compute = graph.addNode([]{ heavyComputation(); }, - * "compute", nullptr, ExecutionType::AnyThread); - * - * // UI update that depends on computation - * auto update = graph.addNode([]{ updateProgressBar(); }, - * "update-ui", nullptr, ExecutionType::MainThread); - * - * graph.addDependency(compute, update); // UI waits for computation - * @endcode - */ - enum class ExecutionType : uint8_t { - AnyThread = 0, ///< Runs on any worker thread from the pool - MainThread = 1 ///< Must run on the main/UI thread - }; - - /** - * @brief The lifecycle states of a task node - from birth to completion - * - * Every node in your WorkGraph moves through these states as it progresses from - * "waiting for parents" to "done". The transitions are carefully controlled to - * ensure thread safety and proper dependency management. 
- * - * State flow: - * - Pending → Ready (when all dependencies complete) - * - Ready → Scheduled (when submitted to thread pool) - * - Scheduled → Executing (when thread picks it up) - * - Executing → Completed/Failed/Yielded (based on return value or exceptions) - * - Yielded → Ready (for rescheduling) - * - Any state → Cancelled (if parent fails) - * - * Terminal states (Completed, Failed, Cancelled) are final - no further transitions. - */ - enum class NodeState : uint8_t { - Pending = 0, ///< Waiting for dependencies - can't run yet - Ready = 1, ///< All dependencies satisfied, waiting for thread - Scheduled = 2, ///< Submitted to WorkContractGroup, in queue - Executing = 3, ///< Currently running on a worker thread - Completed = 4, ///< Finished successfully - triggered children - Failed = 5, ///< Exception thrown - children will be cancelled - Cancelled = 6, ///< Skipped due to parent failure - never ran - Yielded = 7 ///< Suspended execution, will be rescheduled - }; - - /** - * @brief Return value from yieldable work functions - * - * Work functions can now return a status to control their execution flow. - * Complete means the work is done, Yield means suspend and reschedule later. - * This enables coroutine-like behavior without actual C++ coroutines. 
- * - * @code - * auto node = graph.addYieldableNode([]() -> WorkResultContext { - * if (!dataReady()) { - * return WorkResultContext::yield(); // Try again later - * } - * processData(); - * return WorkResultContext::complete(); - * }); - * @endcode - */ - enum class WorkResult : uint8_t { - Complete = 0, ///< Work is done, proceed to completion - Yield = 1, ///< Suspend and reschedule immediately for later execution - YieldUntil = 2 ///< Suspend and reschedule at specific time (use WorkResultContext) - }; +namespace Concurrency +{ + +// Forward declarations +struct WorkGraphNode; +class WorkGraph; +class WorkContractGroup; +class NodeScheduler; +class NodeStateManager; + +/** + * @brief Defines where a work contract can be executed + * + * Choose AnyThread for CPU-bound work that can run anywhere. Choose MainThread + * for UI updates, OpenGL calls, or other operations that must run on a specific + * thread. Dependencies work seamlessly across execution types. + * + * @code + * // Background processing + * auto compute = graph.addNode([]{ heavyComputation(); }, + * "compute", nullptr, ExecutionType::AnyThread); + * + * // UI update that depends on computation + * auto update = graph.addNode([]{ updateProgressBar(); }, + * "update-ui", nullptr, ExecutionType::MainThread); + * + * graph.addDependency(compute, update); // UI waits for computation + * @endcode + */ +enum class ExecutionType : uint8_t +{ + AnyThread = 0, ///< Runs on any worker thread from the pool + MainThread = 1 ///< Must run on the main/UI thread +}; + +/** + * @brief The lifecycle states of a task node - from birth to completion + * + * Every node in your WorkGraph moves through these states as it progresses from + * "waiting for parents" to "done". The transitions are carefully controlled to + * ensure thread safety and proper dependency management. 
+ * + * State flow: + * - Pending → Ready (when all dependencies complete) + * - Ready → Scheduled (when submitted to thread pool) + * - Scheduled → Executing (when thread picks it up) + * - Executing → Completed/Failed/Yielded (based on return value or exceptions) + * - Yielded → Ready (for rescheduling) + * - Any state → Cancelled (if parent fails) + * + * Terminal states (Completed, Failed, Cancelled) are final - no further transitions. + */ +enum class NodeState : uint8_t +{ + Pending = 0, ///< Waiting for dependencies - can't run yet + Ready = 1, ///< All dependencies satisfied, waiting for thread + Scheduled = 2, ///< Submitted to WorkContractGroup, in queue + Executing = 3, ///< Currently running on a worker thread + Completed = 4, ///< Finished successfully - triggered children + Failed = 5, ///< Exception thrown - children will be cancelled + Cancelled = 6, ///< Skipped due to parent failure - never ran + Yielded = 7 ///< Suspended execution, will be rescheduled +}; + +/** + * @brief Return value from yieldable work functions + * + * Work functions can now return a status to control their execution flow. + * Complete means the work is done, Yield means suspend and reschedule later. + * This enables coroutine-like behavior without actual C++ coroutines. + * + * @code + * auto node = graph.addYieldableNode([]() -> WorkResultContext { + * if (!dataReady()) { + * return WorkResultContext::yield(); // Try again later + * } + * processData(); + * return WorkResultContext::complete(); + * }); + * @endcode + */ +enum class WorkResult : uint8_t +{ + Complete = 0, ///< Work is done, proceed to completion + Yield = 1, ///< Suspend and reschedule immediately for later execution + YieldUntil = 2 ///< Suspend and reschedule at specific time (use WorkResultContext) +}; + +/** + * @brief Extended result context for yieldable work functions with timing support + * + * Provides fine-grained control over when a yielded node should be rescheduled. 
+ * Use the static factory methods for convenience. + * + * @code + * // Immediate reschedule (old behavior) + * return WorkResultContext::yield(); + * + * // Timed reschedule (new - for timers, polling, etc.) + * auto wakeTime = std::chrono::steady_clock::now() + std::chrono::seconds(5); + * return WorkResultContext::yieldUntil(wakeTime); + * + * // Completion + * return WorkResultContext::complete(); + * @endcode + */ +struct WorkResultContext +{ + WorkResult result = WorkResult::Complete; + std::optional wakeTime; + + /// Creates a completion result + static WorkResultContext complete() { + return {WorkResult::Complete, std::nullopt}; + } + + /// Creates an immediate yield result (reschedule ASAP) + static WorkResultContext yield() { + return {WorkResult::Yield, std::nullopt}; + } /** - * @brief Extended result context for yieldable work functions with timing support + * @brief Creates a timed yield result (reschedule at specific time) * - * Provides fine-grained control over when a yielded node should be rescheduled. - * Use the static factory methods for convenience. + * Defers node execution until the specified wake time without consuming CPU resources. + * The node will be rescheduled for execution when the wake time is reached. * - * @code - * // Immediate reschedule (old behavior) - * return WorkResultContext::yield(); + * @param when The time point when the node should be rescheduled * - * // Timed reschedule (new - for timers, polling, etc.) - * auto wakeTime = std::chrono::steady_clock::now() + std::chrono::seconds(5); - * return WorkResultContext::yieldUntil(wakeTime); + * @note If the wake time is in the past or current time, the system treats it as + * an immediate yield (equivalent to calling yield()), rescheduling as soon + * as possible rather than failing or blocking. 
* - * // Completion - * return WorkResultContext::complete(); - * @endcode + * @return WorkResultContext configured for timed yield */ - struct WorkResultContext { - WorkResult result = WorkResult::Complete; - std::optional wakeTime; - - /// Creates a completion result - static WorkResultContext complete() { - return {WorkResult::Complete, std::nullopt}; - } - - /// Creates an immediate yield result (reschedule ASAP) - static WorkResultContext yield() { - return {WorkResult::Yield, std::nullopt}; - } - - /** - * @brief Creates a timed yield result (reschedule at specific time) - * - * Defers node execution until the specified wake time without consuming CPU resources. - * The node will be rescheduled for execution when the wake time is reached. - * - * @param when The time point when the node should be rescheduled - * - * @note If the wake time is in the past or current time, the system treats it as - * an immediate yield (equivalent to calling yield()), rescheduling as soon - * as possible rather than failing or blocking. - * - * @return WorkResultContext configured for timed yield - */ - static WorkResultContext yieldUntil(std::chrono::steady_clock::time_point when) { - return {WorkResult::YieldUntil, when}; - } - }; - - /** - * @brief The final verdict on how a node's execution went - * - * Simple enum to categorize the outcome of node execution. Used in callbacks - * and events to communicate results without needing to check multiple states. - */ - enum class ExecutionResult : uint8_t { - Success = 0, ///< Work function completed without throwing - Failed = 1, ///< Work function threw an exception - Cancelled = 2, ///< Never ran due to parent failure - Skipped = 3 ///< Skipped for other reasons (reserved for future use) - }; - - /** - * @brief Configuration parameters and feature toggles for WorkGraph instances - * - * WorkGraph supports various complexity levels from minimal to feature-rich configurations. 
- * This structure provides control over optional features and tuning parameters. - * Default values provide a lightweight graph configuration. - * - * The configuration allows customization of graph behavior through feature enablement, - * tuning, and resource management options to match specific use case requirements. - * - * @code - * // Minimal config for simplified setup - * WorkGraphConfig fastConfig; - * fastConfig.expectedNodeCount = 1000; // Pre-allocate storage - * - * // Rich config for debugging and monitoring - * WorkGraphConfig debugConfig; - * debugConfig.enableEvents = true; - * debugConfig.enableDebugLogging = true; - * debugConfig.enableDebugRegistration = true; - * - * // Custom memory management - * WorkGraphConfig customConfig; - * customConfig.nodeAllocator = myPoolAllocator; - * customConfig.nodeDeallocator = myPoolDeallocator; - * @endcode - */ - struct WorkGraphConfig { - /// Enable event bus for this graph - enables monitoring - bool enableEvents = false; - - /// Enable advanced state management - for complex workflows with state machines - bool enableStateManager = false; - - /// Enable advanced scheduling features - priority queues, affinity, etc. 
- bool enableAdvancedScheduling = false; - - /// Expected number of nodes (for pre-allocation) - avoids reallocation during execution - size_t expectedNodeCount = 16; - - /// Maximum deferred queue size (0 = unlimited) - prevents unbounded memory growth - size_t maxDeferredNodes = 0; // Unlimited by default - - /// Maximum iterations when processing deferred nodes - controls how aggressively we fill capacity - /// Higher values ensure more aggressive filling of available capacity - size_t maxDeferredProcessingIterations = 10; - - /// Enable debug logging - verbose output for troubleshooting - bool enableDebugLogging = false; - - /// Enable debug registration - makes graph visible in debug tools - bool enableDebugRegistration = false; - - /// Use shared event bus instead of creating own - for system-wide event correlation - std::shared_ptr sharedEventBus = nullptr; - - /// Custom allocator for node storage - integrate with your memory system - std::function nodeAllocator = nullptr; - std::function nodeDeallocator = nullptr; - }; - + static WorkResultContext yieldUntil(std::chrono::steady_clock::time_point when) { + return {WorkResult::YieldUntil, when}; + } +}; + +/** + * @brief The final verdict on how a node's execution went + * + * Simple enum to categorize the outcome of node execution. Used in callbacks + * and events to communicate results without needing to check multiple states. + */ +enum class ExecutionResult : uint8_t +{ + Success = 0, ///< Work function completed without throwing + Failed = 1, ///< Work function threw an exception + Cancelled = 2, ///< Never ran due to parent failure + Skipped = 3 ///< Skipped for other reasons (reserved for future use) +}; + +/** + * @brief Configuration parameters and feature toggles for WorkGraph instances + * + * WorkGraph supports various complexity levels from minimal to feature-rich configurations. + * This structure provides control over optional features and tuning parameters. 
+ * Default values provide a lightweight graph configuration. + * + * The configuration allows customization of graph behavior through feature enablement, + * tuning, and resource management options to match specific use case requirements. + * + * @code + * // Minimal config for simplified setup + * WorkGraphConfig fastConfig; + * fastConfig.expectedNodeCount = 1000; // Pre-allocate storage + * + * // Rich config for debugging and monitoring + * WorkGraphConfig debugConfig; + * debugConfig.enableEvents = true; + * debugConfig.enableDebugLogging = true; + * debugConfig.enableDebugRegistration = true; + * + * // Custom memory management + * WorkGraphConfig customConfig; + * customConfig.nodeAllocator = myPoolAllocator; + * customConfig.nodeDeallocator = myPoolDeallocator; + * @endcode + */ +struct WorkGraphConfig +{ + /// Enable event bus for this graph - enables monitoring + bool enableEvents = false; + + /// Enable advanced state management - for complex workflows with state machines + bool enableStateManager = false; + + /// Enable advanced scheduling features - priority queues, affinity, etc. 
+ bool enableAdvancedScheduling = false; + + /// Expected number of nodes (for pre-allocation) - avoids reallocation during execution + size_t expectedNodeCount = 16; + + /// Maximum deferred queue size (0 = unlimited) - prevents unbounded memory growth + size_t maxDeferredNodes = 0; // Unlimited by default + + /// Maximum iterations when processing deferred nodes - controls how aggressively we fill capacity + /// Higher values ensure more aggressive filling of available capacity + size_t maxDeferredProcessingIterations = 10; + + /// Enable debug logging - verbose output for troubleshooting + bool enableDebugLogging = false; + + /// Enable debug registration - makes graph visible in debug tools + bool enableDebugRegistration = false; + + /// Use shared event bus instead of creating own - for system-wide event correlation + std::shared_ptr sharedEventBus = nullptr; + + /// Custom allocator for node storage - integrate with your memory system + std::function nodeAllocator = nullptr; + std::function nodeDeallocator = nullptr; +}; + +/** + * @brief Real-time metrics for your executing workflow - watch the action unfold + * + * These statistics are updated atomically as your graph executes, giving you + * a live view of what's happening. Since multiple threads update these counters + * simultaneously, they might be slightly inconsistent if read individually. + * + * That's why we provide toSnapshot() - it grabs all values at once for a + * coherent view. Perfect for progress bars, dashboards, or post-mortem analysis. + * + * Memory usage tracking helps you understand the footprint of large workflows. + * Execution time is wall-clock time from first node start to last node finish. 
+ */ +struct WorkGraphStats +{ + std::atomic totalNodes{0}; ///< How many nodes exist in the graph + std::atomic completedNodes{0}; ///< Successfully finished nodes + std::atomic failedNodes{0}; ///< Nodes that threw exceptions + std::atomic cancelledNodes{0}; ///< Nodes skipped due to parent failure + std::atomic pendingNodes{0}; ///< Waiting for dependencies + std::atomic readyNodes{0}; ///< Ready but not yet scheduled + std::atomic scheduledNodes{0}; ///< In the work queue + std::atomic executingNodes{0}; ///< Currently running + + std::atomic memoryUsage{0}; ///< Approximate memory consumption + std::chrono::steady_clock::duration totalExecutionTime{}; ///< Total wall time + /** - * @brief Real-time metrics for your executing workflow - watch the action unfold - * - * These statistics are updated atomically as your graph executes, giving you - * a live view of what's happening. Since multiple threads update these counters - * simultaneously, they might be slightly inconsistent if read individually. - * - * That's why we provide toSnapshot() - it grabs all values at once for a - * coherent view. Perfect for progress bars, dashboards, or post-mortem analysis. - * - * Memory usage tracking helps you understand the footprint of large workflows. - * Execution time is wall-clock time from first node start to last node finish. + * @brief Frozen moment in time - all stats captured atomically + * + * Since the live stats change constantly, this snapshot gives you a + * consistent view where all numbers add up correctly. The individual + * atomic loads use relaxed ordering for speed - we don't need strict + * ordering since we're just reading counters. 
*/ - struct WorkGraphStats { - std::atomic totalNodes{0}; ///< How many nodes exist in the graph - std::atomic completedNodes{0}; ///< Successfully finished nodes - std::atomic failedNodes{0}; ///< Nodes that threw exceptions - std::atomic cancelledNodes{0}; ///< Nodes skipped due to parent failure - std::atomic pendingNodes{0}; ///< Waiting for dependencies - std::atomic readyNodes{0}; ///< Ready but not yet scheduled - std::atomic scheduledNodes{0}; ///< In the work queue - std::atomic executingNodes{0}; ///< Currently running - - std::atomic memoryUsage{0}; ///< Approximate memory consumption - std::chrono::steady_clock::duration totalExecutionTime{}; ///< Total wall time - - /** - * @brief Frozen moment in time - all stats captured atomically - * - * Since the live stats change constantly, this snapshot gives you a - * consistent view where all numbers add up correctly. The individual - * atomic loads use relaxed ordering for speed - we don't need strict - * ordering since we're just reading counters. 
- */ - struct Snapshot { - uint32_t totalNodes = 0; - uint32_t completedNodes = 0; - uint32_t failedNodes = 0; - uint32_t cancelledNodes = 0; - uint32_t pendingNodes = 0; - uint32_t readyNodes = 0; - uint32_t scheduledNodes = 0; - uint32_t executingNodes = 0; - size_t memoryUsage = 0; - std::chrono::steady_clock::duration totalExecutionTime{}; - }; - - /** - * @brief Capture all statistics in one consistent snapshot - * @return Snapshot with all counters captured atomically - */ - Snapshot toSnapshot() const { - Snapshot snap; - snap.totalNodes = totalNodes.load(std::memory_order_relaxed); - snap.completedNodes = completedNodes.load(std::memory_order_relaxed); - snap.failedNodes = failedNodes.load(std::memory_order_relaxed); - snap.cancelledNodes = cancelledNodes.load(std::memory_order_relaxed); - snap.pendingNodes = pendingNodes.load(std::memory_order_relaxed); - snap.readyNodes = readyNodes.load(std::memory_order_relaxed); - snap.scheduledNodes = scheduledNodes.load(std::memory_order_relaxed); - snap.executingNodes = executingNodes.load(std::memory_order_relaxed); - snap.memoryUsage = memoryUsage.load(std::memory_order_relaxed); - snap.totalExecutionTime = totalExecutionTime; - return snap; - } + struct Snapshot + { + uint32_t totalNodes = 0; + uint32_t completedNodes = 0; + uint32_t failedNodes = 0; + uint32_t cancelledNodes = 0; + uint32_t pendingNodes = 0; + uint32_t readyNodes = 0; + uint32_t scheduledNodes = 0; + uint32_t executingNodes = 0; + size_t memoryUsage = 0; + std::chrono::steady_clock::duration totalExecutionTime{}; }; - - // Type aliases for clarity - the common types you'll use everywhere - using NodeHandle = Graph::AcyclicNodeHandle; ///< How you reference nodes - using NodeCallback = std::function; ///< Callbacks that receive nodes - using WorkFunction = std::function; ///< The actual work to execute (legacy) - using YieldableWorkFunction = std::function; ///< Work that can yield/suspend with timing - using CompletionCallback = std::function; 
///< Notified when work completes - + /** - * @brief Check if a node has reached the end of its journey - * - * Terminal states (Completed, Failed, Cancelled) are final. - * Yielded is NOT terminal - the node will resume execution. - * - * @param state The state to check - * @return true if this is a final state + * @brief Capture all statistics in one consistent snapshot + * @return Snapshot with all counters captured atomically */ - inline constexpr bool isTerminalState(NodeState state) { - return state == NodeState::Completed || - state == NodeState::Failed || - state == NodeState::Cancelled; + Snapshot toSnapshot() const { + Snapshot snap; + snap.totalNodes = totalNodes.load(std::memory_order_relaxed); + snap.completedNodes = completedNodes.load(std::memory_order_relaxed); + snap.failedNodes = failedNodes.load(std::memory_order_relaxed); + snap.cancelledNodes = cancelledNodes.load(std::memory_order_relaxed); + snap.pendingNodes = pendingNodes.load(std::memory_order_relaxed); + snap.readyNodes = readyNodes.load(std::memory_order_relaxed); + snap.scheduledNodes = scheduledNodes.load(std::memory_order_relaxed); + snap.executingNodes = executingNodes.load(std::memory_order_relaxed); + snap.memoryUsage = memoryUsage.load(std::memory_order_relaxed); + snap.totalExecutionTime = totalExecutionTime; + return snap; } - - /** - * @brief Validates state transitions - prevents impossible state changes - * - * Valid transitions: - * - Pending → Ready or Cancelled - * - Ready → Scheduled or Cancelled - * - Scheduled → Executing or Cancelled - * - Executing → Completed, Failed, or Yielded - * - Yielded → Ready (for rescheduling) - * - Terminal states → Nothing - * - * @param from Current state - * @param to Desired new state - * @return true if the transition is legal - */ - inline constexpr bool isValidTransition(NodeState from, NodeState to) { - // Terminal states cannot transition - if (isTerminalState(from)) { +}; + +// Type aliases for clarity - the common types you'll 
use everywhere +using NodeHandle = Graph::AcyclicNodeHandle; ///< How you reference nodes +using NodeCallback = std::function; ///< Callbacks that receive nodes +using WorkFunction = std::function; ///< The actual work to execute (legacy) +using YieldableWorkFunction = std::function; ///< Work that can yield/suspend with timing +using CompletionCallback = std::function; ///< Notified when work completes + +/** + * @brief Check if a node has reached the end of its journey + * + * Terminal states (Completed, Failed, Cancelled) are final. + * Yielded is NOT terminal - the node will resume execution. + * + * @param state The state to check + * @return true if this is a final state + */ +inline constexpr bool isTerminalState(NodeState state) { + return state == NodeState::Completed || state == NodeState::Failed || state == NodeState::Cancelled; +} + +/** + * @brief Validates state transitions - prevents impossible state changes + * + * Valid transitions: + * - Pending → Ready or Cancelled + * - Ready → Scheduled or Cancelled + * - Scheduled → Executing or Cancelled + * - Executing → Completed, Failed, or Yielded + * - Yielded → Ready (for rescheduling) + * - Terminal states → Nothing + * + * @param from Current state + * @param to Desired new state + * @return true if the transition is legal + */ +inline constexpr bool isValidTransition(NodeState from, NodeState to) { + // Terminal states cannot transition + if (isTerminalState(from)) { + return false; + } + + // Define valid transitions + switch (from) { + case NodeState::Pending: + return to == NodeState::Ready || to == NodeState::Cancelled; + + case NodeState::Ready: + return to == NodeState::Scheduled || to == NodeState::Cancelled; + + case NodeState::Scheduled: + return to == NodeState::Executing || to == NodeState::Cancelled; + + case NodeState::Executing: + return to == NodeState::Completed || to == NodeState::Failed || to == NodeState::Yielded; + + case NodeState::Yielded: + return to == NodeState::Ready || to 
== NodeState::Cancelled; + + default: return false; - } - - // Define valid transitions - switch (from) { - case NodeState::Pending: - return to == NodeState::Ready || to == NodeState::Cancelled; - - case NodeState::Ready: - return to == NodeState::Scheduled || to == NodeState::Cancelled; - - case NodeState::Scheduled: - return to == NodeState::Executing || to == NodeState::Cancelled; - - case NodeState::Executing: - return to == NodeState::Completed || to == NodeState::Failed || to == NodeState::Yielded; - - case NodeState::Yielded: - return to == NodeState::Ready || to == NodeState::Cancelled; - - default: - return false; - } } - - /** - * @brief Human-readable state names for logging and debugging - * - * @param state The state to stringify - * @return Static string representation (no allocation) - * - * @code - * LOG_DEBUG("Node {} transitioned to {}", - * node.getData()->name, - * nodeStateToString(node.getData()->state)); - * @endcode - */ - inline const char* nodeStateToString(NodeState state) { - switch (state) { - case NodeState::Pending: return "Pending"; - case NodeState::Ready: return "Ready"; - case NodeState::Scheduled: return "Scheduled"; - case NodeState::Executing: return "Executing"; - case NodeState::Completed: return "Completed"; - case NodeState::Failed: return "Failed"; - case NodeState::Cancelled: return "Cancelled"; - case NodeState::Yielded: return "Yielded"; - default: return "Unknown"; - } +} + +/** + * @brief Human-readable state names for logging and debugging + * + * @param state The state to stringify + * @return Static string representation (no allocation) + * + * @code + * LOG_DEBUG("Node {} transitioned to {}", + * node.getData()->name, + * nodeStateToString(node.getData()->state)); + * @endcode + */ +inline const char* nodeStateToString(NodeState state) { + switch (state) { + case NodeState::Pending: + return "Pending"; + case NodeState::Ready: + return "Ready"; + case NodeState::Scheduled: + return "Scheduled"; + case 
NodeState::Executing: + return "Executing"; + case NodeState::Completed: + return "Completed"; + case NodeState::Failed: + return "Failed"; + case NodeState::Cancelled: + return "Cancelled"; + case NodeState::Yielded: + return "Yielded"; + default: + return "Unknown"; } - -} // namespace Concurrency -} // namespace Core -} // namespace EntropyEngine +} + +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine // Hash specialization for NodeHandle - must be defined before any use in unordered_map -namespace std { - /** - * @brief Allows NodeHandle to be used as key in unordered_map/unordered_set - * - * This specialization enables you to use NodeHandle as a key in hash-based - * containers. We hash based on the internal pointer, which is unique per node. - * - * @code - * std::unordered_map nodePriorities; - * nodePriorities[myNode] = 5; // Works thanks to this specialization - * @endcode - */ - template<> - struct hash { - size_t operator()(const EntropyEngine::Core::Concurrency::NodeHandle& handle) const { - // Combine owner pointer and packed handle id (index:generation) for stable identity - auto owner = reinterpret_cast(handle.handleOwner()); - uint64_t id = handle.handleId(); - // Fowler–Noll–Vo or a simple mix; keep it simple and fast - uint64_t mixed = static_cast(owner) ^ (id + 0x9E3779B97F4A7C15ULL + (static_cast(owner) << 6) + (static_cast(owner) >> 2)); - return std::hash{}(mixed); - } - }; -} \ No newline at end of file +namespace std +{ +/** + * @brief Allows NodeHandle to be used as key in unordered_map/unordered_set + * + * This specialization enables you to use NodeHandle as a key in hash-based + * containers. We hash based on the internal pointer, which is unique per node. 
+ * + * @code + * std::unordered_map nodePriorities; + * nodePriorities[myNode] = 5; // Works thanks to this specialization + * @endcode + */ +template <> +struct hash +{ + size_t operator()(const EntropyEngine::Core::Concurrency::NodeHandle& handle) const { + // Combine owner pointer and packed handle id (index:generation) for stable identity + auto owner = reinterpret_cast(handle.handleOwner()); + uint64_t id = handle.handleId(); + // Fowler–Noll–Vo or a simple mix; keep it simple and fast + uint64_t mixed = + static_cast(owner) ^ + (id + 0x9E3779B97F4A7C15ULL + (static_cast(owner) << 6) + (static_cast(owner) >> 2)); + return std::hash{}(mixed); + } +}; +} // namespace std diff --git a/src/Concurrency/WorkService.cpp b/src/Concurrency/WorkService.cpp index ed8e154..8b8cb36 100644 --- a/src/Concurrency/WorkService.cpp +++ b/src/Concurrency/WorkService.cpp @@ -3,405 +3,401 @@ // #include "WorkService.h" -#include + #include +#include #include +#include "AdaptiveRankingScheduler.h" #include "WorkContractGroup.h" #include "WorkGraph.h" -#include "AdaptiveRankingScheduler.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { - thread_local size_t WorkService::stSoftFailureCount = 0; - thread_local size_t WorkService::stThreadId = 0; +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ +thread_local size_t WorkService::stSoftFailureCount = 0; +thread_local size_t WorkService::stThreadId = 0; + +WorkService::WorkService(Config config, std::unique_ptr scheduler) : _config(config) { + // Always clamp to a range of 1 to hardware concurrency. 
+ if (_config.threadCount == 0) { + _config.threadCount = std::thread::hardware_concurrency(); + } + _config.threadCount = std::clamp(_config.threadCount, (uint32_t)1, std::thread::hardware_concurrency()); - WorkService::WorkService(Config config, std::unique_ptr scheduler) - : _config(config) { + // Update scheduler config with thread count + _config.schedulerConfig.threadCount = _config.threadCount; - // Always clamp to a range of 1 to hardware concurrency. - if (_config.threadCount == 0) { - _config.threadCount = std::thread::hardware_concurrency(); - } - _config.threadCount = std::clamp(_config.threadCount, (uint32_t)1, std::thread::hardware_concurrency()); + // Create scheduler if not provided + if (!scheduler) { + _scheduler = std::make_unique(_config.schedulerConfig); + } else { + _scheduler = std::move(scheduler); + } +} - // Update scheduler config with thread count - _config.schedulerConfig.threadCount = _config.threadCount; +WorkService::~WorkService() { + stop(); + clear(); +} - // Create scheduler if not provided - if (!scheduler) { - _scheduler = std::make_unique(_config.schedulerConfig); - } else { - _scheduler = std::move(scheduler); - } +void WorkService::start() { + if (_running) { + return; // Already running } - WorkService::~WorkService() { - stop(); - clear(); + for (uint32_t i = 0; i < _config.threadCount; i++) { + _threads.emplace_back([this, threadId = i](const std::stop_token& stoken) { + stThreadId = threadId; + executeWork(stoken); + }); } - void WorkService::start() { - if (_running) { - return; // Already running - } - - for (uint32_t i = 0; i < _config.threadCount; i++) { - _threads.emplace_back([this, threadId = i](const std::stop_token& stoken) { - stThreadId = threadId; - executeWork(stoken); - }); - } + _running = true; +} - _running = true; +void WorkService::requestStop() { + for (auto& thread : _threads) { + thread.request_stop(); } - void WorkService::requestStop() { - for (auto &thread : _threads) { - thread.request_stop(); - 
} + // Wake up any threads waiting on the condition variable + _workAvailable = true; + _workAvailableCV.notify_all(); +} - // Wake up any threads waiting on the condition variable - _workAvailable = true; - _workAvailableCV.notify_all(); +void WorkService::waitForStop() { + for (auto& thread : _threads) { + if (thread.joinable()) { + thread.join(); + } } - void WorkService::waitForStop() { - for (auto &thread : _threads) { - if (thread.joinable()) { - thread.join(); - } - } + _threads.clear(); + _running = false; + + // Reset thread-local state after all threads have stopped + resetThreadLocalState(); +} + +void WorkService::stop() { + requestStop(); + waitForStop(); +} - _threads.clear(); - _running = false; +bool WorkService::isRunning() const { + return _running; +} - // Reset thread-local state after all threads have stopped - resetThreadLocalState(); +void WorkService::clear() { + std::unique_lock lock(_workContractGroupsMutex); + + // Release and disconnect all groups we retained on add + for (auto* group : _workContractGroups) { + if (group) { + // Clear back-reference so the group won't call back into us during destruction + group->setConcurrencyProvider(nullptr); + // Release the retain acquired in addWorkContractGroup + group->release(); + } } - void WorkService::stop() { - requestStop(); - waitForStop(); + _workContractGroups.clear(); + _workContractGroupCount = 0; + + // Notify scheduler + _scheduler->notifyGroupsChanged({}); + _scheduler->reset(); +} + +WorkService::GroupOperationStatus WorkService::addWorkContractGroup(WorkContractGroup* contractGroup) { + // This is generally MUCH simpler than the old atomic stuff. + // Old atomic tracking of the contract group had a bunch of epoch-based tracking that was very complex. + // Also required reclamation of vectors that honestly just isn't worth it on a cold path like this. 
+ std::unique_lock lock(_workContractGroupsMutex); + + // Check for existence to prevent duplicates + if (std::find(_workContractGroups.begin(), _workContractGroups.end(), contractGroup) != _workContractGroups.end()) { + return GroupOperationStatus::Exists; } - bool WorkService::isRunning() const { - return _running; + // Add the group + _workContractGroups.push_back(contractGroup); + _workContractGroupCount++; + + // Retain the group while registered with the service (ref-counted semantics) + if (contractGroup) { + contractGroup->retain(); } - void WorkService::clear() { - std::unique_lock lock(_workContractGroupsMutex); + // Notify scheduler of group change + _scheduler->notifyGroupsChanged(_workContractGroups); - // Release and disconnect all groups we retained on add - for (auto* group : _workContractGroups) { - if (group) { - // Clear back-reference so the group won't call back into us during destruction - group->setConcurrencyProvider(nullptr); - // Release the retain acquired in addWorkContractGroup - group->release(); - } - } + // Set ourselves as the concurrency provider for this group + contractGroup->setConcurrencyProvider(this); - _workContractGroups.clear(); - _workContractGroupCount = 0; + return GroupOperationStatus::Added; +} - // Notify scheduler - _scheduler->notifyGroupsChanged({}); - _scheduler->reset(); - } +WorkService::GroupOperationStatus WorkService::removeWorkContractGroup(WorkContractGroup* contractGroup) { + // First, stop the group to prevent new work selection + // Workers will skip this group via isStopping() checks + contractGroup->stop(); - WorkService::GroupOperationStatus WorkService::addWorkContractGroup(WorkContractGroup* contractGroup) { - // This is generally MUCH simpler than the old atomic stuff. - // Old atomic tracking of the contract group had a bunch of epoch-based tracking that was very complex. - // Also required reclamation of vectors that honestly just isn't worth it on a cold path like this. 
+ { std::unique_lock lock(_workContractGroupsMutex); - // Check for existence to prevent duplicates - if (std::find(_workContractGroups.begin(), _workContractGroups.end(), contractGroup) != _workContractGroups.end()) { - return GroupOperationStatus::Exists; + auto it = std::find(_workContractGroups.begin(), _workContractGroups.end(), contractGroup); + if (it == _workContractGroups.end()) { + return GroupOperationStatus::NotFound; } - // Add the group - _workContractGroups.push_back(contractGroup); - _workContractGroupCount++; - - // Retain the group while registered with the service (ref-counted semantics) - if (contractGroup) { - contractGroup->retain(); - } + // Remove the group from the list + _workContractGroups.erase(it); + _workContractGroupCount--; // Notify scheduler of group change _scheduler->notifyGroupsChanged(_workContractGroups); - // Set ourselves as the concurrency provider for this group - contractGroup->setConcurrencyProvider(this); - - return GroupOperationStatus::Added; + // Clear the concurrency provider for this group + contractGroup->setConcurrencyProvider(nullptr); } + // Lock released here - WorkService::GroupOperationStatus WorkService::removeWorkContractGroup(WorkContractGroup* contractGroup) { - // First, stop the group to prevent new work selection - // Workers will skip this group via isStopping() checks - contractGroup->stop(); - - { - std::unique_lock lock(_workContractGroupsMutex); + // Wait for any in-flight contract executions to complete + // This ensures no worker is actively using the group + contractGroup->wait(); - auto it = std::find(_workContractGroups.begin(), _workContractGroups.end(), contractGroup); - if (it == _workContractGroups.end()) { - return GroupOperationStatus::NotFound; - } + // Now safe to release our reference + contractGroup->release(); - // Remove the group from the list - _workContractGroups.erase(it); - _workContractGroupCount--; + return GroupOperationStatus::Removed; +} - // Notify scheduler of group 
change - _scheduler->notifyGroupsChanged(_workContractGroups); +size_t WorkService::getWorkContractGroupCount() const { + std::shared_lock lock(_workContractGroupsMutex); + return _workContractGroupCount; +} - // Clear the concurrency provider for this group - contractGroup->setConcurrencyProvider(nullptr); - } - // Lock released here +size_t WorkService::getThreadCount() const { + return _config.threadCount; +} - // Wait for any in-flight contract executions to complete - // This ensures no worker is actively using the group - contractGroup->wait(); +size_t WorkService::getSoftFailureCount() const { + return _config.maxSoftFailureCount; +} - // Now safe to release our reference - contractGroup->release(); - - return GroupOperationStatus::Removed; +size_t WorkService::setSoftFailureCount(size_t softFailureCount) { + if (softFailureCount != _config.maxSoftFailureCount) { + _config.maxSoftFailureCount = softFailureCount; } + return _config.maxSoftFailureCount; +} - size_t WorkService::getWorkContractGroupCount() const { - std::shared_lock lock(_workContractGroupsMutex); - return _workContractGroupCount; - } +size_t WorkService::getFailureSleepTime() const { + return _config.failureSleepTime; +} - size_t WorkService::getThreadCount() const { - return _config.threadCount; +size_t WorkService::setFailureSleepTime(size_t failureSleepTime) { + if (failureSleepTime != _config.failureSleepTime) { + _config.failureSleepTime = failureSleepTime; } - size_t WorkService::getSoftFailureCount() const { - return _config.maxSoftFailureCount; - } + return _config.failureSleepTime; +} - size_t WorkService::setSoftFailureCount(size_t softFailureCount) { - if (softFailureCount != _config.maxSoftFailureCount) { - _config.maxSoftFailureCount = softFailureCount; - } - return _config.maxSoftFailureCount; - } +void WorkService::executeWork(const std::stop_token& token) { + WorkContractGroup* lastExecutedGroup = nullptr; - size_t WorkService::getFailureSleepTime() const { - return 
_config.failureSleepTime; - } + while (!token.stop_requested()) { + WorkContractGroup* selectedGroup = nullptr; - size_t WorkService::setFailureSleepTime(size_t failureSleepTime) { - if (failureSleepTime != _config.failureSleepTime) { - _config.failureSleepTime = failureSleepTime; - } + // Hold shared_lock while reading from _workContractGroups + // Multiple workers can hold shared_lock concurrently + // removeWorkContractGroup() with unique_lock will wait for all readers + { + std::shared_lock lock(_workContractGroupsMutex); - return _config.failureSleepTime; - } + if (!_workContractGroups.empty()) { + // Create scheduling context + IWorkScheduler::SchedulingContext context{stThreadId, stSoftFailureCount, lastExecutedGroup}; + + // Ask scheduler for next group - reads directly from _workContractGroups + auto scheduleResult = _scheduler->selectNextGroup(_workContractGroups, context); - void WorkService::executeWork(const std::stop_token& token) { - WorkContractGroup* lastExecutedGroup = nullptr; - - while (!token.stop_requested()) { - WorkContractGroup* selectedGroup = nullptr; - - // Hold shared_lock while reading from _workContractGroups - // Multiple workers can hold shared_lock concurrently - // removeWorkContractGroup() with unique_lock will wait for all readers - { - std::shared_lock lock(_workContractGroupsMutex); - - if (!_workContractGroups.empty()) { - // Create scheduling context - IWorkScheduler::SchedulingContext context{ - stThreadId, - stSoftFailureCount, - lastExecutedGroup - }; - - // Ask scheduler for next group - reads directly from _workContractGroups - auto scheduleResult = _scheduler->selectNextGroup(_workContractGroups, context); - - // Select group if valid and not stopping - if (scheduleResult.group && !scheduleResult.group->isStopping()) { - selectedGroup = scheduleResult.group; - } + // Select group if valid and not stopping + if (scheduleResult.group && !scheduleResult.group->isStopping()) { + selectedGroup = scheduleResult.group; } } - 
// Shared lock released here - - if (!selectedGroup) { - // No work found - check for ready timers before sleeping - checkTimedDeferrals(); + } + // Shared lock released here + + if (!selectedGroup) { + // No work found - check for ready timers before sleeping + checkTimedDeferrals(); + + // Use condition variable for efficient waiting (100us timeout as safety valve) + std::unique_lock lock(_workAvailableMutex); + _workAvailable = false; + _workAvailableCV.wait_for(lock, std::chrono::microseconds(100), + [this, &token]() { return _workAvailable.load() || token.stop_requested(); }); + stSoftFailureCount = 0; + continue; + } - // Use condition variable for efficient waiting (100us timeout as safety valve) - std::unique_lock lock(_workAvailableMutex); - _workAvailable = false; - _workAvailableCV.wait_for(lock, std::chrono::microseconds(100), [this, &token]() { - return _workAvailable.load() || token.stop_requested(); - }); - stSoftFailureCount = 0; - continue; + // Try to get work from selected group + auto contract = selectedGroup->selectForExecution(); + if (contract.valid()) { + // Check stop token again before executing work to prevent deadlocks during shutdown + if (token.stop_requested()) { + // Abort without executing: transition Executing -> Free safely during shutdown + selectedGroup->abortExecution(contract); + break; } - // Try to get work from selected group - auto contract = selectedGroup->selectForExecution(); - if (contract.valid()) { - // Check stop token again before executing work to prevent deadlocks during shutdown - if (token.stop_requested()) { - // Abort without executing: transition Executing -> Free safely during shutdown - selectedGroup->abortExecution(contract); - break; - } + // Execute the work (includes all cleanup) + selectedGroup->executeContract(contract); - // Execute the work (includes all cleanup) - selectedGroup->executeContract(contract); + // Notify scheduler of successful execution + _scheduler->notifyWorkExecuted(selectedGroup, 
stThreadId); - // Notify scheduler of successful execution - _scheduler->notifyWorkExecuted(selectedGroup, stThreadId); + // Update tracking + lastExecutedGroup = selectedGroup; + stSoftFailureCount = 0; + } else { + stSoftFailureCount++; + if (stSoftFailureCount >= _config.maxSoftFailureCount) { + // Check for ready timers before sleeping + checkTimedDeferrals(); - // Update tracking - lastExecutedGroup = selectedGroup; + // Use condition variable for efficient waiting (1ms timeout as safety valve) + std::unique_lock lock(_workAvailableMutex); + _workAvailable = false; + _workAvailableCV.wait_for(lock, std::chrono::milliseconds(1), + [this, &token]() { return _workAvailable.load() || token.stop_requested(); }); stSoftFailureCount = 0; } else { - stSoftFailureCount++; - if (stSoftFailureCount >= _config.maxSoftFailureCount) { - // Check for ready timers before sleeping - checkTimedDeferrals(); - - // Use condition variable for efficient waiting (1ms timeout as safety valve) - std::unique_lock lock(_workAvailableMutex); - _workAvailable = false; - _workAvailableCV.wait_for(lock, std::chrono::milliseconds(1), [this, &token]() { - return _workAvailable.load() || token.stop_requested(); - }); - stSoftFailureCount = 0; - } else { - std::this_thread::yield(); - } + std::this_thread::yield(); } } } - - void WorkService::checkTimedDeferrals() { - // Check all work contract groups for ready timed deferrals - // WorkGraph overrides checkTimedDeferrals() to check its timer queue, - // while base WorkContractGroup returns 0 (no timers) - size_t totalScheduled = 0; - { - std::shared_lock lock(_workContractGroupsMutex); - for (auto* group : _workContractGroups) { - totalScheduled += group->checkTimedDeferrals(); - } - } - - // If any timers were scheduled, wake up waiting worker threads - if (totalScheduled > 0) { - notifyWorkAvailable(); +} + +void WorkService::checkTimedDeferrals() { + // Check all work contract groups for ready timed deferrals + // WorkGraph overrides 
checkTimedDeferrals() to check its timer queue, + // while base WorkContractGroup returns 0 (no timers) + size_t totalScheduled = 0; + { + std::shared_lock lock(_workContractGroupsMutex); + for (auto* group : _workContractGroups) { + totalScheduled += group->checkTimedDeferrals(); } } - void WorkService::notifyWorkAvailable(WorkContractGroup* group) { - // We don't need to track which group has work, just that work is available - _workAvailable = true; - _workAvailableCV.notify_one(); + // If any timers were scheduled, wake up waiting worker threads + if (totalScheduled > 0) { + notifyWorkAvailable(); } +} - void WorkService::notifyGroupDestroyed(WorkContractGroup* group) { - // When a group is destroyed, remove it without touching refcount to avoid - // releasing during its destructor. - std::unique_lock lock(_workContractGroupsMutex); +void WorkService::notifyWorkAvailable(WorkContractGroup* group) { + // We don't need to track which group has work, just that work is available + _workAvailable = true; + _workAvailableCV.notify_one(); +} - auto it = std::find(_workContractGroups.begin(), _workContractGroups.end(), group); - if (it != _workContractGroups.end()) { - _workContractGroups.erase(it); - _workContractGroupCount--; +void WorkService::notifyGroupDestroyed(WorkContractGroup* group) { + // When a group is destroyed, remove it without touching refcount to avoid + // releasing during its destructor. 
+ std::unique_lock lock(_workContractGroupsMutex); - // Notify scheduler of group change - _scheduler->notifyGroupsChanged(_workContractGroups); + auto it = std::find(_workContractGroups.begin(), _workContractGroups.end(), group); + if (it != _workContractGroups.end()) { + _workContractGroups.erase(it); + _workContractGroupCount--; - // Clear the concurrency provider for this group (no release here) - group->setConcurrencyProvider(nullptr); - } - } + // Notify scheduler of group change + _scheduler->notifyGroupsChanged(_workContractGroups); - void WorkService::resetThreadLocalState() { - // This only resets the thread-local state in the calling thread, - // not in the worker threads. The worker threads reset their own - // state when they exit in the lambda function in start(). - stSoftFailureCount = 0; - stThreadId = 0; + // Clear the concurrency provider for this group (no release here) + group->setConcurrencyProvider(nullptr); + } +} + +void WorkService::resetThreadLocalState() { + // This only resets the thread-local state in the calling thread, + // not in the worker threads. The worker threads reset their own + // state when they exit in the lambda function in start(). 
+ stSoftFailureCount = 0; + stThreadId = 0; +} + +WorkService::MainThreadWorkResult WorkService::executeMainThreadWork(size_t maxContracts) { + MainThreadWorkResult result{0, 0, false}; + + // Get current snapshot of groups + std::vector groups; + { + std::shared_lock lock(_workContractGroupsMutex); + groups = _workContractGroups; } - - WorkService::MainThreadWorkResult WorkService::executeMainThreadWork(size_t maxContracts) { - MainThreadWorkResult result{0, 0, false}; - // Get current snapshot of groups - std::vector groups; - { - std::shared_lock lock(_workContractGroupsMutex); - groups = _workContractGroups; - } + size_t remaining = maxContracts; - size_t remaining = maxContracts; + // Execute work from each group that has main thread work + for (auto* group : groups) { + if (group && group->hasMainThreadWork()) { + result.groupsWithWork++; + size_t executed = group->executeMainThreadWork(remaining); + result.contractsExecuted += executed; + remaining -= executed; - // Execute work from each group that has main thread work - for (auto* group : groups) { - if (group && group->hasMainThreadWork()) { - result.groupsWithWork++; - size_t executed = group->executeMainThreadWork(remaining); - result.contractsExecuted += executed; - remaining -= executed; - - // Stop if we've hit our limit - if (remaining == 0) { - result.moreWorkAvailable = true; - break; - } + // Stop if we've hit our limit + if (remaining == 0) { + result.moreWorkAvailable = true; + break; } } - - // Check if there's more work available - if (remaining > 0) { - for (auto* group : groups) { - if (group && group->hasMainThreadWork()) { - result.moreWorkAvailable = true; - break; - } + } + + // Check if there's more work available + if (remaining > 0) { + for (auto* group : groups) { + if (group && group->hasMainThreadWork()) { + result.moreWorkAvailable = true; + break; } } - - return result; } - - size_t WorkService::executeMainThreadWork(WorkContractGroup* group, size_t maxContracts) { - if (!group) 
{ - return 0; - } - - return group->executeMainThreadWork(maxContracts); + + return result; +} + +size_t WorkService::executeMainThreadWork(WorkContractGroup* group, size_t maxContracts) { + if (!group) { + return 0; } - - bool WorkService::hasMainThreadWork() const { - std::shared_lock lock(_workContractGroupsMutex); - for (auto* group : _workContractGroups) { - if (group && group->hasMainThreadWork()) { - return true; - } + return group->executeMainThreadWork(maxContracts); +} + +bool WorkService::hasMainThreadWork() const { + std::shared_lock lock(_workContractGroupsMutex); + + for (auto* group : _workContractGroups) { + if (group && group->hasMainThreadWork()) { + return true; } - - return false; } -} // Concurrency -} // Core -} // EntropyEngine \ No newline at end of file + return false; +} + +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Concurrency/WorkService.h b/src/Concurrency/WorkService.h index d48142d..b83ce50 100644 --- a/src/Concurrency/WorkService.h +++ b/src/Concurrency/WorkService.h @@ -18,21 +18,25 @@ */ #pragma once -#include -#include -#include #include -#include -#include #include #include -#include "IWorkScheduler.h" -#include "IConcurrencyProvider.h" +#include +#include +#include +#include +#include + #include "Core/EntropyService.h" +#include "IConcurrencyProvider.h" +#include "IWorkScheduler.h" -namespace EntropyEngine { -namespace Core { -namespace Concurrency { +namespace EntropyEngine +{ +namespace Core +{ +namespace Concurrency +{ class WorkContractGroup; /** @@ -82,15 +86,16 @@ class WorkContractGroup; * service.stop(); * @endcode */ -class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::EntropyService { +class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::EntropyService +{ // Shared mutex management of work contract groups // HOT PATH (executeWork): shared_lock for concurrent reads // COLD PATH (add/remove): unique_lock for exclusive 
writes mutable std::shared_mutex _workContractGroupsMutex; std::vector _workContractGroups; - size_t _workContractGroupCount = 0; ///< Current count of work contract groups - std::vector _threads; ///< Worker threads that execute contracts - std::unique_ptr _scheduler; ///< Scheduler strategy for selecting work groups + size_t _workContractGroupCount = 0; ///< Current count of work contract groups + std::vector _threads; ///< Worker threads that execute contracts + std::unique_ptr _scheduler; ///< Scheduler strategy for selecting work groups std::atomic _running = false; @@ -102,16 +107,17 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E public: /** * @brief Result structure for main thread work execution - * + * * Provides detailed information about the execution results to the caller, * allowing them to make informed decisions about scheduling and performance. */ - struct MainThreadWorkResult { - size_t contractsExecuted; ///< Number of contracts actually executed - size_t groupsWithWork; ///< Number of groups that had work available - bool moreWorkAvailable; ///< Whether there's more work that could be executed + struct MainThreadWorkResult + { + size_t contractsExecuted; ///< Number of contracts actually executed + size_t groupsWithWork; ///< Number of groups that had work available + bool moreWorkAvailable; ///< Whether there's more work that could be executed }; - + /** * @brief Configuration parameters for the work service. * @@ -119,13 +125,15 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E * work well for general-purpose work distribution, but you might want to adjust * them based on your use case. */ - struct Config { - uint32_t threadCount = 0; ///< Worker thread count - 0 means use all CPU cores - size_t maxSoftFailureCount = 5; ///< Number of times work selection is allowed to fail before sleeping. Yields after every failure. 
- size_t failureSleepTime = 1; ///< Sleep duration in nanoseconds when no work found - prevents CPU spinning + struct Config + { + uint32_t threadCount = 0; ///< Worker thread count - 0 means use all CPU cores + size_t maxSoftFailureCount = + 5; ///< Number of times work selection is allowed to fail before sleeping. Yields after every failure. + size_t failureSleepTime = 1; ///< Sleep duration in nanoseconds when no work found - prevents CPU spinning // Scheduler-specific configuration - IWorkScheduler::Config schedulerConfig; ///< Configuration passed to scheduler + IWorkScheduler::Config schedulerConfig; ///< Configuration passed to scheduler }; /** @@ -162,17 +170,31 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E */ ~WorkService(); - // EntropyService identity and lifecycle - const char* id() const override { return "com.entropy.core.work"; } - const char* name() const override { return "WorkService"; } - const char* version() const override { return "0.1.0"; } - // RTTI-less static type identity and dependencies - TypeSystem::TypeID typeId() const override { return TypeSystem::createTypeId(); } - std::vector dependsOnTypes() const override { return {}; } - std::vector dependsOn() const override { return {}; } - - void load() override { /* no-op: configured on construction */ } - void unload() override { clear(); } + // EntropyService identity and lifecycle + const char* id() const override { + return "com.entropy.core.work"; + } + const char* name() const override { + return "WorkService"; + } + const char* version() const override { + return "0.1.0"; + } + // RTTI-less static type identity and dependencies + TypeSystem::TypeID typeId() const override { + return TypeSystem::createTypeId(); + } + std::vector dependsOnTypes() const override { + return {}; + } + std::vector dependsOn() const override { + return {}; + } + + void load() override { /* no-op: configured on construction */ } + void unload() override { + clear(); + } 
/** * @brief Starts the worker threads and begins executing work. @@ -240,7 +262,8 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E */ void clear(); - enum class GroupOperationStatus { + enum class GroupOperationStatus + { Added = 0, Removed = 1, OutOfSpace = 2, @@ -339,42 +362,42 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E /** * @brief Execute main thread targeted work from all registered groups - * + * * Call from your main thread to process UI, rendering, or other main-thread-only * work. Distributes execution fairly across groups. Use maxContracts to limit * work per frame and maintain responsiveness. - * + * * @param maxContracts Maximum number of contracts to execute (default: unlimited) * @return MainThreadWorkResult with execution statistics - * + * * @code * // Game loop with frame budget * void gameUpdate() { * // Process up to 10 main thread tasks per frame * auto result = service.executeMainThreadWork(10); - * + * * if (result.moreWorkAvailable) { * // More work pending - will process next frame * needsUpdate = true; * } - * + * * // Continue with rendering * render(); * } * @endcode */ MainThreadWorkResult executeMainThreadWork(size_t maxContracts = std::numeric_limits::max()); - + /** * @brief Execute main thread work from a specific group - * + * * Use when you need fine-grained control over which group's work executes. * Useful for prioritizing certain subsystems over others. 
- * + * * @param group The group to execute work from * @param maxContracts Maximum number of contracts to execute * @return Number of contracts executed - * + * * @code * // Prioritize UI work over other main thread tasks * size_t uiWork = service.executeMainThreadWork(&uiGroup, 5); @@ -382,15 +405,15 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E * @endcode */ size_t executeMainThreadWork(WorkContractGroup* group, size_t maxContracts = std::numeric_limits::max()); - + /** * @brief Check if any registered group has main thread work available - * + * * Quick non-blocking check to determine if you need to pump main thread work. * Use this to avoid unnecessary calls to executeMainThreadWork(). - * + * * @return true if at least one group has main thread work scheduled - * + * * @code * // Only pump if there's work to do * if (service.hasMainThreadWork()) { @@ -455,14 +478,13 @@ class WorkService : public IConcurrencyProvider, public ::EntropyEngine::Core::E /// gradual backoff from aggressive spinning to efficient sleeping. /// Thread-local because each thread should adapt independently to its own workload pattern. static thread_local size_t stSoftFailureCount; - + /// Thread-local identifier for debugging and scheduler context /// Provides a stable thread ID (0 to threadCount-1) for the lifetime of each worker thread. /// Thread-local because each thread needs its own unique, persistent identifier. 
static thread_local size_t stThreadId; }; -} // Concurrency -} // Core -} // EntropyEngine - +} // namespace Concurrency +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EntropyApplication.cpp b/src/Core/EntropyApplication.cpp index 54a62f5..8c5a83d 100644 --- a/src/Core/EntropyApplication.cpp +++ b/src/Core/EntropyApplication.cpp @@ -8,32 +8,36 @@ */ #include "Core/EntropyApplication.h" -#include "Concurrency/WorkService.h" -#include "Core/TimerService.h" -#include -#include + #include #include +#include +#include + +#include "Concurrency/WorkService.h" +#include "Core/TimerService.h" #if defined(_WIN32) #define WIN32_LEAN_AND_MEAN #include #else -#include -#include #include #include +#include +#include #endif -namespace EntropyEngine { namespace Core { - +namespace EntropyEngine +{ +namespace Core +{ EntropyApplication& EntropyApplication::shared() { return *sharedPtr(); } std::shared_ptr EntropyApplication::sharedPtr() { - static std::shared_ptr inst{ std::shared_ptr(new EntropyApplication()) }; + static std::shared_ptr inst{std::shared_ptr(new EntropyApplication())}; return inst; } @@ -114,7 +118,7 @@ int EntropyApplication::run() { HANDLE ctrlH = static_cast(_ctrlEvent); while (!stopToken.stop_requested() && !_terminateRequested.load(std::memory_order_acquire)) { if (ctrlH) { - DWORD w = WaitForSingleObject(ctrlH, 100); // 100ms timeout for responsiveness + DWORD w = WaitForSingleObject(ctrlH, 100); // 100ms timeout for responsiveness if (w == WAIT_OBJECT_0) { auto type = _lastCtrlType.load(std::memory_order_relaxed); handleConsoleSignal(type); @@ -130,7 +134,7 @@ int EntropyApplication::run() { struct pollfd pfd; pfd.fd = _signalPipe[0]; pfd.events = POLLIN; - int ret = poll(&pfd, 1, 100); // 100ms timeout for responsiveness + int ret = poll(&pfd, 1, 100); // 100ms timeout for responsiveness if (ret > 0 && (pfd.revents & POLLIN)) { // Signal received - drain pipe and handle @@ -202,13 +206,14 @@ void EntropyApplication::terminate(int 
code) { _loopCv.notify_all(); } #if defined(_WIN32) -namespace { - // Free function with exact signature expected by SetConsoleCtrlHandler - static BOOL WINAPI EntropyConsoleCtrlHandler(DWORD ctrlType) { - EntropyEngine::Core::EntropyApplication::shared().notifyConsoleSignalFromHandler(ctrlType); - return TRUE; - } +namespace +{ +// Free function with exact signature expected by SetConsoleCtrlHandler +static BOOL WINAPI EntropyConsoleCtrlHandler(DWORD ctrlType) { + EntropyEngine::Core::EntropyApplication::shared().notifyConsoleSignalFromHandler(ctrlType); + return TRUE; } +} // namespace void EntropyApplication::installSignalHandlers() { if (_handlersInstalled.exchange(true)) return; @@ -247,7 +252,7 @@ void EntropyApplication::handleConsoleSignal(unsigned long ctrlType) { case CTRL_SHUTDOWN_EVENT: break; default: - return; // ignore others + return; // ignore others } bool first = !_signalSeen.exchange(true); @@ -256,8 +261,10 @@ void EntropyApplication::handleConsoleSignal(unsigned long ctrlType) { // Optionally consult delegate; if vetoed, just return on first request bool allow = true; if (_delegate) { - try { allow = _delegate->applicationShouldTerminate(); } - catch (...) { /* swallow in signal path */ } + try { + allow = _delegate->applicationShouldTerminate(); + } catch (...) 
{ /* swallow in signal path */ + } } if (allow) { terminate(0); @@ -266,7 +273,7 @@ void EntropyApplication::handleConsoleSignal(unsigned long ctrlType) { if (!_escalationStarted.exchange(true)) { auto deadline = _cfg.shutdownDeadline; std::weak_ptr weak = EntropyApplication::sharedPtr(); - std::thread([weak, deadline]{ + std::thread([weak, deadline] { auto endAt = std::chrono::steady_clock::now() + deadline; std::this_thread::sleep_until(endAt); if (auto sp = weak.lock()) { @@ -295,12 +302,13 @@ void EntropyApplication::handleConsoleSignal(unsigned long ctrlType) { } #else // Unix/POSIX signal handling -namespace { - // Signal handler - must be async-signal-safe - static void EntropySigHandler(int signum) { - EntropyEngine::Core::EntropyApplication::shared().notifyPosixSignalFromHandler(signum); - } +namespace +{ +// Signal handler - must be async-signal-safe +static void EntropySigHandler(int signum) { + EntropyEngine::Core::EntropyApplication::shared().notifyPosixSignalFromHandler(signum); } +} // namespace void EntropyApplication::installSignalHandlers() { if (_handlersInstalled.exchange(true)) return; @@ -321,7 +329,7 @@ void EntropyApplication::installSignalHandlers() { struct sigaction fatal_sa; fatal_sa.sa_handler = EntropySigHandler; sigemptyset(&fatal_sa.sa_mask); - fatal_sa.sa_flags = SA_RESETHAND; // Reset to default after first signal + fatal_sa.sa_flags = SA_RESETHAND; // Reset to default after first signal sigaction(SIGABRT, &fatal_sa, nullptr); // abort sigaction(SIGSEGV, &fatal_sa, nullptr); // segmentation fault @@ -362,7 +370,7 @@ void EntropyApplication::handlePosixSignal(int signum) { case SIGTERM: case SIGHUP: case SIGQUIT: - break; // Graceful termination signals + break; // Graceful termination signals case SIGABRT: case SIGSEGV: case SIGBUS: @@ -371,7 +379,7 @@ void EntropyApplication::handlePosixSignal(int signum) { isFatal = true; break; default: - return; // ignore others + return; // ignore others } bool first = 
!_signalSeen.exchange(true); @@ -380,8 +388,10 @@ void EntropyApplication::handlePosixSignal(int signum) { // Optionally consult delegate; if vetoed, just return on first request bool allow = true; if (_delegate && !isFatal) { - try { allow = _delegate->applicationShouldTerminate(); } - catch (...) { /* swallow in signal path */ } + try { + allow = _delegate->applicationShouldTerminate(); + } catch (...) { /* swallow in signal path */ + } } if (allow || isFatal) { @@ -392,7 +402,7 @@ void EntropyApplication::handlePosixSignal(int signum) { if (!_escalationStarted.exchange(true) && !isFatal) { auto deadline = _cfg.shutdownDeadline; std::weak_ptr weak = EntropyApplication::sharedPtr(); - std::thread([weak, deadline]{ + std::thread([weak, deadline] { auto endAt = std::chrono::steady_clock::now() + deadline; std::this_thread::sleep_until(endAt); if (auto sp = weak.lock()) { @@ -420,5 +430,5 @@ void EntropyApplication::handlePosixSignal(int signum) { } #endif - -}} // namespace EntropyEngine::Core +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EntropyApplication.h b/src/Core/EntropyApplication.h index ff55301..ca0f500 100644 --- a/src/Core/EntropyApplication.h +++ b/src/Core/EntropyApplication.h @@ -11,38 +11,46 @@ #include #include +#include #include #include #include -#include #include -#include +#include #include + #include "Core/EntropyServiceRegistry.h" #if defined(_WIN32) #endif -namespace EntropyEngine { namespace Core { - +namespace EntropyEngine +{ +namespace Core +{ -class EntropyAppDelegate { +class EntropyAppDelegate +{ public: virtual ~EntropyAppDelegate() = default; virtual void applicationWillFinishLaunching() {} virtual void applicationDidFinishLaunching() {} - virtual bool applicationShouldTerminate() { return true; } + virtual bool applicationShouldTerminate() { + return true; + } virtual void applicationWillTerminate() {} virtual void applicationDidCatchUnhandledException(std::exception_ptr) {} virtual void 
applicationMainLoop() {} }; -struct EntropyApplicationConfig { - size_t workerThreads = 0; // 0 => auto +struct EntropyApplicationConfig +{ + size_t workerThreads = 0; // 0 => auto std::chrono::milliseconds shutdownDeadline{3000}; }; -class EntropyApplication { +class EntropyApplication +{ public: static EntropyApplication& shared(); static std::shared_ptr sharedPtr(); @@ -55,10 +63,16 @@ class EntropyApplication { void terminate(int code); // thread-safe, idempotent // Services access - EntropyServiceRegistry& services() { return _services; } + EntropyServiceRegistry& services() { + return _services; + } - int exitCode() const { return _exitCode.load(); } - bool isRunning() const noexcept { return _running.load(); } + int exitCode() const { + return _exitCode.load(); + } + bool isRunning() const noexcept { + return _running.load(); + } #if defined(_WIN32) // Exposed for console control handler forwarder @@ -101,15 +115,15 @@ class EntropyApplication { std::atomic _signalSeen{false}; std::atomic _escalationStarted{false}; // Signal handling internals - void* _ctrlEvent{nullptr}; // HANDLE, kept as void* to avoid windows.h in header (auto-reset) - void* _terminateEvent{nullptr}; // HANDLE, kept as void* (manual-reset) + void* _ctrlEvent{nullptr}; // HANDLE, kept as void* to avoid windows.h in header (auto-reset) + void* _terminateEvent{nullptr}; // HANDLE, kept as void* (manual-reset) std::atomic _lastCtrlType{0}; #else std::atomic _handlersInstalled{false}; std::atomic _signalSeen{false}; std::atomic _escalationStarted{false}; // Unix signal handling internals - int _signalPipe[2]{-1, -1}; // Pipe for signal-safe notification + int _signalPipe[2]{-1, -1}; // Pipe for signal-safe notification std::atomic _lastSignal{0}; #endif @@ -118,4 +132,5 @@ class EntropyApplication { std::condition_variable _loopCv; }; -}} // namespace EntropyEngine::Core +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EntropyCAPI.cpp b/src/Core/EntropyCAPI.cpp index 
bd80e20..6d3303e 100644 --- a/src/Core/EntropyCAPI.cpp +++ b/src/Core/EntropyCAPI.cpp @@ -2,17 +2,18 @@ * C API bridge for EntropyCore */ -#include "entropy_c_api.h" -#include "EntropyObject.h" -#include "EntropyClass.h" -#include "../Logging/Logger.h" +#include +#include #include #include -#include #include -#include #include +#include "../Logging/Logger.h" +#include "EntropyClass.h" +#include "EntropyObject.h" +#include "entropy_c_api.h" + using namespace EntropyEngine::Core; extern "C" { @@ -21,7 +22,7 @@ ENTROPY_API void entropy_get_version(uint32_t* major, uint32_t* minor, uint32_t* if (major) *major = 1; if (minor) *minor = 0; if (patch) *patch = 0; - if (abi) *abi = 0; // versioning not a concern right now + if (abi) *abi = 0; // versioning not a concern right now } ENTROPY_API void* entropy_alloc(size_t size) { @@ -34,15 +35,24 @@ ENTROPY_API void entropy_free(void* p) { ENTROPY_API const char* entropy_status_to_string(EntropyStatus s) { switch (s) { - case ENTROPY_OK: return "ENTROPY_OK"; - case ENTROPY_ERR_UNKNOWN: return "ENTROPY_ERR_UNKNOWN"; - case ENTROPY_ERR_INVALID_ARG: return "ENTROPY_ERR_INVALID_ARG"; - case ENTROPY_ERR_NOT_FOUND: return "ENTROPY_ERR_NOT_FOUND"; - case ENTROPY_ERR_TYPE_MISMATCH: return "ENTROPY_ERR_TYPE_MISMATCH"; - case ENTROPY_ERR_BUFFER_TOO_SMALL: return "ENTROPY_ERR_BUFFER_TOO_SMALL"; - case ENTROPY_ERR_NO_MEMORY: return "ENTROPY_ERR_NO_MEMORY"; - case ENTROPY_ERR_UNAVAILABLE: return "ENTROPY_ERR_UNAVAILABLE"; - default: return "ENTROPY_STATUS_UNKNOWN"; + case ENTROPY_OK: + return "ENTROPY_OK"; + case ENTROPY_ERR_UNKNOWN: + return "ENTROPY_ERR_UNKNOWN"; + case ENTROPY_ERR_INVALID_ARG: + return "ENTROPY_ERR_INVALID_ARG"; + case ENTROPY_ERR_NOT_FOUND: + return "ENTROPY_ERR_NOT_FOUND"; + case ENTROPY_ERR_TYPE_MISMATCH: + return "ENTROPY_ERR_TYPE_MISMATCH"; + case ENTROPY_ERR_BUFFER_TOO_SMALL: + return "ENTROPY_ERR_BUFFER_TOO_SMALL"; + case ENTROPY_ERR_NO_MEMORY: + return "ENTROPY_ERR_NO_MEMORY"; + case ENTROPY_ERR_UNAVAILABLE: + 
return "ENTROPY_ERR_UNAVAILABLE"; + default: + return "ENTROPY_STATUS_UNKNOWN"; } } @@ -132,11 +142,13 @@ ENTROPY_API EntropyStatus entropy_object_description(const EntropyObjectRef* obj } ENTROPY_API EntropyBool entropy_handle_is_valid(EntropyHandle h) { - return h.owner != nullptr ? ENTROPY_TRUE : ENTROPY_FALSE; // quick check only + return h.owner != nullptr ? ENTROPY_TRUE : ENTROPY_FALSE; // quick check only } ENTROPY_API EntropyBool entropy_handle_equals(EntropyHandle a, EntropyHandle b) { - return ((a.owner == b.owner) && (a.index == b.index) && (a.generation == b.generation) && (a.type_id == b.type_id)) ? ENTROPY_TRUE : ENTROPY_FALSE; + return ((a.owner == b.owner) && (a.index == b.index) && (a.generation == b.generation) && (a.type_id == b.type_id)) + ? ENTROPY_TRUE + : ENTROPY_FALSE; } ENTROPY_API EntropyBool entropy_handle_type_matches(EntropyHandle h, EntropyTypeId expected) { @@ -172,14 +184,13 @@ ENTROPY_API EntropyStatus entropy_handle_release(EntropyHandle h) { EntropyObjectRef* obj = entropy_resolve_handle(h); if (!obj) return ENTROPY_ERR_NOT_FOUND; // Drop the resolved ref and one more to achieve net -1 - entropy_object_release(obj); // back to 0 net - entropy_object_release(obj); // net -1 + entropy_object_release(obj); // back to 0 net + entropy_object_release(obj); // net -1 return ENTROPY_OK; } -ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, - EntropyTypeId* out_type_id, - EntropyOwnedString* out_class_name) { +ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, EntropyTypeId* out_type_id, + EntropyOwnedString* out_class_name) { if (!h.owner) return ENTROPY_ERR_INVALID_ARG; EntropyObjectRef* obj = entropy_resolve_handle(h); if (!obj) return ENTROPY_ERR_NOT_FOUND; @@ -193,13 +204,18 @@ ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, } // Owner vtable registry ------------------------------------------------------- -struct OwnerVTable { EntropyResolveFn resolve; EntropyValidateFn validate; }; +struct 
OwnerVTable +{ + EntropyResolveFn resolve; + EntropyValidateFn validate; +}; static std::unordered_map g_ownerVTables; static std::mutex g_ownerVTablesMutex; -ENTROPY_API void entropy_register_owner_vtable(const void* owner, EntropyResolveFn resolve, EntropyValidateFn validate) { +ENTROPY_API void entropy_register_owner_vtable(const void* owner, EntropyResolveFn resolve, + EntropyValidateFn validate) { std::lock_guard lock(g_ownerVTablesMutex); - g_ownerVTables[owner] = OwnerVTable{ resolve, validate }; + g_ownerVTables[owner] = OwnerVTable{resolve, validate}; } ENTROPY_API EntropyObjectRef* entropy_resolve_handle(EntropyHandle h) { @@ -213,7 +229,6 @@ ENTROPY_API EntropyObjectRef* entropy_resolve_handle(EntropyHandle h) { return fn(h.owner, h.index, h.generation); } - // (No demo functionality is provided in the C API implementation. The API is production-only.) -} // extern "C" +} // extern "C" diff --git a/src/Core/EntropyClass.h b/src/Core/EntropyClass.h index 435232e..110c17d 100644 --- a/src/Core/EntropyClass.h +++ b/src/Core/EntropyClass.h @@ -6,6 +6,7 @@ #define ENTROPYCORE_ENTROPYCLASS_H #include + #include "TypeSystem/TypeID.h" /** @@ -24,18 +25,16 @@ * }; * @endcode */ -#define ENTROPY_CLASS_BODY(ClassName) \ - public: \ - /* Virtual instance overrides (no toString/description) */ \ - const char* className() const noexcept override \ - { \ - return #ClassName; \ - } \ - uint64_t classHash() const noexcept override \ - { \ - static const uint64_t hash = static_cast(::EntropyEngine::Core::TypeSystem::createTypeId().id); \ - return hash; \ - } - +#define ENTROPY_CLASS_BODY(ClassName) \ +public: \ + /* Virtual instance overrides (no toString/description) */ \ + const char* className() const noexcept override { \ + return #ClassName; \ + } \ + uint64_t classHash() const noexcept override { \ + static const uint64_t hash = \ + static_cast(::EntropyEngine::Core::TypeSystem::createTypeId().id); \ + return hash; \ + } -#endif //ENTROPYCORE_ENTROPYCLASS_H \ No 
newline at end of file +#endif // ENTROPYCORE_ENTROPYCLASS_H diff --git a/src/Core/EntropyInterop.h b/src/Core/EntropyInterop.h index 7fca8bd..b21062f 100644 --- a/src/Core/EntropyInterop.h +++ b/src/Core/EntropyInterop.h @@ -1,17 +1,20 @@ #pragma once -#include "RefObject.h" #include -namespace EntropyEngine::Core { +#include "RefObject.h" + +namespace EntropyEngine::Core +{ -template -struct EntropyDeleter { +template +struct EntropyDeleter +{ void operator()(T* ptr) const noexcept { if (ptr) ptr->release(); } }; -template +template std::shared_ptr toSharedPtr(const RefObject& ref) { T* ptr = ref.get(); if (ptr) { @@ -21,7 +24,7 @@ std::shared_ptr toSharedPtr(const RefObject& ref) { return nullptr; } -template +template std::shared_ptr wrapInSharedPtr(T* ptr) { if (ptr) { ptr->retain(); @@ -30,4 +33,4 @@ std::shared_ptr wrapInSharedPtr(T* ptr) { return nullptr; } -} // namespace EntropyEngine::Core \ No newline at end of file +} // namespace EntropyEngine::Core diff --git a/src/Core/EntropyMain.cpp b/src/Core/EntropyMain.cpp index bdfb7ac..0c996c8 100644 --- a/src/Core/EntropyMain.cpp +++ b/src/Core/EntropyMain.cpp @@ -7,14 +7,15 @@ * This file is part of the Entropy Core project. 
*/ -#include "Core/entropy_main.h" #include "Core/EntropyApplication.h" +#include "Core/entropy_main.h" using EntropyEngine::Core::EntropyApplication; extern "C" { -struct CppDelegate : EntropyEngine::Core::EntropyAppDelegate { +struct CppDelegate : EntropyEngine::Core::EntropyAppDelegate +{ EntropyAppDelegateC del{}; EntropyApplication* app = nullptr; void applicationWillFinishLaunching() override { @@ -35,8 +36,7 @@ struct CppDelegate : EntropyEngine::Core::EntropyAppDelegate { } }; -int entropy_main_run(const EntropyMainConfig* cfg, - const EntropyAppDelegateC* delegate) { +int entropy_main_run(const EntropyMainConfig* cfg, const EntropyAppDelegateC* delegate) { auto& app = EntropyApplication::shared(); EntropyEngine::Core::EntropyApplicationConfig cc{}; @@ -59,6 +59,8 @@ void entropy_main_terminate(int code) { EntropyApplication::shared().terminate(code); } -EntropyApp* entropy_main_app(void) { return (EntropyApp*)&EntropyApplication::shared(); } +EntropyApp* entropy_main_app(void) { + return (EntropyApp*)&EntropyApplication::shared(); +} -} // extern "C" +} // extern "C" diff --git a/src/Core/EntropyObject.cpp b/src/Core/EntropyObject.cpp index 683e79f..ec76036 100644 --- a/src/Core/EntropyObject.cpp +++ b/src/Core/EntropyObject.cpp @@ -3,36 +3,33 @@ // #include "EntropyObject.h" -#include "../Logging/Logger.h" -#include "../TypeSystem/TypeID.h" -#include + #include +#include -namespace EntropyEngine::Core { +#include "../Logging/Logger.h" +#include "../TypeSystem/TypeID.h" -void EntropyObject::retain() const noexcept +namespace EntropyEngine::Core { + +void EntropyObject::retain() const noexcept { _refCount.fetch_add(1, std::memory_order_acq_rel); } - -void EntropyObject::release() const noexcept -{ + +void EntropyObject::release() const noexcept { uint32_t oldCount = _refCount.fetch_sub(1, std::memory_order_acq_rel); - + #ifdef ENTROPY_DEBUG uint32_t newCount = oldCount - 1; - ENTROPY_LOG_TRACE_CAT("RefCount", - std::format("Release {} @ {} -> refcount={}", 
- className(), static_cast(this), newCount)); + ENTROPY_LOG_TRACE_CAT("RefCount", std::format("Release {} @ {} -> refcount={}", className(), + static_cast(this), newCount)); #endif - - if (oldCount == 1) - { + + if (oldCount == 1) { #ifdef ENTROPY_DEBUG const char* name = className(); - ENTROPY_LOG_TRACE_CAT("RefCount", - std::format("Delete {} @ {}", - name, static_cast(this))); + ENTROPY_LOG_TRACE_CAT("RefCount", std::format("Delete {} @ {}", name, static_cast(this))); #endif // Call memory profiling hook before delete (while pointer is still valid) if (EntropyObjectMemoryHooks::onFree) { @@ -42,50 +39,44 @@ void EntropyObject::release() const noexcept delete this; } } - -uint32_t EntropyObject::refCount() const noexcept -{ + +uint32_t EntropyObject::refCount() const noexcept { // Use acquire to ensure we observe the latest completed updates after thread joins return _refCount.load(std::memory_order_acquire); } -bool EntropyObject::tryRetain() const noexcept -{ +bool EntropyObject::tryRetain() const noexcept { uint32_t count = _refCount.load(std::memory_order_acquire); while (count != 0) { - if (_refCount.compare_exchange_weak(count, count + 1, - std::memory_order_acq_rel, - std::memory_order_acquire)) { + if (_refCount.compare_exchange_weak(count, count + 1, std::memory_order_acq_rel, std::memory_order_acquire)) { return true; } // count has been updated with the current value; loop and retry unless it hit 0 } return false; } - -uint64_t EntropyObject::classHash() const noexcept -{ - static const uint64_t hash = static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); + +uint64_t EntropyObject::classHash() const noexcept { + static const uint64_t hash = + static_cast(EntropyEngine::Core::TypeSystem::createTypeId().id); return hash; } -std::string EntropyObject::toString() const -{ +std::string EntropyObject::toString() const { return std::format("{}@{}", className(), static_cast(this)); } - -std::string EntropyObject::debugString() const -{ + 
+std::string EntropyObject::debugString() const { // Avoid repeating className() since toString() already includes it by default if (hasHandle()) { - return std::format("{} [refs:{} handle:{:08X}:{:08X}]", toString(), refCount(), handleIndex(), handleGeneration()); + return std::format("{} [refs:{} handle:{:08X}:{:08X}]", toString(), refCount(), handleIndex(), + handleGeneration()); } return std::format("{} [refs:{}]", toString(), refCount()); } - -std::string EntropyObject::description() const -{ + +std::string EntropyObject::description() const { return toString(); } -} // namespace EntropyEngine::Core \ No newline at end of file +} // namespace EntropyEngine::Core diff --git a/src/Core/EntropyObject.h b/src/Core/EntropyObject.h index f7878f2..78d3c3d 100644 --- a/src/Core/EntropyObject.h +++ b/src/Core/EntropyObject.h @@ -28,12 +28,13 @@ */ #pragma once #include -#include #include -#include #include +#include +#include -namespace EntropyEngine::Core { +namespace EntropyEngine::Core +{ /** * @brief Memory profiling callbacks for EntropyObject lifecycle tracking @@ -51,20 +52,24 @@ namespace EntropyEngine::Core { * }; * @endcode */ -struct EntropyObjectMemoryHooks { - using AllocCallback = void(*)(void* ptr, size_t size, const char* className); - using FreeCallback = void(*)(void* ptr, const char* className); +struct EntropyObjectMemoryHooks +{ + using AllocCallback = void (*)(void* ptr, size_t size, const char* className); + using FreeCallback = void (*)(void* ptr, const char* className); static inline AllocCallback onAlloc = nullptr; static inline FreeCallback onFree = nullptr; }; // Forward declarations -namespace TypeSystem { - class TypeInfo; - template class GenericHandle; // fwd decl for helpers (optional) - template class TypedHandle; // fwd decl for helpers (optional) -} +namespace TypeSystem +{ +class TypeInfo; +template +class GenericHandle; // fwd decl for helpers (optional) +template +class TypedHandle; // fwd decl for helpers (optional) +} // 
namespace TypeSystem /** * @class EntropyObject @@ -75,9 +80,10 @@ namespace TypeSystem { * can participate in owner/index/generation validation without coupling to * a specific handle type. */ -class EntropyObject { +class EntropyObject +{ protected: - mutable std::atomic _refCount{1}; ///< Thread-safe retain/release counter + mutable std::atomic _refCount{1}; ///< Thread-safe retain/release counter /** * @brief Optional handle identity stamped by an owner/registry @@ -85,19 +91,28 @@ class EntropyObject { * When set, identifies the object within its owner using index+generation. * This enables generation-based validation and C API interop. */ - struct HandleCore { - void* owner = nullptr; ///< Owning registry that stamped this object - uint32_t index = 0; ///< Slot index within the owner - uint32_t generation = 0; ///< Generation for stale-handle detection - bool isSet() const noexcept { return owner != nullptr; } - uint64_t id64() const noexcept { return (static_cast(index) << 32) | generation; } + struct HandleCore + { + void* owner = nullptr; ///< Owning registry that stamped this object + uint32_t index = 0; ///< Slot index within the owner + uint32_t generation = 0; ///< Generation for stale-handle detection + bool isSet() const noexcept { + return owner != nullptr; + } + uint64_t id64() const noexcept { + return (static_cast(index) << 32) | generation; + } } _handle{}; // Internal setters used by owners/registries to stamp identity void _setHandleIdentity(void* owner, uint32_t index, uint32_t generation) noexcept { - _handle.owner = owner; _handle.index = index; _handle.generation = generation; + _handle.owner = owner; + _handle.index = index; + _handle.generation = generation; + } + void _clearHandleIdentity() noexcept { + _handle = {}; } - void _clearHandleIdentity() noexcept { _handle = {}; } // Grant access to helper friend struct HandleAccess; @@ -108,9 +123,9 @@ class EntropyObject { EntropyObject(const EntropyObject&) = delete; EntropyObject& 
operator=(const EntropyObject&) = delete; EntropyObject& operator=(EntropyObject&&) = delete; - + virtual ~EntropyObject() noexcept = default; - + /** * @brief Increments the reference count * @note Thread-safe; may be called from any thread. @@ -118,13 +133,13 @@ class EntropyObject { void retain() const noexcept; /** * @brief Attempts to retain only if the object is still alive - * + * * Use when you need to safely grab a reference from a background thread * without racing destruction. If the refcount has already reached zero, * tryRetain() returns false and the object must not be used. - * + * * @return true on success (reference acquired), false if object already dead - * + * * @code * // Example: Try to use an object that might be concurrently released * if (obj->tryRetain()) { @@ -148,15 +163,25 @@ class EntropyObject { // Handle introspection (safe even if not stamped) /** @return true if an owner has stamped this object with handle identity */ - bool hasHandle() const noexcept { return _handle.isSet(); } + bool hasHandle() const noexcept { + return _handle.isSet(); + } /** @return Owner pointer that stamped this object, or null if none */ - const void* handleOwner() const noexcept { return _handle.owner; } + const void* handleOwner() const noexcept { + return _handle.owner; + } /** @return Index value stamped by the owner (undefined if !hasHandle()) */ - uint32_t handleIndex() const noexcept { return _handle.index; } + uint32_t handleIndex() const noexcept { + return _handle.index; + } /** @return Generation value stamped by the owner (undefined if !hasHandle()) */ - uint32_t handleGeneration() const noexcept { return _handle.generation; } + uint32_t handleGeneration() const noexcept { + return _handle.generation; + } /** @return 64-bit packed index:generation identifier (undefined if !hasHandle()) */ - uint64_t handleId() const noexcept { return _handle.id64(); } + uint64_t handleId() const noexcept { + return _handle.id64(); + } /** * @brief Returns the 
stamped owner pointer cast to the requested type @@ -168,21 +193,25 @@ class EntropyObject { OwnerT* handleOwnerAs() const noexcept { return static_cast(_handle.owner); } - + /** @brief Runtime class name for diagnostics and reflection */ - virtual const char* className() const noexcept { return "EntropyObject"; } + virtual const char* className() const noexcept { + return "EntropyObject"; + } /** @brief Stable type hash for cross-language identification */ virtual uint64_t classHash() const noexcept; - + /** @brief Human-readable short string (class@ptr by default) */ virtual std::string toString() const; /** @brief Debug-oriented string including refcount and handle when present */ virtual std::string debugString() const; /** @brief Long-form description; defaults to toString() */ virtual std::string description() const; - + /** @brief Optional richer type information; may be null */ - virtual const TypeSystem::TypeInfo* typeInfo() const { return nullptr; } + virtual const TypeSystem::TypeInfo* typeInfo() const { + return nullptr; + } }; /** @@ -191,9 +220,14 @@ class EntropyObject { * Usage: HandleAccess::set(obj, owner, index, generation) when allocating; and * HandleAccess::clear(obj) before releasing/bumping generation. 
*/ -struct HandleAccess { - static void set(EntropyObject& o, void* owner, uint32_t index, uint32_t generation) noexcept { o._setHandleIdentity(owner, index, generation); } - static void clear(EntropyObject& o) noexcept { o._clearHandleIdentity(); } +struct HandleAccess +{ + static void set(EntropyObject& o, void* owner, uint32_t index, uint32_t generation) noexcept { + o._setHandleIdentity(owner, index, generation); + } + static void clear(EntropyObject& o) noexcept { + o._clearHandleIdentity(); + } }; -} // namespace EntropyEngine::Core \ No newline at end of file +} // namespace EntropyEngine::Core diff --git a/src/Core/EntropyService.h b/src/Core/EntropyService.h index fdab1c8..555d221 100644 --- a/src/Core/EntropyService.h +++ b/src/Core/EntropyService.h @@ -9,84 +9,101 @@ #pragma once -#include "CoreCommon.h" -#include "EntropyObject.h" +#include #include #include -#include + +#include "CoreCommon.h" +#include "EntropyObject.h" #include "TypeSystem/TypeID.h" -namespace EntropyEngine { - namespace Core { - - class EntropyServiceRegistry; // forward declaration - - /** - * @brief Lifecycle states for an EntropyService instance. - */ - enum class ServiceState { - Registered, - Loaded, - Started, - Stopped, - Unloaded - }; - - /** - * @brief Base interface for pluggable services within Entropy. - * - * Services encapsulate optional subsystems (e.g., Work execution, Scene, Renderer) - * and participate in the application lifecycle via load/start/stop/unload callbacks. - * - * Services inherit from EntropyObject, enabling: - * - Reference counting for safe shared ownership - * - Handle stamping for generation-based validation - * - WeakRef support for non-owning references across subsystems - * - * Implementations should be lightweight to construct; heavy initialization should - * happen in load()/start(). All lifecycle methods are expected to be called on the - * main thread by the orchestrator. 
- */ - class EntropyService : public EntropyObject { - public: - ~EntropyService() override = default; - - // Identity (metadata only; not used for lookups) - virtual const char* id() const = 0; // stable unique id, e.g. "com.entropy.core.work" - virtual const char* name() const = 0; // human readable - - // Static type identity for RTTI-less registration and lookup - virtual TypeSystem::TypeID typeId() const = 0; - - // Optional semantic version for compatibility checks - virtual const char* version() const { return "0.1.0"; } - - // RTTI-less dependency declaration by static types. Preferred and used for ordering. - virtual std::vector dependsOnTypes() const { return {}; } - - // (Legacy metadata) String-based dependencies retained for diagnostics only; ignored by orchestrator. - virtual std::vector dependsOn() const { return {}; } - - // Lifecycle hooks (main thread unless documented otherwise) - virtual void load() {} - virtual void start() {} - virtual void stop() {} - virtual void unload() {} - - // Observability - ServiceState state() const noexcept { return _state.load(std::memory_order_acquire); } - - // EntropyObject overrides - const char* className() const noexcept override { return name(); } - - protected: - // For orchestration: allow registry/application to transition state - void setState(ServiceState s) noexcept { _state.store(s, std::memory_order_release); } - - private: - friend class EntropyServiceRegistry; // allow registry to drive state transitions - std::atomic _state{ServiceState::Registered}; - }; - - } // namespace Core -} // namespace EntropyEngine +namespace EntropyEngine +{ +namespace Core +{ + +class EntropyServiceRegistry; // forward declaration + +/** + * @brief Lifecycle states for an EntropyService instance. + */ +enum class ServiceState +{ + Registered, + Loaded, + Started, + Stopped, + Unloaded +}; + +/** + * @brief Base interface for pluggable services within Entropy. 
+ * + * Services encapsulate optional subsystems (e.g., Work execution, Scene, Renderer) + * and participate in the application lifecycle via load/start/stop/unload callbacks. + * + * Services inherit from EntropyObject, enabling: + * - Reference counting for safe shared ownership + * - Handle stamping for generation-based validation + * - WeakRef support for non-owning references across subsystems + * + * Implementations should be lightweight to construct; heavy initialization should + * happen in load()/start(). All lifecycle methods are expected to be called on the + * main thread by the orchestrator. + */ +class EntropyService : public EntropyObject +{ +public: + ~EntropyService() override = default; + + // Identity (metadata only; not used for lookups) + virtual const char* id() const = 0; // stable unique id, e.g. "com.entropy.core.work" + virtual const char* name() const = 0; // human readable + + // Static type identity for RTTI-less registration and lookup + virtual TypeSystem::TypeID typeId() const = 0; + + // Optional semantic version for compatibility checks + virtual const char* version() const { + return "0.1.0"; + } + + // RTTI-less dependency declaration by static types. Preferred and used for ordering. + virtual std::vector dependsOnTypes() const { + return {}; + } + + // (Legacy metadata) String-based dependencies retained for diagnostics only; ignored by orchestrator. 
+ virtual std::vector dependsOn() const { + return {}; + } + + // Lifecycle hooks (main thread unless documented otherwise) + virtual void load() {} + virtual void start() {} + virtual void stop() {} + virtual void unload() {} + + // Observability + ServiceState state() const noexcept { + return _state.load(std::memory_order_acquire); + } + + // EntropyObject overrides + const char* className() const noexcept override { + return name(); + } + +protected: + // For orchestration: allow registry/application to transition state + void setState(ServiceState s) noexcept { + _state.store(s, std::memory_order_release); + } + +private: + friend class EntropyServiceRegistry; // allow registry to drive state transitions + std::atomic _state{ServiceState::Registered}; +}; + +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EntropyServiceRegistry.cpp b/src/Core/EntropyServiceRegistry.cpp index 0ceb87b..a6eead0 100644 --- a/src/Core/EntropyServiceRegistry.cpp +++ b/src/Core/EntropyServiceRegistry.cpp @@ -8,183 +8,188 @@ */ #include "Core/EntropyServiceRegistry.h" + #include #include -namespace EntropyEngine { - namespace Core { - - EntropyServiceRegistry::~EntropyServiceRegistry() { - // Clear all handle stamps before destruction - for (auto& [tid, slot] : _slots) { - if (slot.service) { - HandleSlotOps::clear(*slot.service); - } - } - } - - bool EntropyServiceRegistry::registerService(std::shared_ptr service) { - if (!service) return false; - - auto tid = service->typeId(); - auto it = _slots.find(tid); - - if (it != _slots.end()) { - // Service already registered - clear old stamp and update - if (it->second.service) { - HandleSlotOps::release(*it->second.service, it->second.generation); - } - it->second.service = service; - // Stamp with new generation - HandleSlotOps::stamp(*service, this, it->second.slotIndex, it->second.generation); - return false; // Not a new insertion - } - - // New service - create slot - ServiceSlot slot; - slot.service = 
service; - slot.slotIndex = _nextSlotIndex++; - - // Stamp service with handle identity - HandleSlotOps::stamp(*service, this, slot.slotIndex, slot.generation); +namespace EntropyEngine +{ +namespace Core +{ - _slots[tid] = std::move(slot); - return true; +EntropyServiceRegistry::~EntropyServiceRegistry() { + // Clear all handle stamps before destruction + for (auto& [tid, slot] : _slots) { + if (slot.service) { + HandleSlotOps::clear(*slot.service); } + } +} - bool EntropyServiceRegistry::unregisterService(const TypeSystem::TypeID& tid) { - auto it = _slots.find(tid); - if (it == _slots.end()) return false; - - ServiceSlot& slot = it->second; - if (slot.service) { - // Clear handle and increment generation to invalidate WeakRefs - HandleSlotOps::release(*slot.service, slot.generation); - } +bool EntropyServiceRegistry::registerService(std::shared_ptr service) { + if (!service) return false; - _slots.erase(it); - return true; - } + auto tid = service->typeId(); + auto it = _slots.find(tid); - std::shared_ptr EntropyServiceRegistry::get(const TypeSystem::TypeID& tid) const { - auto slot = getSlot(tid); - return slot ? 
slot->service : nullptr; + if (it != _slots.end()) { + // Service already registered - clear old stamp and update + if (it->second.service) { + HandleSlotOps::release(*it->second.service, it->second.generation); } - - bool EntropyServiceRegistry::has(const TypeSystem::TypeID& tid) const noexcept { - return _slots.find(tid) != _slots.end(); + it->second.service = service; + // Stamp with new generation + HandleSlotOps::stamp(*service, this, it->second.slotIndex, it->second.generation); + return false; // Not a new insertion + } + + // New service - create slot + ServiceSlot slot; + slot.service = service; + slot.slotIndex = _nextSlotIndex++; + + // Stamp service with handle identity + HandleSlotOps::stamp(*service, this, slot.slotIndex, slot.generation); + + _slots[tid] = std::move(slot); + return true; +} + +bool EntropyServiceRegistry::unregisterService(const TypeSystem::TypeID& tid) { + auto it = _slots.find(tid); + if (it == _slots.end()) return false; + + ServiceSlot& slot = it->second; + if (slot.service) { + // Clear handle and increment generation to invalidate WeakRefs + HandleSlotOps::release(*slot.service, slot.generation); + } + + _slots.erase(it); + return true; +} + +std::shared_ptr EntropyServiceRegistry::get(const TypeSystem::TypeID& tid) const { + auto slot = getSlot(tid); + return slot ? 
slot->service : nullptr; +} + +bool EntropyServiceRegistry::has(const TypeSystem::TypeID& tid) const noexcept { + return _slots.find(tid) != _slots.end(); +} + +bool EntropyServiceRegistry::isValid(const EntropyService* service) const noexcept { + if (!service || !service->hasHandle()) return false; + if (service->handleOwner() != this) return false; + + // Find the slot by iterating (services are keyed by TypeID, not slot index) + for (const auto& [tid, slot] : _slots) { + if (slot.service.get() == service) { + return slot.generation.current() == service->handleGeneration(); } - - bool EntropyServiceRegistry::isValid(const EntropyService* service) const noexcept { - if (!service || !service->hasHandle()) return false; - if (service->handleOwner() != this) return false; - - // Find the slot by iterating (services are keyed by TypeID, not slot index) - for (const auto& [tid, slot] : _slots) { - if (slot.service.get() == service) { - return slot.generation.current() == service->handleGeneration(); - } + } + return false; +} + +const EntropyServiceRegistry::ServiceSlot* EntropyServiceRegistry::getSlot(const TypeSystem::TypeID& tid) const { + auto it = _slots.find(tid); + return it != _slots.end() ? &it->second : nullptr; +} + +EntropyServiceRegistry::ServiceSlot* EntropyServiceRegistry::getSlot(const TypeSystem::TypeID& tid) { + auto it = _slots.find(tid); + return it != _slots.end() ? 
&it->second : nullptr; +} + +void EntropyServiceRegistry::loadAll() { + auto order = topoOrder(); + for (const auto& tid : order) { + auto& slot = _slots.at(tid); + slot.service->setState(ServiceState::Loaded); + slot.service->load(); + } +} + +void EntropyServiceRegistry::startAll() { + auto order = topoOrder(); + for (const auto& tid : order) { + auto& slot = _slots.at(tid); + slot.service->setState(ServiceState::Started); + slot.service->start(); + } +} + +void EntropyServiceRegistry::stopAll() { + auto order = topoOrder(); + // stop in reverse order + for (auto it = order.rbegin(); it != order.rend(); ++it) { + auto& slot = _slots.at(*it); + slot.service->stop(); + slot.service->setState(ServiceState::Stopped); + } +} + +void EntropyServiceRegistry::unloadAll() { + auto order = topoOrder(); + for (auto it = order.rbegin(); it != order.rend(); ++it) { + auto& slot = _slots.at(*it); + slot.service->unload(); + slot.service->setState(ServiceState::Unloaded); + } +} + +std::vector EntropyServiceRegistry::topoOrder() const { + // Kahn's algorithm on TypeIDs + // Build adjacency: dep -> svc + std::unordered_map indegree; + std::unordered_map> adj; + + // Initialize vertices + for (const auto& [tid, _] : _slots) { + indegree[tid] = 0; + } + + // Add edges and indegrees using type-based dependencies + for (const auto& [tid, slot] : _slots) { + for (const auto& depTid : slot.service->dependsOnTypes()) { + if (!has(depTid)) { + // Diagnostic message uses metadata strings, not for lookup + throw std::runtime_error(std::string("Missing dependency required by service '") + slot.service->id() + + "'"); } - return false; - } - - const EntropyServiceRegistry::ServiceSlot* EntropyServiceRegistry::getSlot(const TypeSystem::TypeID& tid) const { - auto it = _slots.find(tid); - return it != _slots.end() ? 
&it->second : nullptr; - } - - EntropyServiceRegistry::ServiceSlot* EntropyServiceRegistry::getSlot(const TypeSystem::TypeID& tid) { - auto it = _slots.find(tid); - return it != _slots.end() ? &it->second : nullptr; + adj[depTid].push_back(tid); + indegree[tid] += 1; } - - void EntropyServiceRegistry::loadAll() { - auto order = topoOrder(); - for (const auto& tid : order) { - auto& slot = _slots.at(tid); - slot.service->setState(ServiceState::Loaded); - slot.service->load(); + } + + std::queue q; + for (const auto& [tid, deg] : indegree) { + if (deg == 0) q.push(tid); + } + + std::vector order; + order.reserve(_slots.size()); + while (!q.empty()) { + auto u = q.front(); + q.pop(); + order.push_back(u); + auto it = adj.find(u); + if (it != adj.end()) { + for (const auto& v : it->second) { + auto& d = indegree[v]; + if (d == 0) continue; // defensive + d -= 1; + if (d == 0) q.push(v); } } + } - void EntropyServiceRegistry::startAll() { - auto order = topoOrder(); - for (const auto& tid : order) { - auto& slot = _slots.at(tid); - slot.service->setState(ServiceState::Started); - slot.service->start(); - } - } - - void EntropyServiceRegistry::stopAll() { - auto order = topoOrder(); - // stop in reverse order - for (auto it = order.rbegin(); it != order.rend(); ++it) { - auto& slot = _slots.at(*it); - slot.service->stop(); - slot.service->setState(ServiceState::Stopped); - } - } - - void EntropyServiceRegistry::unloadAll() { - auto order = topoOrder(); - for (auto it = order.rbegin(); it != order.rend(); ++it) { - auto& slot = _slots.at(*it); - slot.service->unload(); - slot.service->setState(ServiceState::Unloaded); - } - } - - std::vector EntropyServiceRegistry::topoOrder() const { - // Kahn's algorithm on TypeIDs - // Build adjacency: dep -> svc - std::unordered_map indegree; - std::unordered_map> adj; - - // Initialize vertices - for (const auto& [tid, _] : _slots) { - indegree[tid] = 0; - } + if (order.size() != _slots.size()) { + throw std::runtime_error("Service 
dependency cycle detected"); + } - // Add edges and indegrees using type-based dependencies - for (const auto& [tid, slot] : _slots) { - for (const auto& depTid : slot.service->dependsOnTypes()) { - if (!has(depTid)) { - // Diagnostic message uses metadata strings, not for lookup - throw std::runtime_error(std::string("Missing dependency required by service '") + slot.service->id() + "'"); - } - adj[depTid].push_back(tid); - indegree[tid] += 1; - } - } - - std::queue q; - for (const auto& [tid, deg] : indegree) { - if (deg == 0) q.push(tid); - } - - std::vector order; - order.reserve(_slots.size()); - while (!q.empty()) { - auto u = q.front(); q.pop(); - order.push_back(u); - auto it = adj.find(u); - if (it != adj.end()) { - for (const auto& v : it->second) { - auto& d = indegree[v]; - if (d == 0) continue; // defensive - d -= 1; - if (d == 0) q.push(v); - } - } - } - - if (order.size() != _slots.size()) { - throw std::runtime_error("Service dependency cycle detected"); - } - - return order; - } + return order; +} - } // namespace Core -} // namespace EntropyEngine +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EntropyServiceRegistry.h b/src/Core/EntropyServiceRegistry.h index 20f9ec2..3e54e70 100644 --- a/src/Core/EntropyServiceRegistry.h +++ b/src/Core/EntropyServiceRegistry.h @@ -9,125 +9,136 @@ #pragma once -#include "CoreCommon.h" -#include "Core/EntropyService.h" -#include "Core/HandleSlot.h" -#include "Core/RefObject.h" -#include "TypeSystem/TypeID.h" #include +#include #include +#include #include -#include -#include #include -#include +#include -namespace EntropyEngine { - namespace Core { - - /** - * @brief Registry and lifecycle orchestrator for EntropyService instances. - * - * Services are registered and looked up by static TypeID (RTTI-less). The registry - * can load/start/stop/unload all registered services honoring their declared type - * dependencies. 
- * - * Services are stamped with handle identity (owner/index/generation) on registration, - * enabling generation-based validation and WeakRef support for safe cross-subsystem - * references. - */ - class EntropyServiceRegistry { - public: - EntropyServiceRegistry() = default; - ~EntropyServiceRegistry(); - - // Non-copyable, non-movable (services hold back-references) - EntropyServiceRegistry(const EntropyServiceRegistry&) = delete; - EntropyServiceRegistry& operator=(const EntropyServiceRegistry&) = delete; - EntropyServiceRegistry(EntropyServiceRegistry&&) = delete; - EntropyServiceRegistry& operator=(EntropyServiceRegistry&&) = delete; - - // Registration API - bool registerService(std::shared_ptr service); - // Preferred: register with static type to avoid dynamic RTTI lookups - template - bool registerService(std::shared_ptr service) { - static_assert(std::is_base_of_v, "TService must derive from EntropyService"); - // Delegate to non-template version for stamping - return registerService(std::static_pointer_cast(service)); - } - - /** - * @brief Unregister a service by type - * - * Clears the service's handle stamp and removes it from the registry. - * The slot's generation is incremented to invalidate stale WeakRefs. - * - * @return true if service was found and unregistered - */ - bool unregisterService(const TypeSystem::TypeID& tid); - template - bool unregisterService() { return unregisterService(TypeSystem::createTypeId()); } - - // Type-based lookup API (returns shared_ptr for compatibility) - std::shared_ptr get(const TypeSystem::TypeID& tid) const; - template - std::shared_ptr get() const { - auto base = get(TypeSystem::createTypeId()); - return std::static_pointer_cast(base); - } - - /** - * @brief Get a RefObject reference to a service - * - * Returns a RefObject that can be used with WeakRef for safe non-owning - * references. The service is stamped with generation for validation. 
- */ - template - RefObject getRef() const { - auto slot = getSlot(TypeSystem::createTypeId()); - if (!slot || !slot->service) return {}; - // Create RefObject with retain - return RefObject(retain, static_cast(slot->service.get())); - } - - bool has(const TypeSystem::TypeID& tid) const noexcept; - template - bool has() const noexcept { return has(TypeSystem::createTypeId()); } - - /** - * @brief Validate a service is still registered with matching generation - */ - bool isValid(const EntropyService* service) const noexcept; - - size_t serviceCount() const noexcept { return _slots.size(); } - - // Lifecycle control (throws std::runtime_error on dependency errors) - void loadAll(); - void startAll(); - void stopAll(); - void unloadAll(); - - private: - /** - * @brief Internal slot for service storage with generation tracking - */ - struct ServiceSlot { - std::shared_ptr service; ///< The service instance - SlotGeneration generation; ///< Handle validation counter - uint32_t slotIndex = 0; ///< Index used for handle stamping - }; - - // Returns topologically sorted type ids according to dependencies - std::vector topoOrder() const; - - // Get slot by type (internal) - const ServiceSlot* getSlot(const TypeSystem::TypeID& tid) const; - ServiceSlot* getSlot(const TypeSystem::TypeID& tid); - - std::unordered_map _slots; ///< Type -> slot mapping - uint32_t _nextSlotIndex = 0; ///< Counter for unique slot indices - }; - - } // namespace Core -} // namespace EntropyEngine +#include "Core/EntropyService.h" +#include "Core/HandleSlot.h" +#include "Core/RefObject.h" +#include "CoreCommon.h" +#include "TypeSystem/TypeID.h" + +namespace EntropyEngine +{ +namespace Core +{ + +/** + * @brief Registry and lifecycle orchestrator for EntropyService instances. + * + * Services are registered and looked up by static TypeID (RTTI-less). The registry + * can load/start/stop/unload all registered services honoring their declared type + * dependencies. 
+ * + * Services are stamped with handle identity (owner/index/generation) on registration, + * enabling generation-based validation and WeakRef support for safe cross-subsystem + * references. + */ +class EntropyServiceRegistry +{ +public: + EntropyServiceRegistry() = default; + ~EntropyServiceRegistry(); + + // Non-copyable, non-movable (services hold back-references) + EntropyServiceRegistry(const EntropyServiceRegistry&) = delete; + EntropyServiceRegistry& operator=(const EntropyServiceRegistry&) = delete; + EntropyServiceRegistry(EntropyServiceRegistry&&) = delete; + EntropyServiceRegistry& operator=(EntropyServiceRegistry&&) = delete; + + // Registration API + bool registerService(std::shared_ptr service); + // Preferred: register with static type to avoid dynamic RTTI lookups + template + bool registerService(std::shared_ptr service) { + static_assert(std::is_base_of_v, "TService must derive from EntropyService"); + // Delegate to non-template version for stamping + return registerService(std::static_pointer_cast(service)); + } + + /** + * @brief Unregister a service by type + * + * Clears the service's handle stamp and removes it from the registry. + * The slot's generation is incremented to invalidate stale WeakRefs. + * + * @return true if service was found and unregistered + */ + bool unregisterService(const TypeSystem::TypeID& tid); + template + bool unregisterService() { + return unregisterService(TypeSystem::createTypeId()); + } + + // Type-based lookup API (returns shared_ptr for compatibility) + std::shared_ptr get(const TypeSystem::TypeID& tid) const; + template + std::shared_ptr get() const { + auto base = get(TypeSystem::createTypeId()); + return std::static_pointer_cast(base); + } + + /** + * @brief Get a RefObject reference to a service + * + * Returns a RefObject that can be used with WeakRef for safe non-owning + * references. The service is stamped with generation for validation. 
+ */ + template + RefObject getRef() const { + auto slot = getSlot(TypeSystem::createTypeId()); + if (!slot || !slot->service) return {}; + // Create RefObject with retain + return RefObject(retain, static_cast(slot->service.get())); + } + + bool has(const TypeSystem::TypeID& tid) const noexcept; + template + bool has() const noexcept { + return has(TypeSystem::createTypeId()); + } + + /** + * @brief Validate a service is still registered with matching generation + */ + bool isValid(const EntropyService* service) const noexcept; + + size_t serviceCount() const noexcept { + return _slots.size(); + } + + // Lifecycle control (throws std::runtime_error on dependency errors) + void loadAll(); + void startAll(); + void stopAll(); + void unloadAll(); + +private: + /** + * @brief Internal slot for service storage with generation tracking + */ + struct ServiceSlot + { + std::shared_ptr service; ///< The service instance + SlotGeneration generation; ///< Handle validation counter + uint32_t slotIndex = 0; ///< Index used for handle stamping + }; + + // Returns topologically sorted type ids according to dependencies + std::vector topoOrder() const; + + // Get slot by type (internal) + const ServiceSlot* getSlot(const TypeSystem::TypeID& tid) const; + ServiceSlot* getSlot(const TypeSystem::TypeID& tid); + + std::unordered_map _slots; ///< Type -> slot mapping + uint32_t _nextSlotIndex = 0; ///< Counter for unique slot indices +}; + +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/EventBus.h b/src/Core/EventBus.h index e23fa46..73ddf42 100644 --- a/src/Core/EventBus.h +++ b/src/Core/EventBus.h @@ -10,7 +10,7 @@ /** * @file EventBus.h * @brief Type-safe publish-subscribe event system - because coupling is for train cars, not code - * + * * This file contains a lightweight EventBus implementation designed for per-instance use. 
* Unlike traditional global event buses, this one is meant to be embedded in your objects - * every WorkGraph, game entity, or UI widget can have its own private event highway. @@ -18,45 +18,47 @@ #pragma once +#include #include -#include -#include -#include #include +#include #include -#include +#include +#include -namespace EntropyEngine { -namespace Core { +namespace EntropyEngine +{ +namespace Core +{ /** * @brief Type-safe publish-subscribe event system for decoupled communication - * + * * EventBus implements a publish-subscribe pattern where components can publish events * and subscribe to specific event types without direct knowledge of each other. * Publishers and subscribers only need to share common event type definitions. - * + * * Unlike traditional global event buses, this implementation is lightweight enough * to support thousands of instances - one per WorkGraph, game entity, or UI widget. * Memory usage remains manageable even with many instances. - * + * * Key features: * - Type-safe: Can't accidentally subscribe to the wrong event type * - Thread-safe: Supports concurrent publishing from any thread * - Zero virtual functions: No vtable overhead * - Exception-safe: One bad handler won't crash the whole system * - Self-cleaning: Removes empty handler lists when unsubscribing - * + * * Common use cases: * - Decoupling UI from game logic * - Progress notifications from long-running operations * - State change notifications in complex systems * - Any time you're tempted to add Yet Another Callback Parameter - * + * * Complexity characteristics: * - Subscribe: O(1) amortized * - Publish: O(n) where n = subscribers for that event type - * + * * @code * // Define your event types - just plain structs * struct PlayerHealthChanged { @@ -64,7 +66,7 @@ namespace Core { * int newHealth; * bool isDead() const { return newHealth <= 0; } * }; - * + * * // Subscribe from anywhere * EventBus& bus = gameEntity.getEventBus(); * auto healthId = 
bus.subscribe([this](const auto& e) { @@ -73,45 +75,46 @@ namespace Core { * showGameOverScreen(); * } * }); - * + * * // Publish from anywhere else * bus.publish(PlayerHealthChanged{100, 0}); // RIP player - * + * * // Clean up when done * bus.unsubscribe(healthId); * @endcode */ -class EventBus { +class EventBus +{ public: using HandlerId = size_t; using EventHandler = std::function; - + EventBus() = default; ~EventBus() = default; - + // Move-only to prevent accidental copies EventBus(const EventBus&) = delete; EventBus& operator=(const EventBus&) = delete; // Cannot move with mutex member EventBus(EventBus&&) = delete; EventBus& operator=(EventBus&&) = delete; - + /** * @brief Sign up to receive a specific type of event - like subscribing to a newsletter - * + * * Handler called for each published event. Returns ID for unsubscribing. * Thread-safe. - * + * * @tparam EventType The event struct/class you want to receive * @param handler Your callback - lambda, function, or callable * @return A unique ID for this subscription (save it!) 
- * + * * @code * // Simple lambda subscription * auto id = bus.subscribe([](const MouseClick& e) { * std::cout << "Click at (" << e.x << ", " << e.y << ")\n"; * }); - * + * * // Capture local state * int clickCount = 0; * bus.subscribe([&clickCount](const MouseClick& e) { @@ -120,17 +123,17 @@ class EventBus { * unlockAchievement("ClickHappy"); * } * }); - * + * * // Member function binding * bus.subscribe( * std::bind(&UIManager::onGameStateChanged, this, std::placeholders::_1) * ); * @endcode */ - template + template HandlerId subscribe(std::function handler) { std::lock_guard lock(_mutex); - + auto typeIndex = std::type_index(typeid(EventType)); auto wrappedHandler = [handler](const std::any& event) { try { @@ -140,57 +143,56 @@ class EventBus { // Type mismatch - should not happen with correct usage } }; - + HandlerId id = _nextHandlerId++; _handlers[typeIndex].emplace_back(id, std::move(wrappedHandler)); - - + return id; } - + /** * @brief Cancel your subscription - stop receiving these events - * + * * Pass the ID from subscribe() to remove handler. Cleans up empty lists. * Thread-safe. - * + * * @tparam EventType The same event type you subscribed to * @param handlerId The ID you got from subscribe() * @return true if successfully unsubscribed, false if ID wasn't found - * + * * @code * // Always save your subscription IDs! 
* class GameUI { * EventBus& bus; * EventBus::HandlerId healthSubId; - * + * * void onEnable() { * healthSubId = bus.subscribe([this](auto& e) { * updateHealthBar(e.newHealth); * }); * } - * + * * void onDisable() { * bus.unsubscribe(healthSubId); * } * }; * @endcode */ - template + template bool unsubscribe(HandlerId handlerId) { std::lock_guard lock(_mutex); - + auto typeIndex = std::type_index(typeid(EventType)); auto it = _handlers.find(typeIndex); if (it == _handlers.end()) { return false; } - + auto& handlers = it->second; for (auto handlerIt = handlers.begin(); handlerIt != handlers.end(); ++handlerIt) { if (handlerIt->first == handlerId) { handlers.erase(handlerIt); - + // Clean up empty handler list to save memory if (handlers.empty()) { _handlers.erase(it); @@ -198,47 +200,47 @@ class EventBus { return true; } } - + return false; } - + /** * @brief Broadcast an event to all interested parties - fire and forget! - * + * * Sends to all subscribers of this type. Handlers called synchronously. * Safe: copies handlers, catches exceptions. Thread-safe. - * + * * @tparam EventType The event type you're publishing * @param event The event data to send - * + * * @code * // Fire a simple event * bus.publish(LevelCompleted{currentLevel, score, timeElapsed}); - * + * * // Events can have methods * struct DamageEvent { * Entity* target; * int amount; * DamageType type; - * - * bool isLethal() const { - * return target->health <= amount; + * + * bool isLethal() const { + * return target->health <= amount; * } * }; - * + * * bus.publish(DamageEvent{player, 50, DamageType::Fire}); - * + * * // Publishing to no subscribers is fine - nothing happens * bus.publish(ObscureDebugEvent{}); // No subscribers? No problem! 
* @endcode */ - template + template void publish(const EventType& event) { std::vector handlersToCall; - + { std::lock_guard lock(_mutex); - + auto typeIndex = std::type_index(typeid(EventType)); auto it = _handlers.find(typeIndex); if (it != _handlers.end()) { @@ -249,8 +251,7 @@ class EventBus { } } } - - + // Call handlers outside of lock to prevent deadlock for (const auto& handler : handlersToCall) { try { @@ -261,15 +262,15 @@ class EventBus { } } } - + /** * @brief Count how many subscribers are listening for a specific event type - * + * * Skip expensive work if nobody's listening. Good for debugging too. - * + * * @tparam EventType The event type to check * @return Number of active subscribers for this event type - * + * * @code * // Optimize expensive operations * if (bus.getSubscriberCount() > 0) { @@ -279,28 +280,28 @@ class EventBus { * } * @endcode */ - template + template size_t getSubscriberCount() const { std::lock_guard lock(_mutex); - + auto typeIndex = std::type_index(typeid(EventType)); auto it = _handlers.find(typeIndex); return (it != _handlers.end()) ? it->second.size() : 0; } - + /** * @brief Quick check if anyone is listening to anything at all - * + * * @return true if any handlers are registered, false if completely empty */ bool hasSubscribers() const { std::lock_guard lock(_mutex); return !_handlers.empty(); } - + /** * @brief Count total subscriptions across all event types - * + * * @return Sum of all subscriptions for all event types */ size_t getTotalSubscriptions() const { @@ -311,10 +312,10 @@ class EventBus { } return total; } - + /** * @brief Nuclear option: remove all subscriptions for all event types - * + * * Wipes clean. All handler IDs become invalid. For shutdown cleanup. * Thread-safe. */ @@ -322,43 +323,43 @@ class EventBus { std::lock_guard lock(_mutex); _handlers.clear(); } - + /** * @brief Estimate how much memory this EventBus is using - * + * * Includes object + dynamic allocations. Close enough for profiling. 
- * + * * @return Approximate bytes used by this EventBus - * + * * @code * // Memory profiling * if (bus.getMemoryUsage() > 1024 * 1024) { // 1MB - * LOG_WARN("EventBus using {}KB of memory!", + * LOG_WARN("EventBus using {}KB of memory!", * bus.getMemoryUsage() / 1024); * } * @endcode */ size_t getMemoryUsage() const { std::lock_guard lock(_mutex); - + size_t usage = sizeof(*this); usage += _handlers.size() * (sizeof(std::type_index) + sizeof(std::vector>)); - + for (const auto& [type, handlers] : _handlers) { usage += handlers.size() * (sizeof(HandlerId) + sizeof(EventHandler)); } - + return usage; } - + private: mutable std::mutex _mutex; ///< Protects all operations (mutable for const methods) - + /// Type-erased storage: maps event types to lists of (id, handler) pairs std::unordered_map>> _handlers; - + HandlerId _nextHandlerId = 1; ///< Simple incrementing ID generator }; -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/HandleSlot.h b/src/Core/HandleSlot.h index 58565ae..ab0edc3 100644 --- a/src/Core/HandleSlot.h +++ b/src/Core/HandleSlot.h @@ -50,12 +50,14 @@ #pragma once -#include "EntropyObject.h" #include #include #include -namespace EntropyEngine::Core { +#include "EntropyObject.h" + +namespace EntropyEngine::Core +{ /** * @brief Generation counter component for slot-based pools @@ -73,7 +75,8 @@ namespace EntropyEngine::Core { * }; * @endcode */ -struct SlotGeneration { +struct SlotGeneration +{ std::atomic value{1}; ///< Generation counter (starts at 1, 0 reserved for "never allocated") SlotGeneration() noexcept = default; @@ -82,9 +85,7 @@ struct SlotGeneration { SlotGeneration(const SlotGeneration&) = delete; SlotGeneration& operator=(const SlotGeneration&) = delete; - SlotGeneration(SlotGeneration&& other) noexcept - : value(other.value.load(std::memory_order_relaxed)) { - } + SlotGeneration(SlotGeneration&& other) noexcept : 
value(other.value.load(std::memory_order_relaxed)) {} SlotGeneration& operator=(SlotGeneration&& other) noexcept { value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed); @@ -130,7 +131,8 @@ struct SlotGeneration { * These helper functions encapsulate the common handle lifecycle operations, * ensuring consistent usage of the generation pattern across all pools. */ -struct HandleSlotOps { +struct HandleSlotOps +{ /** * @brief Stamps an EntropyObject with the slot's identity * @@ -143,10 +145,10 @@ struct HandleSlotOps { * @param index Slot index within the pool * @param generation The slot's generation counter */ - template + template static void stamp(T& obj, void* owner, uint32_t index, const SlotGeneration& generation) { static_assert(std::is_base_of_v, - "HandleSlotOps::stamp requires T to derive from EntropyObject"); + "HandleSlotOps::stamp requires T to derive from EntropyObject"); HandleAccess::set(obj, owner, index, generation.current()); } @@ -167,11 +169,11 @@ struct HandleSlotOps { * @param generation The slot's generation counter * @return true if the object is valid for this slot */ - template - [[nodiscard]] static bool validate(const T* obj, const void* expectedOwner, - uint32_t expectedIndex, const SlotGeneration& generation) noexcept { + template + [[nodiscard]] static bool validate(const T* obj, const void* expectedOwner, uint32_t expectedIndex, + const SlotGeneration& generation) noexcept { static_assert(std::is_base_of_v, - "HandleSlotOps::validate requires T to derive from EntropyObject"); + "HandleSlotOps::validate requires T to derive from EntropyObject"); if (!obj) return false; if (!obj->hasHandle()) return false; @@ -190,10 +192,10 @@ struct HandleSlotOps { * @param obj Object to release * @param generation The slot's generation counter (will be incremented) */ - template + template static void release(T& obj, SlotGeneration& generation) { static_assert(std::is_base_of_v, - "HandleSlotOps::release requires T to 
derive from EntropyObject"); + "HandleSlotOps::release requires T to derive from EntropyObject"); HandleAccess::clear(obj); generation.increment(); } @@ -208,10 +210,10 @@ struct HandleSlotOps { * @tparam T EntropyObject-derived type * @param obj Object to clear */ - template + template static void clear(T& obj) { static_assert(std::is_base_of_v, - "HandleSlotOps::clear requires T to derive from EntropyObject"); + "HandleSlotOps::clear requires T to derive from EntropyObject"); HandleAccess::clear(obj); } }; @@ -223,4 +225,4 @@ struct HandleSlotOps { */ constexpr uint32_t INVALID_SLOT_INDEX = ~0u; -} // namespace EntropyEngine::Core +} // namespace EntropyEngine::Core diff --git a/src/Core/RefObject.h b/src/Core/RefObject.h index 46a519f..d573051 100644 --- a/src/Core/RefObject.h +++ b/src/Core/RefObject.h @@ -3,28 +3,36 @@ // #pragma once -#include "EntropyObject.h" -#include -#include #include +#include +#include -namespace EntropyEngine::Core { +#include "EntropyObject.h" + +namespace EntropyEngine::Core +{ -struct adopt_t { explicit adopt_t() = default; }; -struct retain_t { explicit retain_t() = default; }; +struct adopt_t +{ + explicit adopt_t() = default; +}; +struct retain_t +{ + explicit retain_t() = default; +}; inline constexpr adopt_t adopt{}; inline constexpr retain_t retain{}; -template -class RefObject { - static_assert(std::is_base_of_v, - "T must derive from EntropyObject"); - +template +class RefObject +{ + static_assert(std::is_base_of_v, "T must derive from EntropyObject"); + T* _ptr = nullptr; - + public: RefObject() noexcept = default; - + // Adopting constructor (default behavior) explicit RefObject(T* ptr) noexcept : _ptr(ptr) {} @@ -33,14 +41,13 @@ class RefObject { explicit RefObject(retain_t, T* ptr) noexcept : _ptr(nullptr) { if (ptr && ptr->tryRetain()) _ptr = ptr; } - + ~RefObject() noexcept { if (_ptr) _ptr->release(); } - - RefObject(RefObject&& other) noexcept - : _ptr(std::exchange(other._ptr, nullptr)) {} - + + 
RefObject(RefObject&& other) noexcept : _ptr(std::exchange(other._ptr, nullptr)) {} + RefObject& operator=(RefObject&& other) noexcept { if (this != &other) { if (_ptr) _ptr->release(); @@ -48,13 +55,13 @@ class RefObject { } return *this; } - + RefObject(const RefObject& other) noexcept : _ptr(other._ptr) { if (_ptr && !_ptr->tryRetain()) { _ptr = nullptr; } } - + RefObject& operator=(const RefObject& other) noexcept { if (this != &other) { T* newPtr = other._ptr; @@ -69,7 +76,7 @@ class RefObject { } // Converting copy ctor from RefObject where U derives from T (upcast) - template>> + template >> RefObject(const RefObject& other) noexcept : _ptr(static_cast(other.get())) { if (_ptr && !_ptr->tryRetain()) { _ptr = nullptr; @@ -77,11 +84,11 @@ class RefObject { } // Converting move ctor from RefObject where U derives from T (upcast) - template>> + template >> RefObject(RefObject&& other) noexcept : _ptr(static_cast(other.detach())) {} // Converting copy assignment - template>> + template >> RefObject& operator=(const RefObject& other) noexcept { T* newPtr = static_cast(other.get()); if (_ptr != newPtr) { @@ -96,7 +103,7 @@ class RefObject { } // Converting move assignment - template>> + template >> RefObject& operator=(RefObject&& other) noexcept { if (reinterpret_cast(_ptr) != reinterpret_cast(other.get())) { if (_ptr) _ptr->release(); @@ -107,19 +114,27 @@ class RefObject { } return *this; } - - T* get() const noexcept { return _ptr; } - T* operator->() const noexcept { return _ptr; } - T& operator*() const noexcept { return *_ptr; } - explicit operator bool() const noexcept { return _ptr != nullptr; } - + + T* get() const noexcept { + return _ptr; + } + T* operator->() const noexcept { + return _ptr; + } + T& operator*() const noexcept { + return *_ptr; + } + explicit operator bool() const noexcept { + return _ptr != nullptr; + } + [[nodiscard]] T* detach() noexcept { return std::exchange(_ptr, nullptr); } - + void reset(T* ptr = nullptr) noexcept { - if (ptr == 
_ptr) return; // no-op if identical - if (ptr) ptr->retain(); // take ownership of new before dropping old + if (ptr == _ptr) return; // no-op if identical + if (ptr) ptr->retain(); // take ownership of new before dropping old T* old = _ptr; _ptr = ptr; if (old) old->release(); @@ -166,10 +181,10 @@ class RefObject { * } // else: mesh was destroyed or slot reused * @endcode */ -template -class WeakRef { - static_assert(std::is_base_of_v, - "T must derive from EntropyObject"); +template +class WeakRef +{ + static_assert(std::is_base_of_v, "T must derive from EntropyObject"); T* _ptr = nullptr; uint32_t _generation = 0; ///< Captured handle generation for validation @@ -178,20 +193,15 @@ class WeakRef { WeakRef() noexcept = default; /// Construct from raw pointer (non-owning, captures current generation) - explicit WeakRef(T* ptr) noexcept - : _ptr(ptr) - , _generation(ptr ? ptr->handleGeneration() : 0) {} + explicit WeakRef(T* ptr) noexcept : _ptr(ptr), _generation(ptr ? ptr->handleGeneration() : 0) {} /// Construct from RefObject (non-owning, captures current generation) - WeakRef(const RefObject& ref) noexcept - : _ptr(ref.get()) - , _generation(_ptr ? _ptr->handleGeneration() : 0) {} + WeakRef(const RefObject& ref) noexcept : _ptr(ref.get()), _generation(_ptr ? _ptr->handleGeneration() : 0) {} /// Construct from derived RefObject - template>> + template >> WeakRef(const RefObject& ref) noexcept - : _ptr(static_cast(ref.get())) - , _generation(_ptr ? _ptr->handleGeneration() : 0) {} + : _ptr(static_cast(ref.get())), _generation(_ptr ? _ptr->handleGeneration() : 0) {} // Default copy/move operations (copies generation too) WeakRef(const WeakRef&) noexcept = default; @@ -207,7 +217,7 @@ class WeakRef { } /// Assign from derived RefObject (captures new generation) - template>> + template >> WeakRef& operator=(const RefObject& ref) noexcept { _ptr = static_cast(ref.get()); _generation = _ptr ? 
_ptr->handleGeneration() : 0; @@ -255,13 +265,19 @@ class WeakRef { } /// Get raw pointer (unsafe - for debugging/comparison only) - [[nodiscard]] T* get() const noexcept { return _ptr; } + [[nodiscard]] T* get() const noexcept { + return _ptr; + } /// Get the captured generation (for debugging) - [[nodiscard]] uint32_t generation() const noexcept { return _generation; } + [[nodiscard]] uint32_t generation() const noexcept { + return _generation; + } /// Check if pointing to something (may still be expired) - explicit operator bool() const noexcept { return _ptr != nullptr; } + explicit operator bool() const noexcept { + return _ptr != nullptr; + } /// Clear the weak reference void reset() noexcept { @@ -283,7 +299,7 @@ class WeakRef { } }; -template +template [[nodiscard]] RefObject makeRef(Args&&... args) { T* ptr = new T(std::forward(args)...); // Call memory profiling hook after allocation @@ -294,23 +310,35 @@ template } // Transparent hasher/equality for heterogeneous lookup by raw pointer -template -struct RefPtrHash { +template +struct RefPtrHash +{ using is_transparent = void; - size_t operator()(const RefObject& r) const noexcept { return std::hash{}(r.get()); } - size_t operator()(const T* p) const noexcept { return std::hash{}(p); } + size_t operator()(const RefObject& r) const noexcept { + return std::hash{}(r.get()); + } + size_t operator()(const T* p) const noexcept { + return std::hash{}(p); + } }; -template -struct RefPtrEq { +template +struct RefPtrEq +{ using is_transparent = void; - bool operator()(const RefObject& a, const RefObject& b) const noexcept { return a.get() == b.get(); } - bool operator()(const RefObject& a, const T* b) const noexcept { return a.get() == b; } - bool operator()(const T* a, const RefObject& b) const noexcept { return a == b.get(); } + bool operator()(const RefObject& a, const RefObject& b) const noexcept { + return a.get() == b.get(); + } + bool operator()(const RefObject& a, const T* b) const noexcept { + return 
a.get() == b; + } + bool operator()(const T* a, const RefObject& b) const noexcept { + return a == b.get(); + } }; // Casting helpers: wrap the same underlying pointer and RETAIN it to avoid double-release -template +template RefObject ref_static_cast(const RefObject& r) noexcept { if (auto p = static_cast(r.get())) { if (p->tryRetain()) return RefObject(adopt, p); @@ -318,7 +346,7 @@ RefObject ref_static_cast(const RefObject& r) noexcept { return RefObject{}; } -template +template RefObject ref_dynamic_cast(const RefObject& r) noexcept { if (auto p = dynamic_cast(r.get())) { if (p->tryRetain()) return RefObject(adopt, p); @@ -326,14 +354,16 @@ RefObject ref_dynamic_cast(const RefObject& r) noexcept { return RefObject{}; } -} // namespace EntropyEngine::Core +} // namespace EntropyEngine::Core // Hash support for RefObject by identity -namespace std { - template - struct hash> { - size_t operator()(const EntropyEngine::Core::RefObject& r) const noexcept { - return std::hash{}(r.get()); - } - }; -} \ No newline at end of file +namespace std +{ +template +struct hash> +{ + size_t operator()(const EntropyEngine::Core::RefObject& r) const noexcept { + return std::hash{}(r.get()); + } +}; +} // namespace std diff --git a/src/Core/SlotPool.h b/src/Core/SlotPool.h index 5fb1500..4205968 100644 --- a/src/Core/SlotPool.h +++ b/src/Core/SlotPool.h @@ -49,13 +49,15 @@ #pragma once -#include "HandleSlot.h" -#include "RefObject.h" -#include #include #include +#include -namespace EntropyEngine::Core { +#include "HandleSlot.h" +#include "RefObject.h" + +namespace EntropyEngine::Core +{ /** * @brief Slot-based pool with generation-based handle validation @@ -66,13 +68,15 @@ namespace EntropyEngine::Core { * - All public methods are thread-safe (protected by internal mutex) * - SlotData must be safe to access under the returned lock guard */ -template -class SlotPool { +template +class SlotPool +{ public: /** * @brief Internal slot structure combining generation tracking with user 
data */ - struct Slot { + struct Slot + { SlotGeneration generation; ///< Authoritative generation counter SlotData data; ///< User-defined slot data bool active = false; ///< Slot is currently allocated @@ -81,7 +85,8 @@ class SlotPool { /** * @brief Result of allocate() containing slot index and data reference */ - struct AllocResult { + struct AllocResult + { uint32_t index; SlotData& data; }; @@ -94,10 +99,7 @@ class SlotPool { * * @param capacity Maximum number of slots */ - explicit SlotPool(size_t capacity) - : _slots(capacity) - , _capacity(capacity) - { + explicit SlotPool(size_t capacity) : _slots(capacity), _capacity(capacity) { // Initialize free list (all slots are free initially) _freeList.reserve(capacity); for (size_t i = 0; i < capacity; ++i) { @@ -136,7 +138,7 @@ class SlotPool { * @param obj Object to stamp * @param index Slot index from allocate() */ - template + template void stamp(T& obj, uint32_t index) { std::lock_guard lock(_mutex); @@ -152,7 +154,7 @@ class SlotPool { * @param index Expected slot index * @return true if object is valid for this slot */ - template + template [[nodiscard]] bool isValid(const T* obj, uint32_t index) const { std::lock_guard lock(_mutex); @@ -172,7 +174,7 @@ class SlotPool { * @param obj Object to validate * @return Pointer to slot data, or nullptr if invalid */ - template + template [[nodiscard]] const SlotData* getIfValid(const T* obj) const { if (!obj || !obj->hasHandle()) return nullptr; if (obj->handleOwner() != this) return nullptr; @@ -199,7 +201,7 @@ class SlotPool { * @param obj Object to release * @param index Slot index */ - template + template void release(T& obj, uint32_t index) { std::lock_guard lock(_mutex); @@ -258,7 +260,9 @@ class SlotPool { } // Statistics - [[nodiscard]] size_t capacity() const noexcept { return _capacity; } + [[nodiscard]] size_t capacity() const noexcept { + return _capacity; + } [[nodiscard]] size_t activeCount() const noexcept { std::lock_guard lock(_mutex); return 
_activeCount; @@ -273,7 +277,7 @@ class SlotPool { * * @param fn Callback receiving (index, SlotData&) for each active slot */ - template + template void forEachActive(Fn&& fn) { std::lock_guard lock(_mutex); for (size_t i = 0; i < _capacity; ++i) { @@ -286,7 +290,7 @@ class SlotPool { /** * @brief Iterates over all active slots (const) */ - template + template void forEachActive(Fn&& fn) const { std::lock_guard lock(_mutex); for (size_t i = 0; i < _capacity; ++i) { @@ -304,4 +308,4 @@ class SlotPool { mutable std::mutex _mutex; }; -} // namespace EntropyEngine::Core +} // namespace EntropyEngine::Core diff --git a/src/Core/Timer.cpp b/src/Core/Timer.cpp index 3ac3cd0..5e8ee9a 100644 --- a/src/Core/Timer.cpp +++ b/src/Core/Timer.cpp @@ -8,28 +8,23 @@ */ #include "Timer.h" + #include "TimerService.h" -namespace EntropyEngine { -namespace Core { +namespace EntropyEngine +{ +namespace Core +{ -Timer::Timer(TimerService* service, - Concurrency::WorkGraph::NodeHandle node, - Duration interval, - bool repeating) - : _service(service) - , _node(std::move(node)) - , _interval(interval) - , _repeating(repeating) - , _valid(true) { -} +Timer::Timer(TimerService* service, Concurrency::WorkGraph::NodeHandle node, Duration interval, bool repeating) + : _service(service), _node(std::move(node)), _interval(interval), _repeating(repeating), _valid(true) {} Timer::Timer(Timer&& other) noexcept - : _service(other._service) - , _node(std::move(other._node)) - , _interval(other._interval) - , _repeating(other._repeating) - , _valid(other._valid.load(std::memory_order_acquire)) { + : _service(other._service), + _node(std::move(other._node)), + _interval(other._interval), + _repeating(other._repeating), + _valid(other._valid.load(std::memory_order_acquire)) { other._service = nullptr; other._valid.store(false, std::memory_order_release); } @@ -71,5 +66,5 @@ bool Timer::isValid() const { return _valid.load(std::memory_order_acquire); } -} // namespace Core -} // namespace EntropyEngine 
+} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/Timer.h b/src/Core/Timer.h index 8d1ed91..bfe1cfd 100644 --- a/src/Core/Timer.h +++ b/src/Core/Timer.h @@ -18,15 +18,18 @@ #pragma once +#include #include #include -#include #include + #include "../Concurrency/WorkGraph.h" #include "../Concurrency/WorkGraphTypes.h" -namespace EntropyEngine { -namespace Core { +namespace EntropyEngine +{ +namespace Core +{ // Forward declaration class TimerService; @@ -80,7 +83,8 @@ class TimerService; * ); * @endcode */ -class Timer { +class Timer +{ public: using TimePoint = std::chrono::steady_clock::time_point; using Duration = std::chrono::steady_clock::duration; @@ -162,14 +166,18 @@ class Timer { * * @return The interval duration, or zero for one-shot timers */ - Duration getInterval() const { return _interval; } + Duration getInterval() const { + return _interval; + } /** * @brief Checks if this is a repeating timer * * @return true if the timer repeats, false for one-shot */ - bool isRepeating() const { return _repeating; } + bool isRepeating() const { + return _repeating; + } private: // Only TimerService can create active timers @@ -183,10 +191,7 @@ class Timer { * @param interval Timer interval (for repeating timers) * @param repeating Whether this timer repeats */ - Timer(TimerService* service, - Concurrency::WorkGraph::NodeHandle node, - Duration interval, - bool repeating); + Timer(TimerService* service, Concurrency::WorkGraph::NodeHandle node, Duration interval, bool repeating); TimerService* _service = nullptr; Concurrency::WorkGraph::NodeHandle _node; @@ -195,5 +200,5 @@ class Timer { std::atomic _valid{false}; }; -} // namespace Core -} // namespace EntropyEngine +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/TimerService.cpp b/src/Core/TimerService.cpp index 2d15476..8c46cb4 100644 --- a/src/Core/TimerService.cpp +++ b/src/Core/TimerService.cpp @@ -8,19 +8,19 @@ */ #include "TimerService.h" -#include 
"../Concurrency/WorkService.h" + #include -namespace EntropyEngine { -namespace Core { +#include "../Concurrency/WorkService.h" -TimerService::TimerService() - : TimerService(Config{}) { -} +namespace EntropyEngine +{ +namespace Core +{ -TimerService::TimerService(const Config& config) - : _config(config) { -} +TimerService::TimerService() : TimerService(Config{}) {} + +TimerService::TimerService(const Config& config) : _config(config) {} TimerService::~TimerService() { // Ensure clean shutdown @@ -136,10 +136,8 @@ void TimerService::setWorkService(Concurrency::WorkService* workService) { } } -Timer TimerService::scheduleTimer(std::chrono::steady_clock::duration interval, - Timer::WorkFunction work, - bool repeating, - Concurrency::ExecutionType executionType) { +Timer TimerService::scheduleTimer(std::chrono::steady_clock::duration interval, Timer::WorkFunction work, + bool repeating, Concurrency::ExecutionType executionType) { if (!_workGraph) { throw std::runtime_error("TimerService not loaded"); } @@ -190,9 +188,7 @@ Timer TimerService::scheduleTimer(std::chrono::steady_clock::duration interval, // Not time yet - yield until fire time instead of immediate reschedule return Concurrency::WorkResultContext::yieldUntil(timerData->fireTime); }, - "Timer", - nullptr, - executionType, + "Timer", nullptr, executionType, std::nullopt // No max reschedules for timers ); @@ -269,20 +265,15 @@ void TimerService::restartPumpContract() { return; } - _pumpContractHandle = _workContractGroup->createContract( - *pumpFunction, - Concurrency::ExecutionType::AnyThread - ); + _pumpContractHandle = + _workContractGroup->createContract(*pumpFunction, Concurrency::ExecutionType::AnyThread); _pumpContractHandle.schedule(); } // Execution mutex released here - stop() can now proceed }; // Schedule initial execution on background thread - _pumpContractHandle = _workContractGroup->createContract( - *_pumpFunction, - Concurrency::ExecutionType::AnyThread - ); + _pumpContractHandle = 
_workContractGroup->createContract(*_pumpFunction, Concurrency::ExecutionType::AnyThread); _pumpContractHandle.schedule(); } @@ -293,5 +284,5 @@ size_t TimerService::processReadyTimers() { return 0; } -} // namespace Core -} // namespace EntropyEngine +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/TimerService.h b/src/Core/TimerService.h index 5c54dc9..0384ef1 100644 --- a/src/Core/TimerService.h +++ b/src/Core/TimerService.h @@ -18,22 +18,26 @@ #pragma once -#include #include +#include #include #include -#include "EntropyService.h" -#include "Timer.h" -#include "../Concurrency/WorkGraph.h" + #include "../Concurrency/WorkContractGroup.h" +#include "../Concurrency/WorkGraph.h" #include "../TypeSystem/TypeID.h" +#include "EntropyService.h" +#include "Timer.h" -namespace EntropyEngine { -namespace Core { +namespace EntropyEngine +{ +namespace Core +{ // Forward declaration -namespace Concurrency { - class WorkService; +namespace Concurrency +{ +class WorkService; } /** @@ -89,12 +93,14 @@ namespace Concurrency { * frameTimer.invalidate(); * @endcode */ -class TimerService : public EntropyService { +class TimerService : public EntropyService +{ public: /** * @brief Configuration for the timer service */ - struct Config { + struct Config + { size_t workContractGroupSize = 1024; ///< Size of internal work contract pool }; @@ -116,9 +122,15 @@ class TimerService : public EntropyService { ~TimerService() override; // EntropyService interface - const char* id() const override { return "com.entropy.core.timers"; } - const char* name() const override { return "TimerService"; } - const char* version() const override { return "0.1.0"; } + const char* id() const override { + return "com.entropy.core.timers"; + } + const char* name() const override { + return "TimerService"; + } + const char* version() const override { + return "0.1.0"; + } TypeSystem::TypeID typeId() const override { return TypeSystem::createTypeId(); } @@ -184,10 +196,8 @@ class 
TimerService : public EntropyService { * ); * @endcode */ - Timer scheduleTimer(std::chrono::steady_clock::duration interval, - Timer::WorkFunction work, - bool repeating = false, - Concurrency::ExecutionType executionType = Concurrency::ExecutionType::AnyThread); + Timer scheduleTimer(std::chrono::steady_clock::duration interval, Timer::WorkFunction work, bool repeating = false, + Concurrency::ExecutionType executionType = Concurrency::ExecutionType::AnyThread); /** * @brief Gets the number of currently active timers @@ -240,11 +250,12 @@ class TimerService : public EntropyService { /** * @brief Internal timer data tracked per node */ - struct TimerData { - Timer::TimePoint fireTime; ///< When timer should fire - Timer::Duration interval; ///< Interval for repeating timers - Timer::WorkFunction work; ///< User's work function - bool repeating; ///< Whether timer repeats + struct TimerData + { + Timer::TimePoint fireTime; ///< When timer should fire + Timer::Duration interval; ///< Interval for repeating timers + Timer::WorkFunction work; ///< User's work function + bool repeating; ///< Whether timer repeats std::atomic cancelled{false}; ///< Cancellation flag }; @@ -269,5 +280,5 @@ class TimerService : public EntropyService { std::atomic _pumpShouldStop{false}; }; -} // namespace Core -} // namespace EntropyEngine +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Core/entropy_c_api.h b/src/Core/entropy_c_api.h index dd82c29..55483c5 100644 --- a/src/Core/entropy_c_api.h +++ b/src/Core/entropy_c_api.h @@ -3,8 +3,8 @@ // C ABI for EntropyCore base object and handle interop // This header is C-compatible and can be consumed by C, Rust, C#, etc. 
-#include #include +#include #ifdef __cplusplus extern "C" { @@ -12,25 +12,26 @@ extern "C" { // Export macro (works for both static and shared builds) #if defined(_WIN32) - #if defined(ENTROPYCORE_SHARED) - #if defined(ENTROPYCORE_BUILDING) - #define ENTROPY_API __declspec(dllexport) - #else - #define ENTROPY_API __declspec(dllimport) - #endif - #else - #define ENTROPY_API - #endif +#if defined(ENTROPYCORE_SHARED) +#if defined(ENTROPYCORE_BUILDING) +#define ENTROPY_API __declspec(dllexport) +#else +#define ENTROPY_API __declspec(dllimport) +#endif +#else +#define ENTROPY_API +#endif +#else +#if defined(ENTROPYCORE_SHARED) +#define ENTROPY_API __attribute__((visibility("default"))) #else - #if defined(ENTROPYCORE_SHARED) - #define ENTROPY_API __attribute__((visibility("default"))) - #else - #define ENTROPY_API - #endif +#define ENTROPY_API +#endif #endif // Status codes for C API functions -typedef enum EntropyStatus { +typedef enum EntropyStatus +{ ENTROPY_OK = 0, ENTROPY_ERR_UNKNOWN = 1, ENTROPY_ERR_INVALID_ARG = 2, @@ -42,18 +43,20 @@ typedef enum EntropyStatus { } EntropyStatus; // Booleans (explicit, stable width across languages) -typedef int32_t EntropyBool; // 0 = false, non-zero = true +typedef int32_t EntropyBool; // 0 = false, non-zero = true #define ENTROPY_FALSE 0 -#define ENTROPY_TRUE 1 +#define ENTROPY_TRUE 1 // Owned string (UTF-8). Caller must dispose via entropy_string_dispose. -typedef struct EntropyOwnedString { +typedef struct EntropyOwnedString +{ const char* ptr; - uint32_t len; + uint32_t len; } EntropyOwnedString; // Owned buffer (bytes). Caller must dispose via entropy_buffer_dispose. 
-typedef struct EntropyOwnedBuffer { +typedef struct EntropyOwnedBuffer +{ uint8_t* ptr; uint32_t len; } EntropyOwnedBuffer; @@ -66,42 +69,43 @@ typedef struct EntropyObjectRefTag EntropyObjectRef; typedef uint64_t EntropyTypeId; // POD value handle (owner pointer is process-local) -typedef struct EntropyHandle { - const void* owner; - uint32_t index; - uint32_t generation; - EntropyTypeId type_id; // optional canonical type id (0 if unknown) +typedef struct EntropyHandle +{ + const void* owner; + uint32_t index; + uint32_t generation; + EntropyTypeId type_id; // optional canonical type id (0 if unknown) } EntropyHandle; // Library/version/memory ----------------------------------------------------- ENTROPY_API void entropy_get_version(uint32_t* major, uint32_t* minor, uint32_t* patch, uint32_t* abi); ENTROPY_API void* entropy_alloc(size_t size); -ENTROPY_API void entropy_free(void* p); +ENTROPY_API void entropy_free(void* p); // Convenience/diagnostics ---------------------------------------------------- -ENTROPY_API const char* entropy_status_to_string(EntropyStatus s); // static string, no free -ENTROPY_API void entropy_string_free(const char* s); // alias of entropy_free -ENTROPY_API void entropy_string_dispose(EntropyOwnedString s); -ENTROPY_API void entropy_buffer_dispose(EntropyOwnedBuffer b); +ENTROPY_API const char* entropy_status_to_string(EntropyStatus s); // static string, no free +ENTROPY_API void entropy_string_free(const char* s); // alias of entropy_free +ENTROPY_API void entropy_string_dispose(EntropyOwnedString s); +ENTROPY_API void entropy_buffer_dispose(EntropyOwnedBuffer b); // Lifetime management (pointer-based; native/internal) ----------------------- -ENTROPY_API void entropy_object_retain(const EntropyObjectRef* obj); -ENTROPY_API void entropy_object_release(const EntropyObjectRef* obj); +ENTROPY_API void entropy_object_retain(const EntropyObjectRef* obj); +ENTROPY_API void entropy_object_release(const EntropyObjectRef* obj); ENTROPY_API 
uint32_t entropy_object_ref_count(const EntropyObjectRef* obj); // Introspection (pointer-based) ---------------------------------------------- -ENTROPY_API EntropyTypeId entropy_object_type_id(const EntropyObjectRef* obj); -ENTROPY_API const char* entropy_object_class_name(const EntropyObjectRef* obj); // borrowed, do not free -ENTROPY_API EntropyStatus entropy_object_class_name_owned(const EntropyObjectRef* obj, EntropyOwnedString* out); -ENTROPY_API EntropyStatus entropy_object_to_string(const EntropyObjectRef* obj, EntropyOwnedString* out); -ENTROPY_API EntropyStatus entropy_object_debug_string(const EntropyObjectRef* obj, EntropyOwnedString* out); -ENTROPY_API EntropyStatus entropy_object_description(const EntropyObjectRef* obj, EntropyOwnedString* out); +ENTROPY_API EntropyTypeId entropy_object_type_id(const EntropyObjectRef* obj); +ENTROPY_API const char* entropy_object_class_name(const EntropyObjectRef* obj); // borrowed, do not free +ENTROPY_API EntropyStatus entropy_object_class_name_owned(const EntropyObjectRef* obj, EntropyOwnedString* out); +ENTROPY_API EntropyStatus entropy_object_to_string(const EntropyObjectRef* obj, EntropyOwnedString* out); +ENTROPY_API EntropyStatus entropy_object_debug_string(const EntropyObjectRef* obj, EntropyOwnedString* out); +ENTROPY_API EntropyStatus entropy_object_description(const EntropyObjectRef* obj, EntropyOwnedString* out); // Handle helpers -------------------------------------------------------------- ENTROPY_API EntropyStatus entropy_object_to_handle(const EntropyObjectRef* obj, EntropyHandle* out_handle); -ENTROPY_API EntropyBool entropy_handle_is_valid(EntropyHandle h); -ENTROPY_API EntropyBool entropy_handle_equals(EntropyHandle a, EntropyHandle b); -ENTROPY_API EntropyBool entropy_handle_type_matches(EntropyHandle h, EntropyTypeId expected); +ENTROPY_API EntropyBool entropy_handle_is_valid(EntropyHandle h); +ENTROPY_API EntropyBool entropy_handle_equals(EntropyHandle a, EntropyHandle b); +ENTROPY_API 
EntropyBool entropy_handle_type_matches(EntropyHandle h, EntropyTypeId expected); // Handle-first operations ----------------------------------------------------- // Adjust object lifetime using a value handle (in-process only) @@ -109,18 +113,16 @@ ENTROPY_API EntropyStatus entropy_handle_retain(EntropyHandle h); ENTROPY_API EntropyStatus entropy_handle_release(EntropyHandle h); // Fetch basic info by handle (owned class name) -ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, - EntropyTypeId* out_type_id, - EntropyOwnedString* out_class_name); - +ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, EntropyTypeId* out_type_id, + EntropyOwnedString* out_class_name); // Owner vtable registration & generic resolver (process-local) --------------- typedef EntropyObjectRef* (*EntropyResolveFn)(const void* owner, uint32_t index, uint32_t generation); -typedef int (*EntropyValidateFn)(const void* owner, uint32_t index, uint32_t generation); +typedef int (*EntropyValidateFn)(const void* owner, uint32_t index, uint32_t generation); ENTROPY_API void entropy_register_owner_vtable(const void* owner, EntropyResolveFn resolve, EntropyValidateFn validate); -ENTROPY_API EntropyObjectRef* entropy_resolve_handle(EntropyHandle h); // returns retained object or NULL +ENTROPY_API EntropyObjectRef* entropy_resolve_handle(EntropyHandle h); // returns retained object or NULL #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/src/Core/entropy_main.h b/src/Core/entropy_main.h index f1d4ff4..dac94a0 100644 --- a/src/Core/entropy_main.h +++ b/src/Core/entropy_main.h @@ -21,7 +21,8 @@ extern "C" { typedef struct EntropyApp EntropyApp; // Delegate callbacks (executed on main thread) -typedef struct EntropyAppDelegateC { +typedef struct EntropyAppDelegateC +{ void (*will_finish_launching)(EntropyApp* app, void* userdata); void (*did_finish_launching)(EntropyApp* app, void* userdata); bool (*should_terminate)(EntropyApp* app, void* userdata); @@ 
-31,14 +32,14 @@ typedef struct EntropyAppDelegateC { } EntropyAppDelegateC; // Runtime configuration for bootstrap -typedef struct EntropyMainConfig { - uint32_t worker_threads; // 0 => auto - uint32_t shutdown_deadline_ms; // graceful drain window +typedef struct EntropyMainConfig +{ + uint32_t worker_threads; // 0 => auto + uint32_t shutdown_deadline_ms; // graceful drain window } EntropyMainConfig; // Bootstrap / run -int entropy_main_run(const EntropyMainConfig* cfg, - const EntropyAppDelegateC* delegate); +int entropy_main_run(const EntropyMainConfig* cfg, const EntropyAppDelegateC* delegate); // Request termination from any thread void entropy_main_terminate(int code); @@ -47,5 +48,5 @@ void entropy_main_terminate(int code); EntropyApp* entropy_main_app(void); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif diff --git a/src/CoreCommon.h b/src/CoreCommon.h index 4aeb612..dd444b1 100644 --- a/src/CoreCommon.h +++ b/src/CoreCommon.h @@ -12,19 +12,22 @@ /** * @file CoreCommon.h * @brief Core common utilities and debugging macros for EntropyCore - * + * * This header provides essential debugging utilities and common macros * used throughout the EntropyCore library. It includes debug assertions * and build configuration flags. */ #include +#include #include #include -#include #ifdef EntropyDebug -#define ENTROPY_DEBUG_BLOCK(code) do { code } while(0) +#define ENTROPY_DEBUG_BLOCK(code) \ + do { \ + code \ + } while (0) #undef NDEBUG #define ENTROPY_ASSERT(condition, message) assert(condition) #else @@ -32,32 +35,34 @@ #define ENTROPY_ASSERT(condition, message) ((void)0) #endif -namespace EntropyEngine { -namespace Core { - // Cross-platform safe environment variable getter that avoids returning raw pointers - // and copies into std::string. Returns std::nullopt if the variable is not set. 
- inline std::optional safeGetEnv(const char* name) { - if (!name) return std::nullopt; +namespace EntropyEngine +{ +namespace Core +{ +// Cross-platform safe environment variable getter that avoids returning raw pointers +// and copies into std::string. Returns std::nullopt if the variable is not set. +inline std::optional safeGetEnv(const char* name) { + if (!name) return std::nullopt; #if defined(_WIN32) - // Use secure getenv_s to query size first - size_t required = 0; - errno_t err = getenv_s(&required, nullptr, 0, name); - if (err != 0 || required == 0) return std::nullopt; - // required includes the null terminator - std::string value; - value.resize(required); - size_t read = 0; - err = getenv_s(&read, value.data(), value.size(), name); - if (err != 0 || read == 0) return std::nullopt; - // Trim trailing null if present - if (!value.empty() && value.back() == '\0') value.pop_back(); - return value; + // Use secure getenv_s to query size first + size_t required = 0; + errno_t err = getenv_s(&required, nullptr, 0, name); + if (err != 0 || required == 0) return std::nullopt; + // required includes the null terminator + std::string value; + value.resize(required); + size_t read = 0; + err = getenv_s(&read, value.data(), value.size(), name); + if (err != 0 || read == 0) return std::nullopt; + // Trim trailing null if present + if (!value.empty() && value.back() == '\0') value.pop_back(); + return value; #else - const char* v = std::getenv(name); - if (!v) return std::nullopt; - return std::string(v); + const char* v = std::getenv(name); + if (!v) return std::nullopt; + return std::string(v); #endif - } -} // namespace Core - // Core utility namespace - currently empty but reserved for future utilities -} \ No newline at end of file +} +} // namespace Core + // Core utility namespace - currently empty but reserved for future utilities +} // namespace EntropyEngine diff --git a/src/Debug/Debug.h b/src/Debug/Debug.h index 0ceb56c..2d525bd 100644 --- 
a/src/Debug/Debug.h +++ b/src/Debug/Debug.h @@ -10,257 +10,258 @@ /** * @file Debug.h * @brief Master header for all debug utilities - * + * * This file consolidates all debug tools: object tracking, profiling, assertions, * and logging. Include this single header to access the complete debugging toolkit. */ #pragma once -#include "INamed.h" -#include "DebugUtilities.h" -#include -#include +#include #include +#include +#include #include -#include -namespace EntropyEngine { -namespace Core { -namespace Debug { +#include "DebugUtilities.h" +#include "INamed.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Debug +{ +/** + * @brief Central command center for all debugging operations + * + * Call initialize() at startup to get profiling, logging, and object + * tracking running. Most features compile to nothing in release builds. + */ +class DebugSystem +{ +public: /** - * @brief Central command center for all debugging operations - * - * Call initialize() at startup to get profiling, logging, and object - * tracking running. Most features compile to nothing in release builds. + * @brief Fire up all debug systems - call this once at startup + * + * Initializes Tracy profiler, logging, object tracking, and build + * configuration reporting. Safe to call multiple times. */ - class DebugSystem { - public: - /** - * @brief Fire up all debug systems - call this once at startup - * - * Initializes Tracy profiler, logging, object tracking, and build - * configuration reporting. Safe to call multiple times. 
- */ - static void initialize() { - ENTROPY_LOG_INFO_CAT("Debug", "Debug system initialized"); + static void initialize() { + ENTROPY_LOG_INFO_CAT("Debug", "Debug system initialized"); - #ifdef EntropyDebug - ENTROPY_LOG_INFO_CAT("Debug", "Debug build - assertions enabled"); - #else - ENTROPY_LOG_INFO_CAT("Debug", "Release build - assertions disabled"); - #endif - } - - /** - * @brief Clean shutdown of debug systems - * - * Flushes pending data and shuts down profiling. Optional but recommended. - */ - static void shutdown() { - ENTROPY_LOG_INFO_CAT("Debug", "Debug system shutdown"); - } +#ifdef EntropyDebug + ENTROPY_LOG_INFO_CAT("Debug", "Debug build - assertions enabled"); +#else + ENTROPY_LOG_INFO_CAT("Debug", "Release build - assertions disabled"); +#endif + } + + /** + * @brief Clean shutdown of debug systems + * + * Flushes pending data and shuts down profiling. Optional but recommended. + */ + static void shutdown() { + ENTROPY_LOG_INFO_CAT("Debug", "Debug system shutdown"); + } +}; + +/** + * @brief Registry for runtime object tracking and discovery + * + * Tracks INamed objects with their types, names, and creation times. + * Thread-safe. Useful for debugging lifecycle issues and leak detection. 
+ * + * @code + * // Find all WorkGraphs + * auto graphs = DebugRegistry::getInstance().findByType("WorkGraph"); + * LOG_INFO("Found {} active WorkGraphs", graphs.size()); + * + * // Find a specific object + * auto results = DebugRegistry::getInstance().findByName("MainMenuUI"); + * if (!results.empty()) { + * LOG_INFO("MainMenuUI is at {}", results[0]); + * } + * @endcode + */ +class DebugRegistry +{ +private: + struct Entry + { + const INamed* object; ///< Pointer to the tracked object + std::string typeName; ///< Type name for categorization + std::chrono::system_clock::time_point creationTime; ///< When it was registered }; + mutable std::shared_mutex _mutex; + std::unordered_map _objects; + +public: + static DebugRegistry& getInstance() { + // Meyer's singleton - thread-safe in C++11, avoids static destruction order fiasco + static DebugRegistry instance; + return instance; + } + /** - * @brief Registry for runtime object tracking and discovery - * - * Tracks INamed objects with their types, names, and creation times. - * Thread-safe. Useful for debugging lifecycle issues and leak detection. - * - * @code - * // Find all WorkGraphs - * auto graphs = DebugRegistry::getInstance().findByType("WorkGraph"); - * LOG_INFO("Found {} active WorkGraphs", graphs.size()); - * - * // Find a specific object - * auto results = DebugRegistry::getInstance().findByName("MainMenuUI"); - * if (!results.empty()) { - * LOG_INFO("MainMenuUI is at {}", results[0]); - * } - * @endcode + * @brief Add an object to the registry + * + * Registry holds non-owning pointers - unregister before destruction. 
+ * + * @param object The object to track (must outlive the registration) + * @param typeName Human-readable type name for grouping */ - class DebugRegistry { - private: - struct Entry { - const INamed* object; ///< Pointer to the tracked object - std::string typeName; ///< Type name for categorization - std::chrono::system_clock::time_point creationTime; ///< When it was registered - }; - - mutable std::shared_mutex _mutex; - std::unordered_map _objects; - - public: - static DebugRegistry& getInstance() { - // Meyer's singleton - thread-safe in C++11, avoids static destruction order fiasco - static DebugRegistry instance; - return instance; - } - - /** - * @brief Add an object to the registry - * - * Registry holds non-owning pointers - unregister before destruction. - * - * @param object The object to track (must outlive the registration) - * @param typeName Human-readable type name for grouping - */ - void registerObject(const INamed* object, std::string_view typeName) { - std::unique_lock lock(_mutex); - _objects[object] = Entry{ - object, - std::string(typeName), - std::chrono::system_clock::now() - }; - - auto msg = std::format("Registered {} '{}' at {}", - typeName, object->getName(), static_cast(object)); + void registerObject(const INamed* object, std::string_view typeName) { + std::unique_lock lock(_mutex); + _objects[object] = Entry{object, std::string(typeName), std::chrono::system_clock::now()}; + + auto msg = + std::format("Registered {} '{}' at {}", typeName, object->getName(), static_cast(object)); + ENTROPY_LOG_TRACE_CAT("DebugRegistry", msg); + } + + /** + * @brief Remove an object from tracking + * + * Must be called before object destruction to avoid dangling pointers. 
+ * + * @param object The object to stop tracking + */ + void unregisterObject(const INamed* object) { + std::unique_lock lock(_mutex); + auto it = _objects.find(object); + if (it != _objects.end()) { + auto msg = std::format("Unregistered {} '{}' at {}", it->second.typeName, object->getName(), + static_cast(object)); ENTROPY_LOG_TRACE_CAT("DebugRegistry", msg); + _objects.erase(it); } - - /** - * @brief Remove an object from tracking - * - * Must be called before object destruction to avoid dangling pointers. - * - * @param object The object to stop tracking - */ - void unregisterObject(const INamed* object) { - std::unique_lock lock(_mutex); - auto it = _objects.find(object); - if (it != _objects.end()) { - auto msg = std::format("Unregistered {} '{}' at {}", - it->second.typeName, object->getName(), static_cast(object)); - ENTROPY_LOG_TRACE_CAT("DebugRegistry", msg); - _objects.erase(it); - } - } - - /** - * @brief Search for all objects with a specific name - * - * Names can be non-unique. Returns all exact matches. - * - * @param name The name to search for - * @return Vector of matching object pointers - */ - [[nodiscard]] std::vector findByName(std::string_view name) const { - std::shared_lock lock(_mutex); - std::vector results; - - for (const auto& [object, entry] : _objects) { - if (object->getName() == name) { - results.push_back(object); - } - } - - return results; - } - - /** - * @brief Find all objects of a specific type - * - * Useful for counting active instances or type-specific debugging. - * - * @param typeName The type name used during registration - * @return Vector of all objects of this type - */ - [[nodiscard]] std::vector findByType(std::string_view typeName) const { - std::shared_lock lock(_mutex); - std::vector results; - - for (const auto& [object, entry] : _objects) { - if (entry.typeName == typeName) { - results.push_back(object); - } + } + + /** + * @brief Search for all objects with a specific name + * + * Names can be non-unique. 
Returns all exact matches. + * + * @param name The name to search for + * @return Vector of matching object pointers + */ + [[nodiscard]] std::vector findByName(std::string_view name) const { + std::shared_lock lock(_mutex); + std::vector results; + + for (const auto& [object, entry] : _objects) { + if (object->getName() == name) { + results.push_back(object); } - - return results; } - - /** - * @brief Dump the entire registry to the log - * - * Outputs all registered objects grouped by type. Can be verbose! - * - * Output format: - * ``` - * === Registered Debug Objects (42) === - * WorkGraph (3 instances): - * - 'MainGraph' at 0x7fff12345678 - * - 'UIGraph' at 0x7fff87654321 - * - 'AudioGraph' at 0x7fff11111111 - * EventBus (15 instances): - * ... - * ``` - */ - void logAllObjects() const { - std::shared_lock lock(_mutex); - - auto headerMsg = std::format("=== Registered Debug Objects ({}) ===", _objects.size()); - ENTROPY_LOG_INFO_CAT("DebugRegistry", headerMsg); - - // Group by type - std::unordered_map> byType; - for (const auto& [object, entry] : _objects) { - byType[entry.typeName].push_back(object); - } - - // Log grouped by type - for (const auto& [typeName, objects] : byType) { - auto typeMsg = std::format("{} ({} instances):", typeName, objects.size()); - ENTROPY_LOG_INFO_CAT("DebugRegistry", typeMsg); - for (const auto* obj : objects) { - auto objMsg = std::format(" - '{}' at {}", - obj->getName(), static_cast(obj)); - ENTROPY_LOG_INFO_CAT("DebugRegistry", objMsg); - } + + return results; + } + + /** + * @brief Find all objects of a specific type + * + * Useful for counting active instances or type-specific debugging. 
+ * + * @param typeName The type name used during registration + * @return Vector of all objects of this type + */ + [[nodiscard]] std::vector findByType(std::string_view typeName) const { + std::shared_lock lock(_mutex); + std::vector results; + + for (const auto& [object, entry] : _objects) { + if (entry.typeName == typeName) { + results.push_back(object); } } - }; + + return results; + } /** - * @brief RAII wrapper for automatic debug registration - * - * Inherit from this to get automatic registration/unregistration. - * No more forgetting to unregister! - * - * @tparam T The base class that implements INamed - * - * @code - * // Instead of: - * class MySystem : public INamed { - * MySystem() : INamed("MySystem") { - * DebugRegistry::getInstance().registerObject(this, "MySystem"); - * } - * ~MySystem() { - * DebugRegistry::getInstance().unregisterObject(this); - * } - * }; - * - * // Just do: - * class MySystem : public AutoDebugRegistered { - * MySystem() : AutoDebugRegistered("MySystem", "MySystem") {} - * }; - * @endcode + * @brief Dump the entire registry to the log + * + * Outputs all registered objects grouped by type. Can be verbose! + * + * Output format: + * ``` + * === Registered Debug Objects (42) === + * WorkGraph (3 instances): + * - 'MainGraph' at 0x7fff12345678 + * - 'UIGraph' at 0x7fff87654321 + * - 'AudioGraph' at 0x7fff11111111 + * EventBus (15 instances): + * ... + * ``` */ - template - class AutoDebugRegistered : public T { - public: - template - explicit AutoDebugRegistered(std::string_view typeName, Args&&... args) - : T(std::forward(args)...) 
{ - DebugRegistry::getInstance().registerObject(this, typeName); + void logAllObjects() const { + std::shared_lock lock(_mutex); + + auto headerMsg = std::format("=== Registered Debug Objects ({}) ===", _objects.size()); + ENTROPY_LOG_INFO_CAT("DebugRegistry", headerMsg); + + // Group by type + std::unordered_map> byType; + for (const auto& [object, entry] : _objects) { + byType[entry.typeName].push_back(object); } - - ~AutoDebugRegistered() { - DebugRegistry::getInstance().unregisterObject(this); + + // Log grouped by type + for (const auto& [typeName, objects] : byType) { + auto typeMsg = std::format("{} ({} instances):", typeName, objects.size()); + ENTROPY_LOG_INFO_CAT("DebugRegistry", typeMsg); + for (const auto* obj : objects) { + auto objMsg = std::format(" - '{}' at {}", obj->getName(), static_cast(obj)); + ENTROPY_LOG_INFO_CAT("DebugRegistry", objMsg); + } } - }; + } +}; + +/** + * @brief RAII wrapper for automatic debug registration + * + * Inherit from this to get automatic registration/unregistration. + * No more forgetting to unregister! + * + * @tparam T The base class that implements INamed + * + * @code + * // Instead of: + * class MySystem : public INamed { + * MySystem() : INamed("MySystem") { + * DebugRegistry::getInstance().registerObject(this, "MySystem"); + * } + * ~MySystem() { + * DebugRegistry::getInstance().unregisterObject(this); + * } + * }; + * + * // Just do: + * class MySystem : public AutoDebugRegistered { + * MySystem() : AutoDebugRegistered("MySystem", "MySystem") {} + * }; + * @endcode + */ +template +class AutoDebugRegistered : public T +{ +public: + template + explicit AutoDebugRegistered(std::string_view typeName, Args&&... args) : T(std::forward(args)...) 
{ + DebugRegistry::getInstance().registerObject(this, typeName); + } -} // namespace Debug -} // namespace Core -} // namespace EntropyEngine + ~AutoDebugRegistered() { + DebugRegistry::getInstance().unregisterObject(this); + } +}; +} // namespace Debug +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Debug/DebugUtilities.h b/src/Debug/DebugUtilities.h index 25ff2f2..0be3b36 100644 --- a/src/Debug/DebugUtilities.h +++ b/src/Debug/DebugUtilities.h @@ -10,24 +10,25 @@ /** * @file DebugUtilities.h * @brief Platform-specific debug helpers and assertion macros - * + * * This file provides cross-platform debugging utilities including debugger detection, * breakpoints, assertions, timing utilities, and memory tracking. */ #pragma once -#include -#include -#include -#include #include +#include #include +#include +#include +#include + #include "../Logging/Logger.h" #ifdef __APPLE__ -#include #include +#include #include #endif @@ -35,323 +36,319 @@ #include #endif -namespace EntropyEngine { -namespace Core { -namespace Debug { +namespace EntropyEngine +{ +namespace Core +{ +namespace Debug +{ - /** - * @brief Trigger a debugger breakpoint - * - * Stops execution when a debugger is attached. Uses platform-specific - * intrinsics for clean breakpoint handling. - * - * @code - * if (criticalErrorOccurred) { - * LOG_ERROR("Critical error detected"); - * debugBreak(); // Stop execution for debugging - * } - * @endcode - */ - inline void debugBreak() { +/** + * @brief Trigger a debugger breakpoint + * + * Stops execution when a debugger is attached. Uses platform-specific + * intrinsics for clean breakpoint handling. 
+ * + * @code + * if (criticalErrorOccurred) { + * LOG_ERROR("Critical error detected"); + * debugBreak(); // Stop execution for debugging + * } + * @endcode + */ +inline void debugBreak() { #if defined(_MSC_VER) - __debugbreak(); + __debugbreak(); #elif defined(__clang__) || defined(__GNUC__) - __builtin_trap(); + __builtin_trap(); #else - // Fallback - cause a segfault - *static_cast(nullptr) = 0; + // Fallback - cause a segfault + *static_cast(nullptr) = 0; #endif - } +} - /** - * @brief Check if a debugger is attached to the process - * - * Useful for conditional behavior like enhanced validation during debugging. - * Works on Windows (IsDebuggerPresent), macOS (sysctl), and Linux (/proc). - * - * @return true if a debugger is attached, false otherwise - * - * @code - * if (isDebuggerAttached()) { - * // Enable additional validation when debugging - * validateAllInvariants(); - * dumpDetailedState(); - * } - * @endcode - */ - inline bool isDebuggerAttached() { +/** + * @brief Check if a debugger is attached to the process + * + * Useful for conditional behavior like enhanced validation during debugging. + * Works on Windows (IsDebuggerPresent), macOS (sysctl), and Linux (/proc). 
+ * + * @return true if a debugger is attached, false otherwise + * + * @code + * if (isDebuggerAttached()) { + * // Enable additional validation when debugging + * validateAllInvariants(); + * dumpDetailedState(); + * } + * @endcode + */ +inline bool isDebuggerAttached() { #if defined(_WIN32) - return IsDebuggerPresent() != 0; + return IsDebuggerPresent() != 0; #elif defined(__APPLE__) - // Check for debugger on macOS - int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()}; - struct kinfo_proc info; - size_t size = sizeof(info); - info.kp_proc.p_flag = 0; - sysctl(mib, 4, &info, &size, nullptr, 0); - return (info.kp_proc.p_flag & P_TRACED) != 0; + // Check for debugger on macOS + int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()}; + struct kinfo_proc info; + size_t size = sizeof(info); + info.kp_proc.p_flag = 0; + sysctl(mib, 4, &info, &size, nullptr, 0); + return (info.kp_proc.p_flag & P_TRACED) != 0; #else - // Linux - check TracerPid in /proc/self/status - std::ifstream status("/proc/self/status"); - std::string line; - while (std::getline(status, line)) { - if (line.compare(0, 10, "TracerPid:") == 0) { - return std::stoi(line.substr(10)) != 0; - } + // Linux - check TracerPid in /proc/self/status + std::ifstream status("/proc/self/status"); + std::string line; + while (std::getline(status, line)) { + if (line.compare(0, 10, "TracerPid:") == 0) { + return std::stoi(line.substr(10)) != 0; } - return false; -#endif } + return false; +#endif +} - /** - * @brief Debug assertion that only runs in debug builds - * - * Validates conditions during development. Logs fatal error and breaks on - * failure. Compiles to nothing in release builds. 
- * - * Usage: - * ENTROPY_DEBUG_ASSERT(pointer != nullptr, "Widget pointer must not be null"); - * ENTROPY_DEBUG_ASSERT(count > 0, "Requires at least one item to process"); - */ - #ifdef EntropyDebug - #define ENTROPY_DEBUG_ASSERT(condition, message) \ - do { \ - if (!(condition)) { \ - auto msg = ::EntropyEngine::Core::Debug::debugFormat( \ - "Assertion failed: {} - {}", \ - #condition, \ - message \ - ); \ - ::EntropyEngine::Core::Logging::Logger::global().fatal( \ - "Assertion", \ - msg \ - ); \ - ::EntropyEngine::Core::Debug::debugBreak(); \ - } \ - } while(0) - #else - #define ENTROPY_DEBUG_ASSERT(condition, message) ((void)0) - #endif +/** + * @brief Debug assertion that only runs in debug builds + * + * Validates conditions during development. Logs fatal error and breaks on + * failure. Compiles to nothing in release builds. + * + * Usage: + * ENTROPY_DEBUG_ASSERT(pointer != nullptr, "Widget pointer must not be null"); + * ENTROPY_DEBUG_ASSERT(count > 0, "Requires at least one item to process"); + */ +#ifdef EntropyDebug +#define ENTROPY_DEBUG_ASSERT(condition, message) \ + do { \ + if (!(condition)) { \ + auto msg = ::EntropyEngine::Core::Debug::debugFormat("Assertion failed: {} - {}", #condition, message); \ + ::EntropyEngine::Core::Logging::Logger::global().fatal("Assertion", msg); \ + ::EntropyEngine::Core::Debug::debugBreak(); \ + } \ + } while (0) +#else +#define ENTROPY_DEBUG_ASSERT(condition, message) ((void)0) +#endif - /** - * @brief Debug-only code block - * - * Excludes wrapped code from release builds. Useful for expensive validation - * or debug output that would impact production performance. 
- * - * @code - * ENTROPY_DEBUG_ONLY({ - * // This block is excluded from release builds - * validateDataStructure(); - * dumpStateToFile("debug_state.txt"); - * checkMemoryLeaks(); - * }); - * @endcode - */ - #ifdef EntropyDebug - #define ENTROPY_DEBUG_ONLY(code) do { code } while(0) - #else - #define ENTROPY_DEBUG_ONLY(code) ((void)0) - #endif +/** + * @brief Debug-only code block + * + * Excludes wrapped code from release builds. Useful for expensive validation + * or debug output that would impact production performance. + * + * @code + * ENTROPY_DEBUG_ONLY({ + * // This block is excluded from release builds + * validateDataStructure(); + * dumpStateToFile("debug_state.txt"); + * checkMemoryLeaks(); + * }); + * @endcode + */ +#ifdef EntropyDebug +#define ENTROPY_DEBUG_ONLY(code) \ + do { \ + code \ + } while (0) +#else +#define ENTROPY_DEBUG_ONLY(code) ((void)0) +#endif - /** - * @brief Mark a variable as used only in debug builds - * - * Prevents unused variable warnings in release builds by marking as - * [[maybe_unused]] when debug code is disabled. - * - * @code - * void processData(const Data& data) { - * ENTROPY_DEBUG_VARIABLE(size_t originalSize) = data.size(); - * - * // ... process data ... - * - * ENTROPY_DEBUG_ASSERT(data.size() >= originalSize, - * "Data size must not decrease during processing"); - * } - * @endcode - */ - #ifdef EntropyDebug - #define ENTROPY_DEBUG_VARIABLE(x) x - #else - #define ENTROPY_DEBUG_VARIABLE(x) [[maybe_unused]] x - #endif +/** + * @brief Mark a variable as used only in debug builds + * + * Prevents unused variable warnings in release builds by marking as + * [[maybe_unused]] when debug code is disabled. + * + * @code + * void processData(const Data& data) { + * ENTROPY_DEBUG_VARIABLE(size_t originalSize) = data.size(); + * + * // ... process data ... 
+ * + * ENTROPY_DEBUG_ASSERT(data.size() >= originalSize, + * "Data size must not decrease during processing"); + * } + * @endcode + */ +#ifdef EntropyDebug +#define ENTROPY_DEBUG_VARIABLE(x) x +#else +#define ENTROPY_DEBUG_VARIABLE(x) [[maybe_unused]] x +#endif - /** - * @brief Helper to create formatted debug strings - * - * Type-safe string formatting using C++20's std::format. - * - * @tparam Args Variadic template arguments - * @param fmt Format string using std::format syntax - * @param args Arguments to format - * @return Formatted string - * - * @code - * auto msg = debugFormat("Object {} at position ({}, {}) has {} children", - * obj.getName(), obj.x, obj.y, obj.getChildCount()); - * LOG_DEBUG(msg); - * @endcode - */ - template - [[nodiscard]] std::string debugFormat(std::format_string fmt, Args&&... args) { - return std::format(fmt, std::forward(args)...); - } +/** + * @brief Helper to create formatted debug strings + * + * Type-safe string formatting using C++20's std::format. + * + * @tparam Args Variadic template arguments + * @param fmt Format string using std::format syntax + * @param args Arguments to format + * @return Formatted string + * + * @code + * auto msg = debugFormat("Object {} at position ({}, {}) has {} children", + * obj.getName(), obj.x, obj.y, obj.getChildCount()); + * LOG_DEBUG(msg); + * @endcode + */ +template +[[nodiscard]] std::string debugFormat(std::format_string fmt, Args&&... args) { + return std::format(fmt, std::forward(args)...); +} - /** - * @brief Scoped debug timer for measuring execution time - * - * Automatically measures and logs execution time when destroyed. Uses - * high-resolution clocks for microsecond precision. 
- * - * @code - * void expensiveOperation() { - * ScopedTimer timer("ExpensiveOperation"); - * - * // Perform work - * processLargeDataset(); - * - * } // Automatically logs: "ExpensiveOperation took 1234.567ms" - * - * // Manual duration checking - * { - * ScopedTimer timer("CustomTiming", false); // Disable automatic logging - * doWork(); - * if (timer.getDuration() > 100.0) { - * LOG_WARN("Operation exceeded time limit: {:.2f}ms", timer.getDuration()); - * } - * } - * @endcode - */ - class ScopedTimer { - private: - std::string_view _name; - std::chrono::high_resolution_clock::time_point _start; - bool _logOnDestruct; - - public: - explicit ScopedTimer(std::string_view name, bool logOnDestruct = true) - : _name(name) - , _start(std::chrono::high_resolution_clock::now()) - , _logOnDestruct(logOnDestruct) {} - - ~ScopedTimer() { - if (_logOnDestruct) { - auto duration = getDuration(); - auto msg = debugFormat("{} took {:.3f}ms", _name, duration); - ENTROPY_LOG_DEBUG_CAT("Timer", msg); - } - } - - [[nodiscard]] double getDuration() const { - auto end = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration(end - _start); - return duration.count(); - } - }; +/** + * @brief Scoped debug timer for measuring execution time + * + * Automatically measures and logs execution time when destroyed. Uses + * high-resolution clocks for microsecond precision. 
+ * + * @code + * void expensiveOperation() { + * ScopedTimer timer("ExpensiveOperation"); + * + * // Perform work + * processLargeDataset(); + * + * } // Automatically logs: "ExpensiveOperation took 1234.567ms" + * + * // Manual duration checking + * { + * ScopedTimer timer("CustomTiming", false); // Disable automatic logging + * doWork(); + * if (timer.getDuration() > 100.0) { + * LOG_WARN("Operation exceeded time limit: {:.2f}ms", timer.getDuration()); + * } + * } + * @endcode + */ +class ScopedTimer +{ +private: + std::string_view _name; + std::chrono::high_resolution_clock::time_point _start; + bool _logOnDestruct; - /** - * @brief RAII helper for debug scope tracking - * - * Logs entry and exit from code scopes. Provides execution flow visibility - * in debug logs. RAII ensures exit logging even when exceptions are thrown. - * - * @code - * void complexAlgorithm() { - * DebugScope scope("ComplexAlgorithm"); - * // Logs: "Entering: ComplexAlgorithm" - * - * if (someCondition) { - * DebugScope innerScope("ComplexAlgorithm::OptimizedPath"); - * // Logs: "Entering: ComplexAlgorithm::OptimizedPath" - * doOptimizedWork(); - * // Logs: "Leaving: ComplexAlgorithm::OptimizedPath" - * } - * - * // Logs: "Leaving: ComplexAlgorithm" - * } - * @endcode - */ - class DebugScope { - private: - std::string_view _name; - - public: - explicit DebugScope(std::string_view name) : _name(name) { - auto msg = debugFormat("Entering: {}", _name); - ENTROPY_LOG_TRACE_CAT("Scope", msg); - } - - ~DebugScope() { - auto msg = debugFormat("Leaving: {}", _name); - ENTROPY_LOG_TRACE_CAT("Scope", msg); +public: + explicit ScopedTimer(std::string_view name, bool logOnDestruct = true) + : _name(name), _start(std::chrono::high_resolution_clock::now()), _logOnDestruct(logOnDestruct) {} + + ~ScopedTimer() { + if (_logOnDestruct) { + auto duration = getDuration(); + auto msg = debugFormat("{} took {:.3f}ms", _name, duration); + ENTROPY_LOG_DEBUG_CAT("Timer", msg); } - }; + } - /** - * @brief Macro 
for creating a debug scope - * - * Simplified scope tracking. Generates unique variable names to allow - * multiple uses. Compiles to nothing in release builds. - * - * @code - * void processRequest(const Request& req) { - * ENTROPY_DEBUG_SCOPE("ProcessRequest"); - * - * validateRequest(req); - * - * { - * ENTROPY_DEBUG_SCOPE("ProcessRequest::DatabaseUpdate"); - * updateDatabase(req); - * } - * - * sendResponse(req); - * } - * @endcode - */ - #ifdef EntropyDebug - #define ENTROPY_DEBUG_SCOPE(name) \ - ::EntropyEngine::Core::Debug::DebugScope _debugScope##__LINE__(name) - #else - #define ENTROPY_DEBUG_SCOPE(name) ((void)0) - #endif + [[nodiscard]] double getDuration() const { + auto end = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration(end - _start); + return duration.count(); + } +}; - /** - * @brief Validate a pointer and log if null - * - * Validates pointers and logs error with source location when null. - * Uses C++20 source_location for automatic location capture. - * - * @tparam T The pointed-to type - * @param ptr The pointer to validate - * @param name Human-readable name for the pointer - * @param loc Source location (captured automatically) - * @return true if pointer is valid, false if null - * - * @code - * void processWidget(Widget* widget) { - * if (!validatePointer(widget, "widget")) { - * return; // Error logged with file:line information - * } - * - * // Pointer is valid - * widget->update(); - * } - * @endcode - */ - template - [[nodiscard]] bool validatePointer(T* ptr, std::string_view name, - const std::source_location& loc = std::source_location::current()) { - if (!ptr) { - auto msg = debugFormat("Null pointer: {} at {}:{}", - name, loc.file_name(), loc.line()); - ENTROPY_LOG_WARNING_CAT("Validation", msg); - return false; - } - return true; +/** + * @brief RAII helper for debug scope tracking + * + * Logs entry and exit from code scopes. Provides execution flow visibility + * in debug logs. 
RAII ensures exit logging even when exceptions are thrown. + * + * @code + * void complexAlgorithm() { + * DebugScope scope("ComplexAlgorithm"); + * // Logs: "Entering: ComplexAlgorithm" + * + * if (someCondition) { + * DebugScope innerScope("ComplexAlgorithm::OptimizedPath"); + * // Logs: "Entering: ComplexAlgorithm::OptimizedPath" + * doOptimizedWork(); + * // Logs: "Leaving: ComplexAlgorithm::OptimizedPath" + * } + * + * // Logs: "Leaving: ComplexAlgorithm" + * } + * @endcode + */ +class DebugScope +{ +private: + std::string_view _name; + +public: + explicit DebugScope(std::string_view name) : _name(name) { + auto msg = debugFormat("Entering: {}", _name); + ENTROPY_LOG_TRACE_CAT("Scope", msg); } -} // namespace Debug -} // namespace Core -} // namespace EntropyEngine + ~DebugScope() { + auto msg = debugFormat("Leaving: {}", _name); + ENTROPY_LOG_TRACE_CAT("Scope", msg); + } +}; + +/** + * @brief Macro for creating a debug scope + * + * Simplified scope tracking. Generates unique variable names to allow + * multiple uses. Compiles to nothing in release builds. + * + * @code + * void processRequest(const Request& req) { + * ENTROPY_DEBUG_SCOPE("ProcessRequest"); + * + * validateRequest(req); + * + * { + * ENTROPY_DEBUG_SCOPE("ProcessRequest::DatabaseUpdate"); + * updateDatabase(req); + * } + * + * sendResponse(req); + * } + * @endcode + */ +#ifdef EntropyDebug +#define ENTROPY_DEBUG_SCOPE(name) ::EntropyEngine::Core::Debug::DebugScope _debugScope##__LINE__(name) +#else +#define ENTROPY_DEBUG_SCOPE(name) ((void)0) +#endif + +/** + * @brief Validate a pointer and log if null + * + * Validates pointers and logs error with source location when null. + * Uses C++20 source_location for automatic location capture. 
+ * + * @tparam T The pointed-to type + * @param ptr The pointer to validate + * @param name Human-readable name for the pointer + * @param loc Source location (captured automatically) + * @return true if pointer is valid, false if null + * + * @code + * void processWidget(Widget* widget) { + * if (!validatePointer(widget, "widget")) { + * return; // Error logged with file:line information + * } + * + * // Pointer is valid + * widget->update(); + * } + * @endcode + */ +template +[[nodiscard]] bool validatePointer(T* ptr, std::string_view name, + const std::source_location& loc = std::source_location::current()) { + if (!ptr) { + auto msg = debugFormat("Null pointer: {} at {}:{}", name, loc.file_name(), loc.line()); + ENTROPY_LOG_WARNING_CAT("Validation", msg); + return false; + } + return true; +} +} // namespace Debug +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Debug/INamed.h b/src/Debug/INamed.h index 402586f..a0e79dd 100644 --- a/src/Debug/INamed.h +++ b/src/Debug/INamed.h @@ -10,7 +10,7 @@ /** * @file INamed.h * @brief Debug naming interface for meaningful object identification - * + * * This file provides an interface for objects that can be named for debugging. * Named objects provide human-readable identifiers in logs and debug output. */ @@ -20,125 +20,129 @@ #include #include -namespace EntropyEngine { -namespace Core { -namespace Debug { +namespace EntropyEngine +{ +namespace Core +{ +namespace Debug +{ + +/** + * @brief Interface for objects that support debug naming + * + * Standardizes debug naming across objects. Implement when objects appear + * in logs, debug output, or profiler traces. 
+ * + * @code + * class MyComponent : public INamed { + * std::string name; + * public: + * void setName(std::string_view n) override { name = n; } + * std::string_view getName() const override { return name; } + * }; + * + * // Meaningful log output + * LOG_INFO("Processing component: {}", component->getName()); + * // Output: "Processing component: PlayerHealthUI" + * @endcode + */ +class INamed +{ +public: + virtual ~INamed() = default; + + /** + * @brief Set the debug name for this object + * + * Assigns a name for use in logs and debug output. Choose descriptive + * names that indicate purpose (e.g., "MainMenuUI", "PlayerHealthBar"). + * + * @param name The debug name to assign (can be empty to clear) + */ + virtual void setName(std::string_view name) = 0; /** - * @brief Interface for objects that support debug naming - * - * Standardizes debug naming across objects. Implement when objects appear - * in logs, debug output, or profiler traces. - * + * @brief Get the debug name of this object + * + * Returns the assigned name or empty string if unnamed. + * + * @return The current debug name (may be empty) + * * @code - * class MyComponent : public INamed { - * std::string name; - * public: - * void setName(std::string_view n) override { name = n; } - * std::string_view getName() const override { return name; } - * }; - * - * // Meaningful log output - * LOG_INFO("Processing component: {}", component->getName()); - * // Output: "Processing component: PlayerHealthUI" + * if (object->hasName()) { + * LOG_INFO("Found object: {}", object->getName()); + * } else { + * LOG_INFO("Found unnamed object at {}", static_cast(object)); + * } * @endcode */ - class INamed { - public: - virtual ~INamed() = default; - - /** - * @brief Set the debug name for this object - * - * Assigns a name for use in logs and debug output. Choose descriptive - * names that indicate purpose (e.g., "MainMenuUI", "PlayerHealthBar"). 
- * - * @param name The debug name to assign (can be empty to clear) - */ - virtual void setName(std::string_view name) = 0; - - /** - * @brief Get the debug name of this object - * - * Returns the assigned name or empty string if unnamed. - * - * @return The current debug name (may be empty) - * - * @code - * if (object->hasName()) { - * LOG_INFO("Found object: {}", object->getName()); - * } else { - * LOG_INFO("Found unnamed object at {}", static_cast(object)); - * } - * @endcode - */ - [[nodiscard]] virtual std::string_view getName() const = 0; - - /** - * @brief Check if this object has a debug name set - * - * Useful for conditional formatting with fallback to addresses. - * - * @return true if a non-empty debug name is set - * - * @code - * std::string getObjectDescription(const INamed* obj) { - * if (obj->hasName()) { - * return std::format("'{}'", obj->getName()); - * } else { - * return std::format("", static_cast(obj)); - * } - * } - * @endcode - */ - [[nodiscard]] virtual bool hasName() const { - return !getName().empty(); - } - }; - + [[nodiscard]] virtual std::string_view getName() const = 0; + /** - * @brief Simple implementation of INamed that can be inherited - * - * Provides basic INamed implementation with string storage. Inherit to add - * naming functionality without implementing the interface yourself. - * + * @brief Check if this object has a debug name set + * + * Useful for conditional formatting with fallback to addresses. 
+ * + * @return true if a non-empty debug name is set + * * @code - * // Direct inheritance - * class MySystem : public Named { - * public: - * MySystem() : Named("MySystem") {} - * }; - * - * // Multiple inheritance - * class MyComponent : public Component, public Named { - * public: - * MyComponent(std::string_view name) : Named(name) {} - * }; - * - * // Virtual inheritance for diamond patterns - * class MyMultiBase : public virtual Named, public OtherBase { - * // Avoids naming conflicts through virtual inheritance - * }; + * std::string getObjectDescription(const INamed* obj) { + * if (obj->hasName()) { + * return std::format("'{}'", obj->getName()); + * } else { + * return std::format("", static_cast(obj)); + * } + * } * @endcode */ - class Named : public virtual INamed { - private: - std::string _name; - - public: - Named() = default; - explicit Named(std::string_view name) : _name(name) {} - - void setName(std::string_view name) override { - _name = name; - } - - [[nodiscard]] std::string_view getName() const override { - return _name; - } - }; + [[nodiscard]] virtual bool hasName() const { + return !getName().empty(); + } +}; + +/** + * @brief Simple implementation of INamed that can be inherited + * + * Provides basic INamed implementation with string storage. Inherit to add + * naming functionality without implementing the interface yourself. 
+ * + * @code + * // Direct inheritance + * class MySystem : public Named { + * public: + * MySystem() : Named("MySystem") {} + * }; + * + * // Multiple inheritance + * class MyComponent : public Component, public Named { + * public: + * MyComponent(std::string_view name) : Named(name) {} + * }; + * + * // Virtual inheritance for diamond patterns + * class MyMultiBase : public virtual Named, public OtherBase { + * // Avoids naming conflicts through virtual inheritance + * }; + * @endcode + */ +class Named : public virtual INamed +{ +private: + std::string _name; + +public: + Named() = default; + explicit Named(std::string_view name) : _name(name) {} + + void setName(std::string_view name) override { + _name = name; + } -} // namespace Debug -} // namespace Core -} // namespace EntropyEngine + [[nodiscard]] std::string_view getName() const override { + return _name; + } +}; +} // namespace Debug +} // namespace Core +} // namespace EntropyEngine diff --git a/src/EntropyCore.h b/src/EntropyCore.h index 75f4fb9..0157c42 100644 --- a/src/EntropyCore.h +++ b/src/EntropyCore.h @@ -12,7 +12,7 @@ /** * @file EntropyCore.h * @brief Single header that includes all EntropyCore components - * + * * This header can be used as an alternative to C++20 modules for * compilers that don't fully support modules yet. 
*/ @@ -22,9 +22,9 @@ // Core object model #include "Core/EntropyClass.h" +#include "Core/EntropyInterop.h" #include "Core/EntropyObject.h" #include "Core/RefObject.h" -#include "Core/EntropyInterop.h" // Type System #include "TypeSystem/GenericHandle.h" @@ -34,27 +34,27 @@ #include "Graph/DirectedAcyclicGraph.h" // Debug -#include "Debug/INamed.h" -#include "Debug/DebugUtilities.h" #include "Debug/Debug.h" +#include "Debug/DebugUtilities.h" +#include "Debug/INamed.h" // Logging -#include "Logging/LogLevel.h" -#include "Logging/LogEntry.h" -#include "Logging/ILogSink.h" #include "Logging/ConsoleSink.h" +#include "Logging/ILogSink.h" +#include "Logging/LogEntry.h" +#include "Logging/LogLevel.h" #include "Logging/Logger.h" // Concurrency -#include "Concurrency/WorkContractHandle.h" -#include "Concurrency/WorkContractGroup.h" -#include "Concurrency/WorkGraph.h" -#include "Concurrency/WorkService.h" -#include "Concurrency/SignalTree.h" +#include "Concurrency/AdaptiveRankingScheduler.h" +#include "Concurrency/DirectScheduler.h" #include "Concurrency/IConcurrencyProvider.h" #include "Concurrency/IWorkScheduler.h" -#include "Concurrency/DirectScheduler.h" -#include "Concurrency/SpinningDirectScheduler.h" -#include "Concurrency/AdaptiveRankingScheduler.h" #include "Concurrency/RandomScheduler.h" -#include "Concurrency/RoundRobinScheduler.h" \ No newline at end of file +#include "Concurrency/RoundRobinScheduler.h" +#include "Concurrency/SignalTree.h" +#include "Concurrency/SpinningDirectScheduler.h" +#include "Concurrency/WorkContractGroup.h" +#include "Concurrency/WorkContractHandle.h" +#include "Concurrency/WorkGraph.h" +#include "Concurrency/WorkService.h" diff --git a/src/Graph/AcyclicNodeHandle.h b/src/Graph/AcyclicNodeHandle.h index 582df7a..ead277a 100644 --- a/src/Graph/AcyclicNodeHandle.h +++ b/src/Graph/AcyclicNodeHandle.h @@ -18,148 +18,145 @@ #pragma once #include -#include "../Core/EntropyObject.h" -#include "../TypeSystem/TypeID.h" // for classHash() -namespace 
EntropyEngine { - namespace Core { - namespace Graph { - template - class DirectedAcyclicGraph; +#include "../Core/EntropyObject.h" +#include "../TypeSystem/TypeID.h" // for classHash() - // Optional tag retained for compatibility with any external code - struct NodeTag {}; +namespace EntropyEngine +{ +namespace Core +{ +namespace Graph +{ +template +class DirectedAcyclicGraph; - /** - * @class AcyclicNodeHandle - * @brief Lightweight, stamped handle to a node in a DirectedAcyclicGraph - * - * AcyclicNodeHandle is an EntropyObject that carries an owner+index+generation - * identity stamped by DirectedAcyclicGraph. The generation prevents stale-handle - * reuse after node removal. Handles are cheap to copy and compare. - * - * @code - * using namespace EntropyEngine::Core::Graph; - * DirectedAcyclicGraph dag; - * auto a = dag.addNode(1); - * auto b = dag.addNode(2); - * dag.addEdge(a, b); // b depends on a - * - * // Validate and access data - * if (dag.isHandleValid(a)) { - * int* data = dag.getNodeData(a); - * if (data) { *data = 42; } - * } - * - * // Equality compares owner and stamped id (index:generation) - * bool same = (a == a); - * bool different = (a != b); - * @endcode - */ - template - class AcyclicNodeHandle : public EntropyEngine::Core::EntropyObject { - using GraphT = DirectedAcyclicGraph; - template - friend class DirectedAcyclicGraph; - public: - /** @brief Default-constructed handle with no identity (invalid) */ - AcyclicNodeHandle() = default; +// Optional tag retained for compatibility with any external code +struct NodeTag +{ +}; - /** - * @brief Internal constructor used by DirectedAcyclicGraph to stamp identity - * @param graph Owning graph that stamps the handle - * @param index Slot index within the graph - * @param generation Generation counter for stale-handle detection - */ - AcyclicNodeHandle(GraphT* graph, uint32_t index, uint32_t generation) { - EntropyEngine::Core::HandleAccess::set(*this, graph, index, generation); - } +/** + * 
@class AcyclicNodeHandle + * @brief Lightweight, stamped handle to a node in a DirectedAcyclicGraph + * + * AcyclicNodeHandle is an EntropyObject that carries an owner+index+generation + * identity stamped by DirectedAcyclicGraph. The generation prevents stale-handle + * reuse after node removal. Handles are cheap to copy and compare. + * + * @code + * using namespace EntropyEngine::Core::Graph; + * DirectedAcyclicGraph dag; + * auto a = dag.addNode(1); + * auto b = dag.addNode(2); + * dag.addEdge(a, b); // b depends on a + * + * // Validate and access data + * if (dag.isHandleValid(a)) { + * int* data = dag.getNodeData(a); + * if (data) { *data = 42; } + * } + * + * // Equality compares owner and stamped id (index:generation) + * bool same = (a == a); + * bool different = (a != b); + * @endcode + */ +template +class AcyclicNodeHandle : public EntropyEngine::Core::EntropyObject +{ + using GraphT = DirectedAcyclicGraph; + template + friend class DirectedAcyclicGraph; - /** - * @brief Copies the stamped identity from another handle (if present) - */ - AcyclicNodeHandle(const AcyclicNodeHandle& other) noexcept { - if (other.hasHandle()) { - EntropyEngine::Core::HandleAccess::set( - *this, - const_cast(other.handleOwner()), - other.handleIndex(), - other.handleGeneration()); - } - } - /** - * @brief Copies or clears the stamped identity depending on source validity - */ - AcyclicNodeHandle& operator=(const AcyclicNodeHandle& other) noexcept { - if (this != &other) { - if (other.hasHandle()) { - EntropyEngine::Core::HandleAccess::set( - *this, - const_cast(other.handleOwner()), - other.handleIndex(), - other.handleGeneration()); - } else { - EntropyEngine::Core::HandleAccess::clear(*this); - } - } - return *this; - } - /** - * @brief Moves by copying the stamped identity (handles are lightweight) - */ - AcyclicNodeHandle(AcyclicNodeHandle&& other) noexcept { - if (other.hasHandle()) { - EntropyEngine::Core::HandleAccess::set( - *this, - const_cast(other.handleOwner()), 
- other.handleIndex(), - other.handleGeneration()); - } - } - /** - * @brief Move-assigns by copying or clearing identity based on source validity - */ - AcyclicNodeHandle& operator=(AcyclicNodeHandle&& other) noexcept { - if (this != &other) { - if (other.hasHandle()) { - EntropyEngine::Core::HandleAccess::set( - *this, - const_cast(other.handleOwner()), - other.handleIndex(), - other.handleGeneration()); - } else { - EntropyEngine::Core::HandleAccess::clear(*this); - } - } - return *this; - } +public: + /** @brief Default-constructed handle with no identity (invalid) */ + AcyclicNodeHandle() = default; - /** @brief Runtime type name for diagnostics */ - const char* className() const noexcept override { return "AcyclicNodeHandle"; } - /** @brief Stable type hash for cross-language identification */ - uint64_t classHash() const noexcept override { - using EntropyEngine::Core::TypeSystem::createTypeId; - static const uint64_t hash = static_cast(createTypeId< AcyclicNodeHandle >().id); - return hash; - } - }; + /** + * @brief Internal constructor used by DirectedAcyclicGraph to stamp identity + * @param graph Owning graph that stamps the handle + * @param index Slot index within the graph + * @param generation Generation counter for stale-handle detection + */ + AcyclicNodeHandle(GraphT* graph, uint32_t index, uint32_t generation) { + EntropyEngine::Core::HandleAccess::set(*this, graph, index, generation); + } - /** - * @brief Equality compares owning graph and packed index:generation id - * @return true if both handles refer to the same stamped node - */ - template - inline bool operator==(const AcyclicNodeHandle& a, const AcyclicNodeHandle& b) noexcept { - return a.handleOwner() == b.handleOwner() && a.handleId() == b.handleId(); + /** + * @brief Copies the stamped identity from another handle (if present) + */ + AcyclicNodeHandle(const AcyclicNodeHandle& other) noexcept { + if (other.hasHandle()) { + EntropyEngine::Core::HandleAccess::set(*this, 
const_cast(other.handleOwner()), other.handleIndex(), + other.handleGeneration()); + } + } + /** + * @brief Copies or clears the stamped identity depending on source validity + */ + AcyclicNodeHandle& operator=(const AcyclicNodeHandle& other) noexcept { + if (this != &other) { + if (other.hasHandle()) { + EntropyEngine::Core::HandleAccess::set(*this, const_cast(other.handleOwner()), + other.handleIndex(), other.handleGeneration()); + } else { + EntropyEngine::Core::HandleAccess::clear(*this); } - /** - * @brief Inequality is the negation of equality - */ - template - inline bool operator!=(const AcyclicNodeHandle& a, const AcyclicNodeHandle& b) noexcept { - return !(a == b); + } + return *this; + } + /** + * @brief Moves by copying the stamped identity (handles are lightweight) + */ + AcyclicNodeHandle(AcyclicNodeHandle&& other) noexcept { + if (other.hasHandle()) { + EntropyEngine::Core::HandleAccess::set(*this, const_cast(other.handleOwner()), other.handleIndex(), + other.handleGeneration()); + } + } + /** + * @brief Move-assigns by copying or clearing identity based on source validity + */ + AcyclicNodeHandle& operator=(AcyclicNodeHandle&& other) noexcept { + if (this != &other) { + if (other.hasHandle()) { + EntropyEngine::Core::HandleAccess::set(*this, const_cast(other.handleOwner()), + other.handleIndex(), other.handleGeneration()); + } else { + EntropyEngine::Core::HandleAccess::clear(*this); } } + return *this; } -} + /** @brief Runtime type name for diagnostics */ + const char* className() const noexcept override { + return "AcyclicNodeHandle"; + } + /** @brief Stable type hash for cross-language identification */ + uint64_t classHash() const noexcept override { + using EntropyEngine::Core::TypeSystem::createTypeId; + static const uint64_t hash = static_cast(createTypeId >().id); + return hash; + } +}; + +/** + * @brief Equality compares owning graph and packed index:generation id + * @return true if both handles refer to the same stamped node + */ 
+template +inline bool operator==(const AcyclicNodeHandle& a, const AcyclicNodeHandle& b) noexcept { + return a.handleOwner() == b.handleOwner() && a.handleId() == b.handleId(); +} +/** + * @brief Inequality is the negation of equality + */ +template +inline bool operator!=(const AcyclicNodeHandle& a, const AcyclicNodeHandle& b) noexcept { + return !(a == b); +} +} // namespace Graph +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Graph/DirectedAcyclicGraph.h b/src/Graph/DirectedAcyclicGraph.h index 90fff30..48bfff2 100644 --- a/src/Graph/DirectedAcyclicGraph.h +++ b/src/Graph/DirectedAcyclicGraph.h @@ -9,737 +9,743 @@ #pragma once -#include -#include -#include -#include -#include #include -#include #include +#include +#include +#include +#include +#include +#include -#include "AcyclicNodeHandle.h" #include "../CoreCommon.h" +#include "AcyclicNodeHandle.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Graph +{ + +/** + * @brief Edge storage per node + */ +struct EdgeList +{ + std::vector outgoing; ///< Outgoing edges from this node + std::vector incoming; ///< Incoming edges to this node -namespace EntropyEngine { -namespace Core { -namespace Graph { + uint16_t getInDegree() const { + return static_cast(incoming.size()); + } + uint16_t getOutDegree() const { + return static_cast(outgoing.size()); + } +}; + +/** + * @brief Node storage with generation-based handle validation + * + * Cache-aligned structure that holds node data and metadata. The generation + * counter enables detection of stale handles when slots are reused. + * + * @tparam T The type of data stored within the node. 
+ */ +template +struct alignas(64) Node +{ + T data; ///< Node data payload + std::atomic generation{1}; ///< Generation counter for handle validation + bool occupied{false}; ///< Whether this slot contains a valid node /** - * @brief Edge storage per node + * @brief Default constructor */ - struct EdgeList { - std::vector outgoing; ///< Outgoing edges from this node - std::vector incoming; ///< Incoming edges to this node - - uint16_t getInDegree() const { return static_cast(incoming.size()); } - uint16_t getOutDegree() const { return static_cast(outgoing.size()); } - }; + Node() = default; /** - * @brief Node storage with generation-based handle validation - * - * Cache-aligned structure that holds node data and metadata. The generation - * counter enables detection of stale handles when slots are reused. + * @brief Constructs a Node with initial data + * @param d The data to move into the node + * @param occ The initial occupancy status + */ + Node(T&& d, bool occ) : data(std::move(d)), occupied(occ) {} + + /** + * @brief Move constructor * - * @tparam T The type of data stored within the node. + * Resets source node's occupied status after move. 
+ * @param other The Node to move from + */ + Node(Node&& other) noexcept + : data(std::move(other.data)), generation(other.generation.load()), occupied(other.occupied) { + other.occupied = false; + } + + /** + * @brief Move assignment operator + * @param other The Node to move from + * @return Reference to this Node after assignment */ - template - struct alignas(64) Node { - T data; ///< Node data payload - std::atomic generation{1}; ///< Generation counter for handle validation - bool occupied{false}; ///< Whether this slot contains a valid node - - /** - * @brief Default constructor - */ - Node() = default; - - /** - * @brief Constructs a Node with initial data - * @param d The data to move into the node - * @param occ The initial occupancy status - */ - Node(T&& d, bool occ) : data(std::move(d)), occupied(occ) {} - - /** - * @brief Move constructor - * - * Resets source node's occupied status after move. - * @param other The Node to move from - */ - Node(Node&& other) noexcept - : data(std::move(other.data)), - generation(other.generation.load()), - occupied(other.occupied) { + Node& operator=(Node&& other) noexcept { + if (this != &other) { + data = std::move(other.data); + generation.store(other.generation.load()); + occupied = other.occupied; other.occupied = false; } - - /** - * @brief Move assignment operator - * @param other The Node to move from - * @return Reference to this Node after assignment - */ - Node& operator=(Node&& other) noexcept { - if (this != &other) { - data = std::move(other.data); - generation.store(other.generation.load()); - occupied = other.occupied; - other.occupied = false; - } - return *this; + return *this; + } + + // Non-copyable + Node(const Node&) = delete; + Node& operator=(const Node&) = delete; +}; + +template +/** + * @brief Cache-friendly directed acyclic graph implementation + * + * Manages dependencies between entities while preventing cycles. 
Uses contiguous + * storage for cache efficiency and generation-based handles for safe node references. + * Ideal for task scheduling, build systems, and dependency management. + * + * @tparam T The type of data to be stored in each node of the graph. + */ +class DirectedAcyclicGraph +{ + // Hot data - frequently accessed together + std::vector> _nodes; ///< Contiguous storage for all graph nodes. Designed for cache efficiency. + std::vector _edges; ///< Simple edge storage per node for robustness. + + // Cold data - rarely accessed + std::queue _freeList; ///< A queue of indices for recently freed node slots, enabling reuse and reducing + ///< memory fragmentation. + + // Make AcyclicNodeHandle a friend for all T types + template + friend class AcyclicNodeHandle; + +public: + /** + * @brief Constructs a new DirectedAcyclicGraph instance + * + * Pre-allocates storage for 64 nodes to reduce initial reallocations. + * + * @code + * // Create a graph to store integer data + * DirectedAcyclicGraph myGraph; + * + * // Add some nodes + * auto node1 = myGraph.addNode(10); + * auto node2 = myGraph.addNode(20); + * @endcode + */ + DirectedAcyclicGraph() { + _nodes.reserve(64); + _edges.reserve(64); + } + + /** + * @brief Adds a new node to the graph + * + * Reuses freed slots when available, otherwise extends storage. + * + * @param data The data to store in the node + * @return Handle to the newly added node + * + * @code + * DirectedAcyclicGraph stringGraph; + * auto taskA = stringGraph.addNode("Download Data"); + * auto taskB = stringGraph.addNode("Process Data"); + * auto taskC = stringGraph.addNode("Upload Results"); + * + * // taskA, taskB, taskC are now valid handles to nodes in the graph. 
+ * @endcode + */ + AcyclicNodeHandle addNode(T data) { + uint32_t index; + + // Prefer recently freed slots (likely still in cache) + if (!_freeList.empty()) { + index = _freeList.front(); + _freeList.pop(); + _nodes[index] = std::move(Node{std::move(data), true}); + // Generation already incremented during removal + } else { + index = static_cast(_nodes.size()); + _nodes.emplace_back(Node{std::move(data), true}); + _edges.emplace_back(); } - - // Non-copyable - Node(const Node&) = delete; - Node& operator=(const Node&) = delete; - }; - template + return AcyclicNodeHandle(this, index, _nodes[index].generation.load()); + } + /** - * @brief Cache-friendly directed acyclic graph implementation + * @brief Removes a node from the graph + * + * Invalidates the handle and removes all connected edges. * - * Manages dependencies between entities while preventing cycles. Uses contiguous - * storage for cache efficiency and generation-based handles for safe node references. - * Ideal for task scheduling, build systems, and dependency management. + * @param node Handle of the node to remove + * @return true if successfully removed, false if handle was invalid * - * @tparam T The type of data to be stored in each node of the graph. + * @code + * DirectedAcyclicGraph graph; + * auto node1 = graph.addNode(1); + * auto node2 = graph.addNode(2); + * graph.addEdge(node1, node2); + * + * bool removed = graph.removeNode(node1); + * // removed will be true + * bool isValid = graph.isHandleValid(node1); + * // isValid will be false + * @endcode */ - class DirectedAcyclicGraph { - // Hot data - frequently accessed together - std::vector> _nodes; ///< Contiguous storage for all graph nodes. Designed for cache efficiency. - std::vector _edges; ///< Simple edge storage per node for robustness. - - // Cold data - rarely accessed - std::queue _freeList; ///< A queue of indices for recently freed node slots, enabling reuse and reducing memory fragmentation. 
- - // Make AcyclicNodeHandle a friend for all T types - template - friend class AcyclicNodeHandle; - - public: - /** - * @brief Constructs a new DirectedAcyclicGraph instance - * - * Pre-allocates storage for 64 nodes to reduce initial reallocations. - * - * @code - * // Create a graph to store integer data - * DirectedAcyclicGraph myGraph; - * - * // Add some nodes - * auto node1 = myGraph.addNode(10); - * auto node2 = myGraph.addNode(20); - * @endcode - */ - DirectedAcyclicGraph() { - _nodes.reserve(64); - _edges.reserve(64); + bool removeNode(AcyclicNodeHandle node) { + if (!isHandleValid(node)) { + return false; } - /** - * @brief Adds a new node to the graph - * - * Reuses freed slots when available, otherwise extends storage. - * - * @param data The data to store in the node - * @return Handle to the newly added node - * - * @code - * DirectedAcyclicGraph stringGraph; - * auto taskA = stringGraph.addNode("Download Data"); - * auto taskB = stringGraph.addNode("Process Data"); - * auto taskC = stringGraph.addNode("Upload Results"); - * - * // taskA, taskB, taskC are now valid handles to nodes in the graph. 
- * @endcode - */ - AcyclicNodeHandle addNode(T data) { - uint32_t index; - - // Prefer recently freed slots (likely still in cache) - if (!_freeList.empty()) { - index = _freeList.front(); - _freeList.pop(); - _nodes[index] = std::move(Node{std::move(data), true}); - // Generation already incremented during removal - } else { - index = static_cast(_nodes.size()); - _nodes.emplace_back(Node{std::move(data), true}); - _edges.emplace_back(); - } - - return AcyclicNodeHandle(this, index, _nodes[index].generation.load()); + uint32_t index = node.handleIndex(); + + // Remove all edges from and to this node + removeAllEdges(index); + + // Mark as unoccupied and increment generation + _nodes[index].occupied = false; + _nodes[index].generation.fetch_add(1, std::memory_order_relaxed); + + // Add to free list for reuse + _freeList.push(index); + + return true; + } + + /** + * @brief Removes a directed edge between two nodes + * + * @param from The source node handle + * @param to The target node handle + * @return true if edge was removed, false if it didn't exist + */ + bool removeEdge(AcyclicNodeHandle from, AcyclicNodeHandle to) { + if (!isHandleValid(from) || !isHandleValid(to)) { + return false; } - /** - * @brief Removes a node from the graph - * - * Invalidates the handle and removes all connected edges. 
- * - * @param node Handle of the node to remove - * @return true if successfully removed, false if handle was invalid - * - * @code - * DirectedAcyclicGraph graph; - * auto node1 = graph.addNode(1); - * auto node2 = graph.addNode(2); - * graph.addEdge(node1, node2); - * - * bool removed = graph.removeNode(node1); - * // removed will be true - * bool isValid = graph.isHandleValid(node1); - * // isValid will be false - * @endcode - */ - bool removeNode(AcyclicNodeHandle node) { - if (!isHandleValid(node)) { - return false; - } - - uint32_t index = node.handleIndex(); - - // Remove all edges from and to this node - removeAllEdges(index); - - // Mark as unoccupied and increment generation - _nodes[index].occupied = false; - _nodes[index].generation.fetch_add(1, std::memory_order_relaxed); - - // Add to free list for reuse - _freeList.push(index); - - return true; + uint32_t fromIdx = from.handleIndex(); + uint32_t toIdx = to.handleIndex(); + + // Remove from outgoing edges of source + auto& outgoing = _edges[fromIdx].outgoing; + auto outIt = std::find(outgoing.begin(), outgoing.end(), toIdx); + if (outIt == outgoing.end()) { + return false; // Edge doesn't exist } + outgoing.erase(outIt); - /** - * @brief Removes a directed edge between two nodes - * - * @param from The source node handle - * @param to The target node handle - * @return true if edge was removed, false if it didn't exist - */ - bool removeEdge(AcyclicNodeHandle from, AcyclicNodeHandle to) { - if (!isHandleValid(from) || !isHandleValid(to)) { - return false; - } - - uint32_t fromIdx = from.handleIndex(); - uint32_t toIdx = to.handleIndex(); - - // Remove from outgoing edges of source - auto& outgoing = _edges[fromIdx].outgoing; - auto outIt = std::find(outgoing.begin(), outgoing.end(), toIdx); - if (outIt == outgoing.end()) { - return false; // Edge doesn't exist - } - outgoing.erase(outIt); - - // Remove from incoming edges of target - auto& incoming = _edges[toIdx].incoming; - auto inIt = 
std::find(incoming.begin(), incoming.end(), fromIdx); - if (inIt != incoming.end()) { - incoming.erase(inIt); - } - - return true; + // Remove from incoming edges of target + auto& incoming = _edges[toIdx].incoming; + auto inIt = std::find(incoming.begin(), incoming.end(), fromIdx); + if (inIt != incoming.end()) { + incoming.erase(inIt); } - /** - * @brief Adds a directed edge from one node to another - * - * Establishes a dependency where `from` must precede `to`. Prevents cycles. - * - * @param from Source node handle - * @param to Destination node handle - * @throws std::invalid_argument If handles invalid, self-loop, or would create cycle - * - * @code - * DirectedAcyclicGraph buildGraph; - * auto compile = buildGraph.addNode("Compile Source"); - * auto link = buildGraph.addNode("Link Executable"); - * auto deploy = buildGraph.addNode("Deploy Application"); - * - * try { - * buildGraph.addEdge(compile, link); // Link depends on compile - * buildGraph.addEdge(link, deploy); // Deploy depends on link - * // buildGraph.addEdge(deploy, compile); // This would throw std::invalid_argument (cycle detected) - * } catch (const std::invalid_argument& e) { - * std::cerr << "Error adding edge: " << e.what() << std::endl; - * } - * @endcode - */ - void addEdge(AcyclicNodeHandle from, AcyclicNodeHandle to) { - if (!isHandleValid(from) || !isHandleValid(to)) { - throw std::invalid_argument("Invalid handle provided to addEdge"); - } - - uint32_t fromIdx = from.handleIndex(); - uint32_t toIdx = to.handleIndex(); - - // Check for self-loops - if (fromIdx == toIdx) { - throw std::invalid_argument("Self-loops are not allowed in acyclic graph"); - } - - // Check if edge already exists - if (hasEdge(fromIdx, toIdx)) { - return; // Edge already exists - } - - // Check for cycles using DFS - if (wouldCreateCycle(fromIdx, toIdx)) { - throw std::invalid_argument("Adding edge would create a cycle"); - } - - // Add the forward edge - _edges[fromIdx].outgoing.push_back(toIdx); - - // Add 
the reverse edge - _edges[toIdx].incoming.push_back(fromIdx); + return true; + } + + /** + * @brief Adds a directed edge from one node to another + * + * Establishes a dependency where `from` must precede `to`. Prevents cycles. + * + * @param from Source node handle + * @param to Destination node handle + * @throws std::invalid_argument If handles invalid, self-loop, or would create cycle + * + * @code + * DirectedAcyclicGraph buildGraph; + * auto compile = buildGraph.addNode("Compile Source"); + * auto link = buildGraph.addNode("Link Executable"); + * auto deploy = buildGraph.addNode("Deploy Application"); + * + * try { + * buildGraph.addEdge(compile, link); // Link depends on compile + * buildGraph.addEdge(link, deploy); // Deploy depends on link + * // buildGraph.addEdge(deploy, compile); // This would throw std::invalid_argument (cycle detected) + * } catch (const std::invalid_argument& e) { + * std::cerr << "Error adding edge: " << e.what() << std::endl; + * } + * @endcode + */ + void addEdge(AcyclicNodeHandle from, AcyclicNodeHandle to) { + if (!isHandleValid(from) || !isHandleValid(to)) { + throw std::invalid_argument("Invalid handle provided to addEdge"); } - /** - * @brief Gets mutable pointer to node data - * - * Returns nullptr if the handle is invalid. 
- * - * @param node Handle to the node - * @return Pointer to the node's data, or nullptr if invalid - * - * @code - * DirectedAcyclicGraph graph; - * auto nodeHandle = graph.addNode(100); - * int* data = graph.getNodeData(nodeHandle); - * if (data) { - * *data = 200; // Modify the node's data - * } - * @endcode - */ - T* getNodeData(AcyclicNodeHandle node) { - if (!isHandleValid(node)) { - return nullptr; - } - return &_nodes[node.handleIndex()].data; + uint32_t fromIdx = from.handleIndex(); + uint32_t toIdx = to.handleIndex(); + + // Check for self-loops + if (fromIdx == toIdx) { + throw std::invalid_argument("Self-loops are not allowed in acyclic graph"); } - /** - * @brief Gets const pointer to node data - * - * Provides read-only access with handle validation. - * - * @param node Handle to the node - * @return Const pointer to the node's data, or nullptr if invalid - * - * @code - * DirectedAcyclicGraph graph; - * auto nodeHandle = graph.addNode("Hello World"); - * const std::string* data = graph.getNodeData(nodeHandle); - * if (data) { - * std::cout << *data << std::endl; // Read the node's data - * } - * @endcode - */ - const T* getNodeData(AcyclicNodeHandle node) const { - if (!isHandleValid(node)) { - return nullptr; - } - return &_nodes[node.handleIndex()].data; + // Check if edge already exists + if (hasEdge(fromIdx, toIdx)) { + return; // Edge already exists } - - /** - * @brief Validates a node handle - * - * Checks if the handle refers to an existing node by verifying index bounds, - * occupancy status, and generation count. 
- * - * @param handle The handle to validate - * @return true if handle is valid and points to an active node - * - * @code - * DirectedAcyclicGraph graph; - * auto node1 = graph.addNode(10); - * auto node2 = graph.addNode(20); - * - * bool valid1 = graph.isHandleValid(node1); // true - * graph.removeNode(node1); - * bool valid1_after_remove = graph.isHandleValid(node1); // false - * @endcode - */ - bool isHandleValid(const AcyclicNodeHandle& handle) const { - auto* owner = handle.template handleOwnerAs>(); - if (owner != this) return false; - - uint32_t index = handle.handleIndex(); - if (index >= _nodes.size()) return false; - - return _nodes[index].occupied && - _nodes[index].generation.load() == handle.handleGeneration(); + + // Check for cycles using DFS + if (wouldCreateCycle(fromIdx, toIdx)) { + throw std::invalid_argument("Adding edge would create a cycle"); } - - /** - * @brief Checks if a directed edge exists between two nodes - * - * @param from Index of the source node - * @param to Index of the destination node - * @return true if edge exists from `from` to `to` - * - * @code - * DirectedAcyclicGraph graph; - * auto nodeA = graph.addNode(1); - * auto nodeB = graph.addNode(2); - * graph.addEdge(nodeA, nodeB); - * - * bool has = graph.hasEdge(nodeA.getIndex(), nodeB.getIndex()); // true - * bool has_reverse = graph.hasEdge(nodeB.getIndex(), nodeA.getIndex()); // false - * @endcode - */ - bool hasEdge(uint32_t from, uint32_t to) const { - const auto& outgoing = _edges[from].outgoing; - return std::find(outgoing.begin(), outgoing.end(), to) != outgoing.end(); + + // Add the forward edge + _edges[fromIdx].outgoing.push_back(toIdx); + + // Add the reverse edge + _edges[toIdx].incoming.push_back(fromIdx); + } + + /** + * @brief Gets mutable pointer to node data + * + * Returns nullptr if the handle is invalid. 
+ * + * @param node Handle to the node + * @return Pointer to the node's data, or nullptr if invalid + * + * @code + * DirectedAcyclicGraph graph; + * auto nodeHandle = graph.addNode(100); + * int* data = graph.getNodeData(nodeHandle); + * if (data) { + * *data = 200; // Modify the node's data + * } + * @endcode + */ + T* getNodeData(AcyclicNodeHandle node) { + if (!isHandleValid(node)) { + return nullptr; } - - /** - * @brief Checks if adding an edge would create a cycle - * - * Uses DFS to check if `from` is reachable from `to`. - * - * @param from Index of potential source node - * @param to Index of potential destination node - * @return true if adding the edge would create a cycle - * - * @code - * DirectedAcyclicGraph graph; - * auto n1 = graph.addNode(1); - * auto n2 = graph.addNode(2); - * auto n3 = graph.addNode(3); - * graph.addEdge(n1, n2); - * graph.addEdge(n2, n3); - * - * // This would create a cycle: n3 -> n1 -> n2 -> n3 - * bool createsCycle = graph.wouldCreateCycle(n3.getIndex(), n1.getIndex()); // true - * @endcode - */ - bool wouldCreateCycle(uint32_t from, uint32_t to) const { - // DFS from 'to' to see if we can reach 'from' - std::vector visited(_nodes.size(), false); - std::vector stack; - stack.push_back(to); - - while (!stack.empty()) { - uint32_t current = stack.back(); - stack.pop_back(); - - if (current == from) { - return true; // Found cycle - } - - if (visited[current]) { - continue; - } - visited[current] = true; - - // Add all neighbors - const auto& outgoing = _edges[current].outgoing; - for (uint32_t neighbor : outgoing) { - if (!visited[neighbor]) { - stack.push_back(neighbor); - } - } - } - - return false; + return &_nodes[node.handleIndex()].data; + } + + /** + * @brief Gets const pointer to node data + * + * Provides read-only access with handle validation. 
+ * + * @param node Handle to the node + * @return Const pointer to the node's data, or nullptr if invalid + * + * @code + * DirectedAcyclicGraph graph; + * auto nodeHandle = graph.addNode("Hello World"); + * const std::string* data = graph.getNodeData(nodeHandle); + * if (data) { + * std::cout << *data << std::endl; // Read the node's data + * } + * @endcode + */ + const T* getNodeData(AcyclicNodeHandle node) const { + if (!isHandleValid(node)) { + return nullptr; } - - /** - * @brief Removes all edges connected to a node - * - * Disconnects both incoming and outgoing edges. O(degree) complexity. - * - * @param nodeIndex Index of the node to disconnect - * - * @code - * DirectedAcyclicGraph graph; - * auto n1 = graph.addNode(1); - * auto n2 = graph.addNode(2); - * auto n3 = graph.addNode(3); - * graph.addEdge(n1, n2); - * graph.addEdge(n3, n2); - * - * graph.removeAllEdges(n2.getIndex()); - * // Now, n1 -> n2 and n3 -> n2 edges are removed. - * @endcode - */ - void removeAllEdges(uint32_t nodeIndex) { - // Remove outgoing edges - auto& outgoing = _edges[nodeIndex].outgoing; - - // For each outgoing edge, remove from target's incoming edges - for (uint32_t target : outgoing) { - auto& targetIncoming = _edges[target].incoming; - targetIncoming.erase(std::remove(targetIncoming.begin(), targetIncoming.end(), nodeIndex), targetIncoming.end()); + return &_nodes[node.handleIndex()].data; + } + + /** + * @brief Validates a node handle + * + * Checks if the handle refers to an existing node by verifying index bounds, + * occupancy status, and generation count. 
+ * + * @param handle The handle to validate + * @return true if handle is valid and points to an active node + * + * @code + * DirectedAcyclicGraph graph; + * auto node1 = graph.addNode(10); + * auto node2 = graph.addNode(20); + * + * bool valid1 = graph.isHandleValid(node1); // true + * graph.removeNode(node1); + * bool valid1_after_remove = graph.isHandleValid(node1); // false + * @endcode + */ + bool isHandleValid(const AcyclicNodeHandle& handle) const { + auto* owner = handle.template handleOwnerAs>(); + if (owner != this) return false; + + uint32_t index = handle.handleIndex(); + if (index >= _nodes.size()) return false; + + return _nodes[index].occupied && _nodes[index].generation.load() == handle.handleGeneration(); + } + + /** + * @brief Checks if a directed edge exists between two nodes + * + * @param from Index of the source node + * @param to Index of the destination node + * @return true if edge exists from `from` to `to` + * + * @code + * DirectedAcyclicGraph graph; + * auto nodeA = graph.addNode(1); + * auto nodeB = graph.addNode(2); + * graph.addEdge(nodeA, nodeB); + * + * bool has = graph.hasEdge(nodeA.getIndex(), nodeB.getIndex()); // true + * bool has_reverse = graph.hasEdge(nodeB.getIndex(), nodeA.getIndex()); // false + * @endcode + */ + bool hasEdge(uint32_t from, uint32_t to) const { + const auto& outgoing = _edges[from].outgoing; + return std::find(outgoing.begin(), outgoing.end(), to) != outgoing.end(); + } + + /** + * @brief Checks if adding an edge would create a cycle + * + * Uses DFS to check if `from` is reachable from `to`. 
+ * + * @param from Index of potential source node + * @param to Index of potential destination node + * @return true if adding the edge would create a cycle + * + * @code + * DirectedAcyclicGraph graph; + * auto n1 = graph.addNode(1); + * auto n2 = graph.addNode(2); + * auto n3 = graph.addNode(3); + * graph.addEdge(n1, n2); + * graph.addEdge(n2, n3); + * + * // This would create a cycle: n3 -> n1 -> n2 -> n3 + * bool createsCycle = graph.wouldCreateCycle(n3.getIndex(), n1.getIndex()); // true + * @endcode + */ + bool wouldCreateCycle(uint32_t from, uint32_t to) const { + // DFS from 'to' to see if we can reach 'from' + std::vector visited(_nodes.size(), false); + std::vector stack; + stack.push_back(to); + + while (!stack.empty()) { + uint32_t current = stack.back(); + stack.pop_back(); + + if (current == from) { + return true; // Found cycle } - - outgoing.clear(); - - // Remove incoming edges - auto& incoming = _edges[nodeIndex].incoming; - - // For each incoming edge, remove from source's outgoing edges - for (uint32_t source : incoming) { - auto& sourceOutgoing = _edges[source].outgoing; - sourceOutgoing.erase(std::remove(sourceOutgoing.begin(), sourceOutgoing.end(), nodeIndex), sourceOutgoing.end()); + + if (visited[current]) { + continue; + } + visited[current] = true; + + // Add all neighbors + const auto& outgoing = _edges[current].outgoing; + for (uint32_t neighbor : outgoing) { + if (!visited[neighbor]) { + stack.push_back(neighbor); + } } - - incoming.clear(); - } - - - /** - * @brief Gets outgoing edges for a node - * - * @param nodeIndex Index of the node - * @return Span of indices this node points to - * - * @code - * DirectedAcyclicGraph graph; - * auto n1 = graph.addNode(1); - * auto n2 = graph.addNode(2); - * auto n3 = graph.addNode(3); - * graph.addEdge(n1, n2); - * graph.addEdge(n1, n3); - * - * for (uint32_t neighborIndex : graph.getOutgoingEdges(n1.getIndex())) { - * // Process neighborIndex (e.g., get its data) - * std::cout << "Node 1 has 
outgoing edge to: " << neighborIndex << std::endl; - * } - * @endcode - */ - std::span getOutgoingEdges(uint32_t nodeIndex) const { - const auto& outgoing = _edges[nodeIndex].outgoing; - return std::span{outgoing.data(), outgoing.size()}; } - - /** - * @brief Gets incoming edges for a node - * - * @param nodeIndex Index of the node - * @return Span of indices that point to this node - * - * @code - * DirectedAcyclicGraph graph; - * auto n1 = graph.addNode(1); - * auto n2 = graph.addNode(2); - * auto n3 = graph.addNode(3); - * graph.addEdge(n1, n3); - * graph.addEdge(n2, n3); - * - * for (uint32_t predecessorIndex : graph.getIncomingEdges(n3.getIndex())) { - * // Process predecessorIndex - * std::cout << "Node 3 has incoming edge from: " << predecessorIndex << std::endl; - * } - * @endcode - */ - std::span getIncomingEdges(uint32_t nodeIndex) const { - const auto& incoming = _edges[nodeIndex].incoming; - return std::span{incoming.data(), incoming.size()}; + + return false; + } + + /** + * @brief Removes all edges connected to a node + * + * Disconnects both incoming and outgoing edges. O(degree) complexity. + * + * @param nodeIndex Index of the node to disconnect + * + * @code + * DirectedAcyclicGraph graph; + * auto n1 = graph.addNode(1); + * auto n2 = graph.addNode(2); + * auto n3 = graph.addNode(3); + * graph.addEdge(n1, n2); + * graph.addEdge(n3, n2); + * + * graph.removeAllEdges(n2.getIndex()); + * // Now, n1 -> n2 and n3 -> n2 edges are removed. 
+ * @endcode + */ + void removeAllEdges(uint32_t nodeIndex) { + // Remove outgoing edges + auto& outgoing = _edges[nodeIndex].outgoing; + + // For each outgoing edge, remove from target's incoming edges + for (uint32_t target : outgoing) { + auto& targetIncoming = _edges[target].incoming; + targetIncoming.erase(std::remove(targetIncoming.begin(), targetIncoming.end(), nodeIndex), + targetIncoming.end()); } - - /** - * @brief Gets the children of a node as handles - * - * @param node Parent node handle - * @return Vector of child node handles - * - * @code - * auto parent = graph.addNode("Parent"); - * auto child1 = graph.addNode("Child 1"); - * auto child2 = graph.addNode("Child 2"); - * graph.addEdge(parent, child1); - * graph.addEdge(parent, child2); - * - * auto children = graph.getChildren(parent); - * for (auto child : children) { - * std::cout << "Child: " << child.getData()->name << std::endl; - * } - * @endcode - */ - std::vector> getChildren(const AcyclicNodeHandle& node) const { - // Const version delegates to non-const version - return const_cast*>(this)->getChildren(node); + + outgoing.clear(); + + // Remove incoming edges + auto& incoming = _edges[nodeIndex].incoming; + + // For each incoming edge, remove from source's outgoing edges + for (uint32_t source : incoming) { + auto& sourceOutgoing = _edges[source].outgoing; + sourceOutgoing.erase(std::remove(sourceOutgoing.begin(), sourceOutgoing.end(), nodeIndex), + sourceOutgoing.end()); } - std::vector> getChildren(const AcyclicNodeHandle& node) { - std::vector> children; - if (!isHandleValid(node)) return children; - - auto childIndices = getOutgoingEdges(node.handleIndex()); - children.reserve(childIndices.size()); - - for (uint32_t childIndex : childIndices) { - if (childIndex < _nodes.size() && _nodes[childIndex].occupied) { - children.emplace_back(this, childIndex, _nodes[childIndex].generation.load()); - } + incoming.clear(); + } + + /** + * @brief Gets outgoing edges for a node + * + * @param 
nodeIndex Index of the node + * @return Span of indices this node points to + * + * @code + * DirectedAcyclicGraph graph; + * auto n1 = graph.addNode(1); + * auto n2 = graph.addNode(2); + * auto n3 = graph.addNode(3); + * graph.addEdge(n1, n2); + * graph.addEdge(n1, n3); + * + * for (uint32_t neighborIndex : graph.getOutgoingEdges(n1.getIndex())) { + * // Process neighborIndex (e.g., get its data) + * std::cout << "Node 1 has outgoing edge to: " << neighborIndex << std::endl; + * } + * @endcode + */ + std::span getOutgoingEdges(uint32_t nodeIndex) const { + const auto& outgoing = _edges[nodeIndex].outgoing; + return std::span{outgoing.data(), outgoing.size()}; + } + + /** + * @brief Gets incoming edges for a node + * + * @param nodeIndex Index of the node + * @return Span of indices that point to this node + * + * @code + * DirectedAcyclicGraph graph; + * auto n1 = graph.addNode(1); + * auto n2 = graph.addNode(2); + * auto n3 = graph.addNode(3); + * graph.addEdge(n1, n3); + * graph.addEdge(n2, n3); + * + * for (uint32_t predecessorIndex : graph.getIncomingEdges(n3.getIndex())) { + * // Process predecessorIndex + * std::cout << "Node 3 has incoming edge from: " << predecessorIndex << std::endl; + * } + * @endcode + */ + std::span getIncomingEdges(uint32_t nodeIndex) const { + const auto& incoming = _edges[nodeIndex].incoming; + return std::span{incoming.data(), incoming.size()}; + } + + /** + * @brief Gets the children of a node as handles + * + * @param node Parent node handle + * @return Vector of child node handles + * + * @code + * auto parent = graph.addNode("Parent"); + * auto child1 = graph.addNode("Child 1"); + * auto child2 = graph.addNode("Child 2"); + * graph.addEdge(parent, child1); + * graph.addEdge(parent, child2); + * + * auto children = graph.getChildren(parent); + * for (auto child : children) { + * std::cout << "Child: " << child.getData()->name << std::endl; + * } + * @endcode + */ + std::vector> getChildren(const AcyclicNodeHandle& node) const { 
+ // Const version delegates to non-const version + return const_cast*>(this)->getChildren(node); + } + + std::vector> getChildren(const AcyclicNodeHandle& node) { + std::vector> children; + if (!isHandleValid(node)) return children; + + auto childIndices = getOutgoingEdges(node.handleIndex()); + children.reserve(childIndices.size()); + + for (uint32_t childIndex : childIndices) { + if (childIndex < _nodes.size() && _nodes[childIndex].occupied) { + children.emplace_back(this, childIndex, _nodes[childIndex].generation.load()); } - - return children; } - /** - * @brief Gets the parents of a node as handles - * - * @param node The child node handle - * @return Vector of parent node handles - * - * @code - * auto parent1 = graph.addNode("Parent 1"); - * auto parent2 = graph.addNode("Parent 2"); - * auto child = graph.addNode("Child"); - * graph.addEdge(parent1, child); - * graph.addEdge(parent2, child); - * - * auto parents = graph.getParents(child); - * for (auto parent : parents) { - * std::cout << "Parent: " << parent.getData()->name << std::endl; - * } - * @endcode - */ - std::vector> getParents(const AcyclicNodeHandle& node) const { - // Const version delegates to non-const version - return const_cast*>(this)->getParents(node); - } + return children; + } - std::vector> getParents(const AcyclicNodeHandle& node) { - std::vector> parents; - if (!isHandleValid(node)) return parents; - - auto parentIndices = getIncomingEdges(node.handleIndex()); - parents.reserve(parentIndices.size()); - - for (uint32_t parentIndex : parentIndices) { - if (parentIndex < _nodes.size() && _nodes[parentIndex].occupied) { - parents.emplace_back(this, parentIndex, _nodes[parentIndex].generation.load()); - } + /** + * @brief Gets the parents of a node as handles + * + * @param node The child node handle + * @return Vector of parent node handles + * + * @code + * auto parent1 = graph.addNode("Parent 1"); + * auto parent2 = graph.addNode("Parent 2"); + * auto child = graph.addNode("Child"); + 
* graph.addEdge(parent1, child); + * graph.addEdge(parent2, child); + * + * auto parents = graph.getParents(child); + * for (auto parent : parents) { + * std::cout << "Parent: " << parent.getData()->name << std::endl; + * } + * @endcode + */ + std::vector> getParents(const AcyclicNodeHandle& node) const { + // Const version delegates to non-const version + return const_cast*>(this)->getParents(node); + } + + std::vector> getParents(const AcyclicNodeHandle& node) { + std::vector> parents; + if (!isHandleValid(node)) return parents; + + auto parentIndices = getIncomingEdges(node.handleIndex()); + parents.reserve(parentIndices.size()); + + for (uint32_t parentIndex : parentIndices) { + if (parentIndex < _nodes.size() && _nodes[parentIndex].occupied) { + parents.emplace_back(this, parentIndex, _nodes[parentIndex].generation.load()); } - - return parents; } - /** - * @brief Clears all nodes and edges from the graph - * - * Removes all nodes and edges, resetting the graph to an empty state. - * All existing handles become invalid after this operation. - * - * @code - * DirectedAcyclicGraph graph; - * auto n1 = graph.addNode(1); - * auto n2 = graph.addNode(2); - * graph.addEdge(n1, n2); - * - * graph.clear(); - * // Graph is now empty, n1 and n2 handles are invalid - * bool valid = graph.isHandleValid(n1); // false - * @endcode - */ - void clear() { - _nodes.clear(); - _edges.clear(); - // Clear the free list by replacing with empty queue - _freeList = std::queue(); - - // Reserve some initial capacity - _nodes.reserve(64); - _edges.reserve(64); + return parents; + } + + /** + * @brief Clears all nodes and edges from the graph + * + * Removes all nodes and edges, resetting the graph to an empty state. + * All existing handles become invalid after this operation. 
+ * + * @code + * DirectedAcyclicGraph graph; + * auto n1 = graph.addNode(1); + * auto n2 = graph.addNode(2); + * graph.addEdge(n1, n2); + * + * graph.clear(); + * // Graph is now empty, n1 and n2 handles are invalid + * bool valid = graph.isHandleValid(n1); // false + * @endcode + */ + void clear() { + _nodes.clear(); + _edges.clear(); + // Clear the free list by replacing with empty queue + _freeList = std::queue(); + + // Reserve some initial capacity + _nodes.reserve(64); + _edges.reserve(64); + } + + /** + * @brief Gets the number of active nodes in the graph + * + * @return Count of occupied node slots + */ + size_t nodeCount() const { + size_t count = 0; + for (const auto& node : _nodes) { + if (node.occupied) ++count; } + return count; + } - /** - * @brief Gets the number of active nodes in the graph - * - * @return Count of occupied node slots - */ - size_t nodeCount() const { - size_t count = 0; - for (const auto& node : _nodes) { - if (node.occupied) ++count; - } - return count; + /** + * @brief Performs topological sort on the graph + * + * Returns nodes in dependency order using Kahn's algorithm. For every edge + * u -> v, node u appears before v in the result. 
+ * + * @return Vector of node indices in topological order + * + * @code + * DirectedAcyclicGraph taskGraph; + * auto compile = taskGraph.addNode("Compile Source"); + * auto link = taskGraph.addNode("Link Executable"); + * auto test = taskGraph.addNode("Run Tests"); + * auto deploy = taskGraph.addNode("Deploy Application"); + * + * taskGraph.addEdge(compile, link); + * taskGraph.addEdge(link, test); + * taskGraph.addEdge(test, deploy); + * + * std::vector order = taskGraph.topologicalSort(); + * for (uint32_t nodeIndex : order) { + * // In a real scenario, you'd get the node data and execute the task + * std::cout << "Executing: " << *taskGraph.getNodeData(AcyclicNodeHandle(nullptr, nodeIndex, 0)) + * << std::endl; + * } + * // Expected output order: Compile Source, Link Executable, Run Tests, Deploy Application + * @endcode + */ + std::vector topologicalSort() const { + std::vector result; + result.reserve(_nodes.size()); + + // Use compact in-degree array + std::vector inDegrees(_nodes.size(), 0); + + // Calculate in-degrees + for (uint32_t i = 0; i < _nodes.size(); ++i) { + if (!_nodes[i].occupied) continue; + inDegrees[i] = _edges[i].getInDegree(); } - /** - * @brief Performs topological sort on the graph - * - * Returns nodes in dependency order using Kahn's algorithm. For every edge - * u -> v, node u appears before v in the result. 
- * - * @return Vector of node indices in topological order - * - * @code - * DirectedAcyclicGraph taskGraph; - * auto compile = taskGraph.addNode("Compile Source"); - * auto link = taskGraph.addNode("Link Executable"); - * auto test = taskGraph.addNode("Run Tests"); - * auto deploy = taskGraph.addNode("Deploy Application"); - * - * taskGraph.addEdge(compile, link); - * taskGraph.addEdge(link, test); - * taskGraph.addEdge(test, deploy); - * - * std::vector order = taskGraph.topologicalSort(); - * for (uint32_t nodeIndex : order) { - * // In a real scenario, you'd get the node data and execute the task - * std::cout << "Executing: " << *taskGraph.getNodeData(AcyclicNodeHandle(nullptr, nodeIndex, 0)) << std::endl; - * } - * // Expected output order: Compile Source, Link Executable, Run Tests, Deploy Application - * @endcode - */ - std::vector topologicalSort() const { - std::vector result; - result.reserve(_nodes.size()); - - // Use compact in-degree array - std::vector inDegrees(_nodes.size(), 0); - - // Calculate in-degrees - for (uint32_t i = 0; i < _nodes.size(); ++i) { - if (!_nodes[i].occupied) continue; - inDegrees[i] = _edges[i].getInDegree(); - } - - // Find nodes with zero in-degree - std::vector zeroInDegree; - zeroInDegree.reserve(_nodes.size() / 4); - - for (uint32_t i = 0; i < _nodes.size(); ++i) { - if (inDegrees[i] == 0 && _nodes[i].occupied) { - zeroInDegree.push_back(i); - } - } - - // Process with good locality - size_t processIdx = 0; - while (processIdx < zeroInDegree.size()) { - uint32_t current = zeroInDegree[processIdx++]; - result.push_back(current); - - const auto& outgoing = _edges[current].outgoing; - - // Sequential edge access - for (uint32_t target : outgoing) { - if (--inDegrees[target] == 0) { - zeroInDegree.push_back(target); - } - } + // Find nodes with zero in-degree + std::vector zeroInDegree; + zeroInDegree.reserve(_nodes.size() / 4); + + for (uint32_t i = 0; i < _nodes.size(); ++i) { + if (inDegrees[i] == 0 && 
_nodes[i].occupied) { + zeroInDegree.push_back(i); } - - return result; } - }; + // Process with good locality + size_t processIdx = 0; + while (processIdx < zeroInDegree.size()) { + uint32_t current = zeroInDegree[processIdx++]; + result.push_back(current); + const auto& outgoing = _edges[current].outgoing; + // Sequential edge access + for (uint32_t target : outgoing) { + if (--inDegrees[target] == 0) { + zeroInDegree.push_back(target); + } + } + } -} // namespace Graph -} // namespace Core -} // namespace EntropyEngine + return result; + } +}; +} // namespace Graph +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/CLogger.cpp b/src/Logging/CLogger.cpp index 647ed88..cb6eda6 100644 --- a/src/Logging/CLogger.cpp +++ b/src/Logging/CLogger.cpp @@ -1,24 +1,33 @@ /* C logger shim forwarding to C++ Logger backend */ #include "Logging/CLogger.h" -#include "Logging/Logger.h" -#include "Logging/LogLevel.h" + #include #include #include #include +#include "Logging/LogLevel.h" +#include "Logging/Logger.h" + using ::EntropyEngine::Core::Logging::Logger; using ::EntropyEngine::Core::Logging::LogLevel; static LogLevel map_level(EntropyLogLevelC lvl) noexcept { switch (lvl) { - case ENTROPY_LOG_TRACE_C: return LogLevel::Trace; - case ENTROPY_LOG_DEBUG_C: return LogLevel::Debug; - case ENTROPY_LOG_INFO_C: return LogLevel::Info; - case ENTROPY_LOG_WARN_C: return LogLevel::Warning; - case ENTROPY_LOG_ERROR_C: return LogLevel::Error; - case ENTROPY_LOG_FATAL_C: return LogLevel::Fatal; - default: return LogLevel::Info; + case ENTROPY_LOG_TRACE_C: + return LogLevel::Trace; + case ENTROPY_LOG_DEBUG_C: + return LogLevel::Debug; + case ENTROPY_LOG_INFO_C: + return LogLevel::Info; + case ENTROPY_LOG_WARN_C: + return LogLevel::Warning; + case ENTROPY_LOG_ERROR_C: + return LogLevel::Error; + case ENTROPY_LOG_FATAL_C: + return LogLevel::Fatal; + default: + return LogLevel::Info; } } @@ -55,15 +64,17 @@ void entropy_log_vwrite_cat(EntropyLogLevelC level, const char* 
category, const } void entropy_log_write(EntropyLogLevelC level, const char* fmt, ...) { - va_list args; va_start(args, fmt); + va_list args; + va_start(args, fmt); vwrite_internal(level, "C", fmt, args); va_end(args); } void entropy_log_write_cat(EntropyLogLevelC level, const char* category, const char* fmt, ...) { - va_list args; va_start(args, fmt); + va_list args; + va_start(args, fmt); vwrite_internal(level, category, fmt, args); va_end(args); } -} // extern "C" +} // extern "C" diff --git a/src/Logging/CLogger.h b/src/Logging/CLogger.h index 75b1ea3..e3a36f1 100644 --- a/src/Logging/CLogger.h +++ b/src/Logging/CLogger.h @@ -9,13 +9,14 @@ extern "C" { #endif // C-visible log levels (keep values in sync with Logging::LogLevel) -typedef enum EntropyLogLevelC { - ENTROPY_LOG_TRACE_C = 0, - ENTROPY_LOG_DEBUG_C = 1, - ENTROPY_LOG_INFO_C = 2, - ENTROPY_LOG_WARN_C = 3, - ENTROPY_LOG_ERROR_C = 4, - ENTROPY_LOG_FATAL_C = 5 +typedef enum EntropyLogLevelC +{ + ENTROPY_LOG_TRACE_C = 0, + ENTROPY_LOG_DEBUG_C = 1, + ENTROPY_LOG_INFO_C = 2, + ENTROPY_LOG_WARN_C = 3, + ENTROPY_LOG_ERROR_C = 4, + ENTROPY_LOG_FATAL_C = 5 } EntropyLogLevelC; // Core C APIs (printf-style) @@ -27,7 +28,7 @@ void entropy_log_vwrite(EntropyLogLevelC level, const char* fmt, va_list args); void entropy_log_vwrite_cat(EntropyLogLevelC level, const char* category, const char* fmt, va_list args); #ifdef __cplusplus -} // extern "C" +} // extern "C" #endif // ------------------------------------------------------------ @@ -35,19 +36,25 @@ void entropy_log_vwrite_cat(EntropyLogLevelC level, const char* category, const // By default, non-category macros use __func__ as the category. // ------------------------------------------------------------ #ifndef ENTROPY_LOG_CATEGORY_DEFAULT -# define ENTROPY_LOG_CATEGORY_DEFAULT __func__ +#define ENTROPY_LOG_CATEGORY_DEFAULT __func__ #endif -#define ENTROPY_LOG_TRACE_F(fmt, ...) 
entropy_log_write_cat(ENTROPY_LOG_TRACE_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) -#define ENTROPY_LOG_DEBUG_F(fmt, ...) entropy_log_write_cat(ENTROPY_LOG_DEBUG_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) -#define ENTROPY_LOG_INFO_F(fmt, ...) entropy_log_write_cat(ENTROPY_LOG_INFO_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) -#define ENTROPY_LOG_WARNING_F(fmt, ...) entropy_log_write_cat(ENTROPY_LOG_WARN_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) -#define ENTROPY_LOG_ERROR_F(fmt, ...) entropy_log_write_cat(ENTROPY_LOG_ERROR_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) -#define ENTROPY_LOG_FATAL_F(fmt, ...) entropy_log_write_cat(ENTROPY_LOG_FATAL_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt) , ##__VA_ARGS__) - -#define ENTROPY_LOG_TRACE_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_TRACE_C, (cat), (fmt), ##__VA_ARGS__) -#define ENTROPY_LOG_DEBUG_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_DEBUG_C, (cat), (fmt), ##__VA_ARGS__) -#define ENTROPY_LOG_INFO_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_INFO_C, (cat), (fmt), ##__VA_ARGS__) -#define ENTROPY_LOG_WARNING_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_WARN_C, (cat), (fmt), ##__VA_ARGS__) -#define ENTROPY_LOG_ERROR_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_ERROR_C, (cat), (fmt), ##__VA_ARGS__) -#define ENTROPY_LOG_FATAL_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_FATAL_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_TRACE_F(fmt, ...) \ + entropy_log_write_cat(ENTROPY_LOG_TRACE_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_DEBUG_F(fmt, ...) \ + entropy_log_write_cat(ENTROPY_LOG_DEBUG_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_INFO_F(fmt, ...) \ + entropy_log_write_cat(ENTROPY_LOG_INFO_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_WARNING_F(fmt, ...) 
\ + entropy_log_write_cat(ENTROPY_LOG_WARN_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_ERROR_F(fmt, ...) \ + entropy_log_write_cat(ENTROPY_LOG_ERROR_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_FATAL_F(fmt, ...) \ + entropy_log_write_cat(ENTROPY_LOG_FATAL_C, ENTROPY_LOG_CATEGORY_DEFAULT, (fmt), ##__VA_ARGS__) + +#define ENTROPY_LOG_TRACE_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_TRACE_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_DEBUG_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_DEBUG_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_INFO_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_INFO_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_WARNING_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_WARN_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_ERROR_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_ERROR_C, (cat), (fmt), ##__VA_ARGS__) +#define ENTROPY_LOG_FATAL_CAT_F(cat, fmt, ...) entropy_log_write_cat(ENTROPY_LOG_FATAL_C, (cat), (fmt), ##__VA_ARGS__) diff --git a/src/Logging/ConsoleSink.cpp b/src/Logging/ConsoleSink.cpp index cda0053..5f2e92b 100644 --- a/src/Logging/ConsoleSink.cpp +++ b/src/Logging/ConsoleSink.cpp @@ -8,106 +8,115 @@ */ #include "ConsoleSink.h" + #include #include -namespace EntropyEngine { -namespace Core { -namespace Logging { - - void ConsoleSink::write(const LogEntry& entry) { - if (!shouldLog(entry.level)) return; - - std::lock_guard lock(_mutex); - - // Error and Fatal go to stderr, others to stdout - auto& stream = (entry.level >= LogLevel::Error) ? 
std::cerr : std::cout; - formatAndWrite(stream, entry); - } - - void ConsoleSink::flush() { - std::lock_guard lock(_mutex); - std::cout.flush(); - std::cerr.flush(); - } - - bool ConsoleSink::shouldLog(LogLevel level) const { - return level >= _minLevel; - } - - void ConsoleSink::setMinLevel(LogLevel level) { - _minLevel = level; - } - - const char* ConsoleSink::getColorForLevel(LogLevel level) const { - if (!_useColor) return ""; - - switch (level) { - case LogLevel::Trace: return GRAY; - case LogLevel::Debug: return CYAN; - case LogLevel::Info: return GREEN; - case LogLevel::Warning: return YELLOW; - case LogLevel::Error: return RED; - case LogLevel::Fatal: return MAGENTA; - default: return RESET; - } +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +void ConsoleSink::write(const LogEntry& entry) { + if (!shouldLog(entry.level)) return; + + std::lock_guard lock(_mutex); + + // Error and Fatal go to stderr, others to stdout + auto& stream = (entry.level >= LogLevel::Error) ? std::cerr : std::cout; + formatAndWrite(stream, entry); +} + +void ConsoleSink::flush() { + std::lock_guard lock(_mutex); + std::cout.flush(); + std::cerr.flush(); +} + +bool ConsoleSink::shouldLog(LogLevel level) const { + return level >= _minLevel; +} + +void ConsoleSink::setMinLevel(LogLevel level) { + _minLevel = level; +} + +const char* ConsoleSink::getColorForLevel(LogLevel level) const { + if (!_useColor) return ""; + + switch (level) { + case LogLevel::Trace: + return GRAY; + case LogLevel::Debug: + return CYAN; + case LogLevel::Info: + return GREEN; + case LogLevel::Warning: + return YELLOW; + case LogLevel::Error: + return RED; + case LogLevel::Fatal: + return MAGENTA; + default: + return RESET; } - - void ConsoleSink::formatAndWrite(std::ostream& stream, const LogEntry& entry) { - // Format: [TIMESTAMP] [LEVEL] [THREAD?] [CATEGORY] MESSAGE [LOCATION?] 
- - // Timestamp - auto time_t = std::chrono::system_clock::to_time_t(entry.timestamp); - auto ms = std::chrono::duration_cast( - entry.timestamp.time_since_epoch()) % 1000; - +} + +void ConsoleSink::formatAndWrite(std::ostream& stream, const LogEntry& entry) { + // Format: [TIMESTAMP] [LEVEL] [THREAD?] [CATEGORY] MESSAGE [LOCATION?] + + // Timestamp + auto time_t = std::chrono::system_clock::to_time_t(entry.timestamp); + auto ms = std::chrono::duration_cast(entry.timestamp.time_since_epoch()) % 1000; + #ifdef _WIN32 - std::tm tm_buf; - localtime_s(&tm_buf, &time_t); - stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); + std::tm tm_buf; + localtime_s(&tm_buf, &time_t); + stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); #else - std::tm tm_buf; - localtime_r(&time_t, &tm_buf); - stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); + std::tm tm_buf; + localtime_r(&time_t, &tm_buf); + stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); #endif - stream << "." << std::setfill('0') << std::setw(3) << ms.count() << "] "; - - // Level with color - if (_useColor) stream << getColorForLevel(entry.level); - stream << "[" << logLevelToString(entry.level) << "]"; - if (_useColor) stream << RESET; - stream << " "; - - // Thread ID (optional) - if (_showThreadId) { - std::ostringstream threadStr; - threadStr << entry.threadId; - auto threadIdStr = threadStr.str(); - - // Truncate thread ID to last 4 characters for readability - if (threadIdStr.length() > 4) { - threadIdStr = threadIdStr.substr(threadIdStr.length() - 4); - } - stream << "[" << std::setw(4) << threadIdStr << "] "; - } - - // Category - if (!entry.category.empty()) { - stream << "[" << entry.category << "] "; - } - - // Message - stream << entry.message; - - // Source location (optional) - if (_showLocation && entry.location.line() != 0) { - stream << " (" << entry.location.file_name() - << ":" << entry.location.line() << ")"; + stream << "." 
<< std::setfill('0') << std::setw(3) << ms.count() << "] "; + + // Level with color + if (_useColor) stream << getColorForLevel(entry.level); + stream << "[" << logLevelToString(entry.level) << "]"; + if (_useColor) stream << RESET; + stream << " "; + + // Thread ID (optional) + if (_showThreadId) { + std::ostringstream threadStr; + threadStr << entry.threadId; + auto threadIdStr = threadStr.str(); + + // Truncate thread ID to last 4 characters for readability + if (threadIdStr.length() > 4) { + threadIdStr = threadIdStr.substr(threadIdStr.length() - 4); } - - stream << std::endl; + stream << "[" << std::setw(4) << threadIdStr << "] "; + } + + // Category + if (!entry.category.empty()) { + stream << "[" << entry.category << "] "; } -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file + // Message + stream << entry.message; + + // Source location (optional) + if (_showLocation && entry.location.line() != 0) { + stream << " (" << entry.location.file_name() << ":" << entry.location.line() << ")"; + } + + stream << std::endl; +} + +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/ConsoleSink.h b/src/Logging/ConsoleSink.h index 5dca679..97ae032 100644 --- a/src/Logging/ConsoleSink.h +++ b/src/Logging/ConsoleSink.h @@ -10,7 +10,7 @@ /** * @file ConsoleSink.h * @brief Console log sink with color support - * + * * This file implements a console sink that outputs logs to stdout/stderr with * optional ANSI color codes for improved readability and visual distinction * between log levels. 
@@ -18,92 +18,101 @@ #pragma once -#include "ILogSink.h" -#include #include +#include + +#include "ILogSink.h" -namespace EntropyEngine { -namespace Core { -namespace Logging { +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +/** + * @brief Log sink that outputs to console + * + * ConsoleSink provides formatted output to the terminal with optional color + * coding based on log severity. Error and Fatal messages are directed to + * stderr while other levels use stdout, allowing flexible output redirection. + * + * Features: + * - Color-coded output by log level for visual distinction + * - Thread-safe to prevent garbled output from concurrent logging + * - Configurable format options (thread IDs, source locations) + * - Intelligent stream selection (stderr for errors, stdout for others) + * + * Usage tip: Redirect stdout while preserving error visibility: + * ./myapp > output.log # Errors remain visible on console + */ +class ConsoleSink : public ILogSink +{ +private: + mutable std::mutex _mutex; + LogLevel _minLevel = LogLevel::Trace; + bool _useColor = true; + bool _showThreadId = true; + bool _showLocation = false; + + /// ANSI color codes for different log levels + static constexpr const char* RESET = "\033[0m"; + static constexpr const char* RED = "\033[31m"; + static constexpr const char* YELLOW = "\033[33m"; + static constexpr const char* GREEN = "\033[32m"; + static constexpr const char* CYAN = "\033[36m"; + static constexpr const char* MAGENTA = "\033[35m"; + static constexpr const char* GRAY = "\033[90m"; + +public: + explicit ConsoleSink(bool useColor = true, bool showThreadId = true) + : _useColor(useColor), _showThreadId(showThreadId) {} + + void write(const LogEntry& entry) override; + void flush() override; + bool shouldLog(LogLevel level) const override; + void setMinLevel(LogLevel level) override; + + /** + * @brief Enable/disable color output + * + * Disable when terminal doesn't support ANSI or piping to file. 
+ * + * @param useColor true to enable colors, false for plain text + */ + void setUseColor(bool useColor) { + _useColor = useColor; + } + + /** + * @brief Enable/disable thread ID in output + * + * Useful for multithreaded debugging but noisy in single-threaded apps. + * + * @param show true to include thread IDs, false to hide them + */ + void setShowThreadId(bool show) { + _showThreadId = show; + } /** - * @brief Log sink that outputs to console - * - * ConsoleSink provides formatted output to the terminal with optional color - * coding based on log severity. Error and Fatal messages are directed to - * stderr while other levels use stdout, allowing flexible output redirection. - * - * Features: - * - Color-coded output by log level for visual distinction - * - Thread-safe to prevent garbled output from concurrent logging - * - Configurable format options (thread IDs, source locations) - * - Intelligent stream selection (stderr for errors, stdout for others) - * - * Usage tip: Redirect stdout while preserving error visibility: - * ./myapp > output.log # Errors remain visible on console + * @brief Enable/disable source location in output + * + * Shows file:line info. Helpful for debugging but verbose for production. 
+ * + * @param show true to include file:line info, false to hide it */ - class ConsoleSink : public ILogSink { - private: - mutable std::mutex _mutex; - LogLevel _minLevel = LogLevel::Trace; - bool _useColor = true; - bool _showThreadId = true; - bool _showLocation = false; - - /// ANSI color codes for different log levels - static constexpr const char* RESET = "\033[0m"; - static constexpr const char* RED = "\033[31m"; - static constexpr const char* YELLOW = "\033[33m"; - static constexpr const char* GREEN = "\033[32m"; - static constexpr const char* CYAN = "\033[36m"; - static constexpr const char* MAGENTA = "\033[35m"; - static constexpr const char* GRAY = "\033[90m"; - - public: - explicit ConsoleSink(bool useColor = true, bool showThreadId = true) - : _useColor(useColor) - , _showThreadId(showThreadId) {} - - void write(const LogEntry& entry) override; - void flush() override; - bool shouldLog(LogLevel level) const override; - void setMinLevel(LogLevel level) override; - - /** - * @brief Enable/disable color output - * - * Disable when terminal doesn't support ANSI or piping to file. - * - * @param useColor true to enable colors, false for plain text - */ - void setUseColor(bool useColor) { _useColor = useColor; } - - /** - * @brief Enable/disable thread ID in output - * - * Useful for multithreaded debugging but noisy in single-threaded apps. - * - * @param show true to include thread IDs, false to hide them - */ - void setShowThreadId(bool show) { _showThreadId = show; } - - /** - * @brief Enable/disable source location in output - * - * Shows file:line info. Helpful for debugging but verbose for production. 
- * - * @param show true to include file:line info, false to hide it - */ - void setShowLocation(bool show) { _showLocation = show; } - - private: - /// Get ANSI color code for a log level - const char* getColorForLevel(LogLevel level) const; - /// Format and write a log entry to the appropriate stream - void formatAndWrite(std::ostream& stream, const LogEntry& entry); - }; + void setShowLocation(bool show) { + _showLocation = show; + } -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine +private: + /// Get ANSI color code for a log level + const char* getColorForLevel(LogLevel level) const; + /// Format and write a log entry to the appropriate stream + void formatAndWrite(std::ostream& stream, const LogEntry& entry); +}; +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/ILogSink.h b/src/Logging/ILogSink.h index 06b8be1..a053537 100644 --- a/src/Logging/ILogSink.h +++ b/src/Logging/ILogSink.h @@ -10,7 +10,7 @@ /** * @file ILogSink.h * @brief Log sink interface for output destinations - * + * * This file defines the interface for log sinks - the destinations where log * messages are sent. Sinks can output to console, files, network services, * databases, or any other destination. @@ -18,106 +18,110 @@ #pragma once -#include "LogEntry.h" #include -namespace EntropyEngine { -namespace Core { -namespace Logging { +#include "LogEntry.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +/** + * @brief Interface for log sinks that handle log output + * + * Log sinks receive LogEntry objects and output them to their designated + * destination. Each sink can implement its own formatting, filtering, + * and delivery mechanisms. 
+ * + * Common sink implementations: + * - Console: Output to stdout/stderr with optional color + * - File: Persistent storage with rotation support + * - Network: Remote logging servers + * - Database: Structured storage for analysis + * - Memory: In-memory ring buffer + * + * Multiple sinks can operate independently with different configurations, + * allowing flexible log routing based on severity or category. + */ +class ILogSink +{ +public: + virtual ~ILogSink() = default; + + /** + * @brief Write a log entry to this sink + * + * Processes and outputs a log entry according to the sink's implementation. + * Must be thread-safe. May buffer output for performance. + * + * @param entry The log entry to write + * + * @code + * void MyCustomSink::write(const LogEntry& entry) { + * if (!shouldLog(entry.level)) return; + * + * auto formatted = formatEntry(entry); + * sendToDestination(formatted); + * } + * @endcode + */ + virtual void write(const LogEntry& entry) = 0; + + /** + * @brief Flush any buffered data + * + * Forces immediate write of buffered data. Called after critical messages + * and during shutdown. Sinks without buffering can implement as no-op. + */ + virtual void flush() = 0; + + /** + * @brief Check if this sink accepts logs at the given level + * + * Allows different sinks to filter messages independently based on + * their configuration. + * + * @param level The log level to check + * @return true if the sink will process logs at this level + * + * @code + * // Console shows only warnings and above + * consoleSink->setMinLevel(LogLevel::Warning); + * + * // File captures all messages including trace + * fileSink->setMinLevel(LogLevel::Trace); + * @endcode + */ + virtual bool shouldLog(LogLevel level) const = 0; /** - * @brief Interface for log sinks that handle log output - * - * Log sinks receive LogEntry objects and output them to their designated - * destination. Each sink can implement its own formatting, filtering, - * and delivery mechanisms. 
- * - * Common sink implementations: - * - Console: Output to stdout/stderr with optional color - * - File: Persistent storage with rotation support - * - Network: Remote logging servers - * - Database: Structured storage for analysis - * - Memory: In-memory ring buffer - * - * Multiple sinks can operate independently with different configurations, - * allowing flexible log routing based on severity or category. + * @brief Set the minimum log level for this sink + * + * Controls verbosity of this specific sink. Each sink maintains its + * own level setting for flexible log routing. + * + * @param level The minimum level to accept (inclusive) + * + * @code + * // Development configuration + * sink->setMinLevel(LogLevel::Debug); + * + * // Production configuration + * sink->setMinLevel(LogLevel::Warning); + * + * // Debugging mode + * sink->setMinLevel(LogLevel::Trace); + * @endcode */ - class ILogSink { - public: - virtual ~ILogSink() = default; - - /** - * @brief Write a log entry to this sink - * - * Processes and outputs a log entry according to the sink's implementation. - * Must be thread-safe. May buffer output for performance. - * - * @param entry The log entry to write - * - * @code - * void MyCustomSink::write(const LogEntry& entry) { - * if (!shouldLog(entry.level)) return; - * - * auto formatted = formatEntry(entry); - * sendToDestination(formatted); - * } - * @endcode - */ - virtual void write(const LogEntry& entry) = 0; - - /** - * @brief Flush any buffered data - * - * Forces immediate write of buffered data. Called after critical messages - * and during shutdown. Sinks without buffering can implement as no-op. - */ - virtual void flush() = 0; - - /** - * @brief Check if this sink accepts logs at the given level - * - * Allows different sinks to filter messages independently based on - * their configuration. 
- * - * @param level The log level to check - * @return true if the sink will process logs at this level - * - * @code - * // Console shows only warnings and above - * consoleSink->setMinLevel(LogLevel::Warning); - * - * // File captures all messages including trace - * fileSink->setMinLevel(LogLevel::Trace); - * @endcode - */ - virtual bool shouldLog(LogLevel level) const = 0; - - /** - * @brief Set the minimum log level for this sink - * - * Controls verbosity of this specific sink. Each sink maintains its - * own level setting for flexible log routing. - * - * @param level The minimum level to accept (inclusive) - * - * @code - * // Development configuration - * sink->setMinLevel(LogLevel::Debug); - * - * // Production configuration - * sink->setMinLevel(LogLevel::Warning); - * - * // Debugging mode - * sink->setMinLevel(LogLevel::Trace); - * @endcode - */ - virtual void setMinLevel(LogLevel level) = 0; - }; - - /// Convenience typedef for shared sink pointers - using LogSinkPtr = std::shared_ptr; + virtual void setMinLevel(LogLevel level) = 0; +}; -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine +/// Convenience typedef for shared sink pointers +using LogSinkPtr = std::shared_ptr; +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/LogEntry.h b/src/Logging/LogEntry.h index 0a46024..8b36a86 100644 --- a/src/Logging/LogEntry.h +++ b/src/Logging/LogEntry.h @@ -10,7 +10,7 @@ /** * @file LogEntry.h * @brief Log entry structure for comprehensive log information - * + * * This file defines LogEntry, the fundamental data structure that carries all * information about a single log event, including timestamp, severity, source * location, and contextual details. 
@@ -18,83 +18,85 @@ #pragma once -#include "LogLevel.h" #include -#include -#include #include +#include +#include -namespace EntropyEngine { -namespace Core { -namespace Logging { +#include "LogLevel.h" + +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +/** + * @brief A single log entry containing all metadata and message + * + * LogEntry captures comprehensive information about each log event including + * the message content, timestamp, source location, thread ID, severity level, + * and category. This provides complete context for debugging and analysis. + * + * Design principles: + * - Complete: Contains all necessary debugging context + * - Flexible: Sinks determine final formatting and presentation + * - Thread-safe: Safe to pass between threads + * + * Formatting is deferred to sinks. The entry captures raw data, + * allowing background threads to handle formatting operations. + */ +struct LogEntry +{ + /// Timestamp when the log was created + std::chrono::system_clock::time_point timestamp; + + /// Thread ID that created the log + std::thread::id threadId; + + /// Log severity level + LogLevel level; + + /// Category/module name (e.g., "WorkGraph", "Renderer") + std::string_view category; + + /// The actual log message + std::string message; + + /// Source location information (file:line) + std::source_location location; + + /// Optional: Thread-local context (e.g., current work item) + void* context = nullptr; /** - * @brief A single log entry containing all metadata and message - * - * LogEntry captures comprehensive information about each log event including - * the message content, timestamp, source location, thread ID, severity level, - * and category. This provides complete context for debugging and analysis. 
- * - * Design principles: - * - Complete: Contains all necessary debugging context - * - Flexible: Sinks determine final formatting and presentation - * - Thread-safe: Safe to pass between threads - * - * Formatting is deferred to sinks. The entry captures raw data, - * allowing background threads to handle formatting operations. + * @brief Construct a log entry with current timestamp and thread ID + * + * Automatically captures time, thread ID, and source location using + * C++20 source_location. + * + * @param lvl Log severity level + * @param cat Category/subsystem name + * @param msg Log message content + * @param loc Source location (captured automatically) + * + * @code + * // Location is captured automatically at the call site + * LogEntry entry(LogLevel::Error, "Database", "Connection failed"); + * // entry.location contains the file:line information + * @endcode */ - struct LogEntry { - /// Timestamp when the log was created - std::chrono::system_clock::time_point timestamp; - - /// Thread ID that created the log - std::thread::id threadId; - - /// Log severity level - LogLevel level; - - /// Category/module name (e.g., "WorkGraph", "Renderer") - std::string_view category; - - /// The actual log message - std::string message; - - /// Source location information (file:line) - std::source_location location; - - /// Optional: Thread-local context (e.g., current work item) - void* context = nullptr; - - /** - * @brief Construct a log entry with current timestamp and thread ID - * - * Automatically captures time, thread ID, and source location using - * C++20 source_location. 
- * - * @param lvl Log severity level - * @param cat Category/subsystem name - * @param msg Log message content - * @param loc Source location (captured automatically) - * - * @code - * // Location is captured automatically at the call site - * LogEntry entry(LogLevel::Error, "Database", "Connection failed"); - * // entry.location contains the file:line information - * @endcode - */ - LogEntry(LogLevel lvl, - std::string_view cat, - std::string msg, - const std::source_location& loc = std::source_location::current()) - : timestamp(std::chrono::system_clock::now()) - , threadId(std::this_thread::get_id()) - , level(lvl) - , category(cat) - , message(std::move(msg)) - , location(loc) {} - }; - -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine + LogEntry(LogLevel lvl, std::string_view cat, std::string msg, + const std::source_location& loc = std::source_location::current()) + : timestamp(std::chrono::system_clock::now()), + threadId(std::this_thread::get_id()), + level(lvl), + category(cat), + message(std::move(msg)), + location(loc) {} +}; +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/LogLevel.h b/src/Logging/LogLevel.h index c9d5749..c481835 100644 --- a/src/Logging/LogLevel.h +++ b/src/Logging/LogLevel.h @@ -10,124 +10,141 @@ /** * @file LogLevel.h * @brief Log severity levels for controlling output verbosity - * + * * This file defines severity levels from Trace to Fatal that control log verbosity. */ #pragma once #include -#include #include +#include -namespace EntropyEngine { -namespace Core { -namespace Logging { +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ - /** - * @brief Log severity levels for controlling output verbosity - * - * Ordered from least to most severe. Setting a minimum level includes - * all more severe levels. E.g., Warning includes Warning, Error, Fatal. 
- */ - enum class LogLevel : uint8_t { - Trace = 0, ///< Detailed trace information for deep debugging - Debug = 1, ///< Debug information useful during development - Info = 2, ///< Informational messages about normal operation - Warning = 3, ///< Warning conditions that might require attention - Error = 4, ///< Error conditions that require immediate attention - Fatal = 5, ///< Fatal errors that will terminate the application - Off = 6 ///< Disable all logging output - }; +/** + * @brief Log severity levels for controlling output verbosity + * + * Ordered from least to most severe. Setting a minimum level includes + * all more severe levels. E.g., Warning includes Warning, Error, Fatal. + */ +enum class LogLevel : uint8_t +{ + Trace = 0, ///< Detailed trace information for deep debugging + Debug = 1, ///< Debug information useful during development + Info = 2, ///< Informational messages about normal operation + Warning = 3, ///< Warning conditions that might require attention + Error = 4, ///< Error conditions that require immediate attention + Fatal = 5, ///< Fatal errors that will terminate the application + Off = 6 ///< Disable all logging output +}; - /** - * @brief Convert log level to string representation - * - * Returns fixed-width strings (5 chars) for consistent alignment. - * - * @param level The log level to convert - * @return Level name ("TRACE", "DEBUG", etc.) 
- * - * @code - * std::cout << "[" << logLevelToString(LogLevel::Error) << "] " - * << "Failed to open file" << std::endl; - * // Output: "[ERROR] Failed to open file" - * @endcode - */ - inline constexpr std::string_view logLevelToString(LogLevel level) { - switch (level) { - case LogLevel::Trace: return "TRACE"; - case LogLevel::Debug: return "DEBUG"; - case LogLevel::Info: return "INFO "; - case LogLevel::Warning: return "WARN "; - case LogLevel::Error: return "ERROR"; - case LogLevel::Fatal: return "FATAL"; - case LogLevel::Off: return "OFF "; - } - return "UNKNOWN"; +/** + * @brief Convert log level to string representation + * + * Returns fixed-width strings (5 chars) for consistent alignment. + * + * @param level The log level to convert + * @return Level name ("TRACE", "DEBUG", etc.) + * + * @code + * std::cout << "[" << logLevelToString(LogLevel::Error) << "] " + * << "Failed to open file" << std::endl; + * // Output: "[ERROR] Failed to open file" + * @endcode + */ +inline constexpr std::string_view logLevelToString(LogLevel level) { + switch (level) { + case LogLevel::Trace: + return "TRACE"; + case LogLevel::Debug: + return "DEBUG"; + case LogLevel::Info: + return "INFO "; + case LogLevel::Warning: + return "WARN "; + case LogLevel::Error: + return "ERROR"; + case LogLevel::Fatal: + return "FATAL"; + case LogLevel::Off: + return "OFF "; } + return "UNKNOWN"; +} - /** - * @brief Convert log level to single character - * - * Useful for compact formats and grep operations. 
- * - * @param level The log level to convert - * @return Single character ('T', 'D', 'I', 'W', 'E', 'F', 'O') - * - * @code - * // Compact format for high-volume logging - * std::cout << "[" << logLevelToChar(level) << "] " - * << timestamp << " " << message << std::endl; - * // Output: "[E] 2024-01-15 10:30:45 Connection lost" - * @endcode - */ - inline constexpr char logLevelToChar(LogLevel level) { - switch (level) { - case LogLevel::Trace: return 'T'; - case LogLevel::Debug: return 'D'; - case LogLevel::Info: return 'I'; - case LogLevel::Warning: return 'W'; - case LogLevel::Error: return 'E'; - case LogLevel::Fatal: return 'F'; - case LogLevel::Off: return 'O'; - } - return '?'; - } - - /** - * @brief Convert string to LogLevel - * - * Case-insensitive. Accepts "WARN" for "WARNING". Returns Info as - * default for unrecognized strings. - * - * @param str The string to parse - * @return Corresponding LogLevel, or Info if parsing fails - * - * @code - * // Reading from configuration - * auto level = stringToLogLevel(config["log_level"]); - * logger.setLevel(level); - * - * // Accepted formats: - * stringToLogLevel("debug"); // LogLevel::Debug - * stringToLogLevel("DEBUG"); // LogLevel::Debug - * stringToLogLevel("WARN"); // LogLevel::Warning - * stringToLogLevel("invalid"); // LogLevel::Info (default) - * @endcode - */ - inline LogLevel stringToLogLevel(const std::string& str) { - if (str == "Trace" || str == "TRACE") return LogLevel::Trace; - if (str == "Debug" || str == "DEBUG") return LogLevel::Debug; - if (str == "Info" || str == "INFO") return LogLevel::Info; - if (str == "Warning" || str == "WARNING" || str == "WARN") return LogLevel::Warning; - if (str == "Error" || str == "ERROR") return LogLevel::Error; - if (str == "Fatal" || str == "FATAL") return LogLevel::Fatal; - if (str == "Off" || str == "OFF") return LogLevel::Off; - return LogLevel::Info; // Default +/** + * @brief Convert log level to single character + * + * Useful for compact formats and 
grep operations. + * + * @param level The log level to convert + * @return Single character ('T', 'D', 'I', 'W', 'E', 'F', 'O') + * + * @code + * // Compact format for high-volume logging + * std::cout << "[" << logLevelToChar(level) << "] " + * << timestamp << " " << message << std::endl; + * // Output: "[E] 2024-01-15 10:30:45 Connection lost" + * @endcode + */ +inline constexpr char logLevelToChar(LogLevel level) { + switch (level) { + case LogLevel::Trace: + return 'T'; + case LogLevel::Debug: + return 'D'; + case LogLevel::Info: + return 'I'; + case LogLevel::Warning: + return 'W'; + case LogLevel::Error: + return 'E'; + case LogLevel::Fatal: + return 'F'; + case LogLevel::Off: + return 'O'; } + return '?'; +} -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine +/** + * @brief Convert string to LogLevel + * + * Case-insensitive. Accepts "WARN" for "WARNING". Returns Info as + * default for unrecognized strings. + * + * @param str The string to parse + * @return Corresponding LogLevel, or Info if parsing fails + * + * @code + * // Reading from configuration + * auto level = stringToLogLevel(config["log_level"]); + * logger.setLevel(level); + * + * // Accepted formats: + * stringToLogLevel("debug"); // LogLevel::Debug + * stringToLogLevel("DEBUG"); // LogLevel::Debug + * stringToLogLevel("WARN"); // LogLevel::Warning + * stringToLogLevel("invalid"); // LogLevel::Info (default) + * @endcode + */ +inline LogLevel stringToLogLevel(const std::string& str) { + if (str == "Trace" || str == "TRACE") return LogLevel::Trace; + if (str == "Debug" || str == "DEBUG") return LogLevel::Debug; + if (str == "Info" || str == "INFO") return LogLevel::Info; + if (str == "Warning" || str == "WARNING" || str == "WARN") return LogLevel::Warning; + if (str == "Error" || str == "ERROR") return LogLevel::Error; + if (str == "Fatal" || str == "FATAL") return LogLevel::Fatal; + if (str == "Off" || str == "OFF") return LogLevel::Off; + return LogLevel::Info; // 
Default +} +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/Logger.cpp b/src/Logging/Logger.cpp index 50e5cd9..2cee67d 100644 --- a/src/Logging/Logger.cpp +++ b/src/Logging/Logger.cpp @@ -8,74 +8,79 @@ */ #include "Logger.h" -#include "ConsoleSink.h" + #include -namespace EntropyEngine { -namespace Core { -namespace Logging { +#include "ConsoleSink.h" - // Static members - Logger* Logger::s_globalLogger = nullptr; - std::mutex Logger::s_globalMutex; - - Logger& Logger::global() { - std::lock_guard lock(s_globalMutex); - if (!s_globalLogger) { - s_globalLogger = new Logger("Global"); - // Add default console sink - s_globalLogger->addSink(std::make_shared()); - } - return *s_globalLogger; - } - - void Logger::setGlobal(Logger* logger) { - std::lock_guard lock(s_globalMutex); - if (s_globalLogger && s_globalLogger != logger) { - delete s_globalLogger; - } - s_globalLogger = logger; - } - - void Logger::addSink(LogSinkPtr sink) { - std::unique_lock lock(_sinkMutex); - _sinks.push_back(std::move(sink)); +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +// Static members +Logger* Logger::s_globalLogger = nullptr; +std::mutex Logger::s_globalMutex; + +Logger& Logger::global() { + std::lock_guard lock(s_globalMutex); + if (!s_globalLogger) { + s_globalLogger = new Logger("Global"); + // Add default console sink + s_globalLogger->addSink(std::make_shared()); } - - void Logger::removeSink(const LogSinkPtr& sink) { - std::unique_lock lock(_sinkMutex); - _sinks.erase(std::remove(_sinks.begin(), _sinks.end(), sink), _sinks.end()); + return *s_globalLogger; +} + +void Logger::setGlobal(Logger* logger) { + std::lock_guard lock(s_globalMutex); + if (s_globalLogger && s_globalLogger != logger) { + delete s_globalLogger; } - - void Logger::clearSinks() { - std::unique_lock lock(_sinkMutex); - _sinks.clear(); + s_globalLogger = logger; +} + +void Logger::addSink(LogSinkPtr sink) { + std::unique_lock 
lock(_sinkMutex); + _sinks.push_back(std::move(sink)); +} + +void Logger::removeSink(const LogSinkPtr& sink) { + std::unique_lock lock(_sinkMutex); + _sinks.erase(std::remove(_sinks.begin(), _sinks.end(), sink), _sinks.end()); +} + +void Logger::clearSinks() { + std::unique_lock lock(_sinkMutex); + _sinks.clear(); +} + +void Logger::flush() { + std::shared_lock lock(_sinkMutex); + for (auto& sink : _sinks) { + sink->flush(); } - - void Logger::flush() { - std::shared_lock lock(_sinkMutex); - for (auto& sink : _sinks) { - sink->flush(); +} + +void Logger::writeToSinks(const LogEntry& entry) { + std::shared_lock lock(_sinkMutex); + + for (auto& sink : _sinks) { + if (sink->shouldLog(entry.level)) { + sink->write(entry); } } - - void Logger::writeToSinks(const LogEntry& entry) { - std::shared_lock lock(_sinkMutex); - + + // Auto-flush for error and fatal levels + if (entry.level >= LogLevel::Error) { for (auto& sink : _sinks) { - if (sink->shouldLog(entry.level)) { - sink->write(entry); - } - } - - // Auto-flush for error and fatal levels - if (entry.level >= LogLevel::Error) { - for (auto& sink : _sinks) { - sink->flush(); - } + sink->flush(); } } +} -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine \ No newline at end of file +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine diff --git a/src/Logging/Logger.h b/src/Logging/Logger.h index 67f22c7..445b135 100644 --- a/src/Logging/Logger.h +++ b/src/Logging/Logger.h @@ -10,7 +10,7 @@ /** * @file Logger.h * @brief Main logger class for centralized logging management - * + * * This file contains the Logger class, which serves as the central component of * the logging system. It manages multiple sinks, provides thread-safe logging, * and offers both a direct API and convenient macros for application-wide logging. 
@@ -18,382 +18,379 @@ #pragma once -#include "LogEntry.h" -#include "ILogSink.h" -#include -#include -#include #include +#include +#include #include #include +#include + +#include "ILogSink.h" +#include "LogEntry.h" -namespace EntropyEngine { -namespace Core { -namespace Logging { +namespace EntropyEngine +{ +namespace Core +{ +namespace Logging +{ + +/** + * @brief Main logger class that manages sinks and provides logging interface + * + * Logger serves as the central hub of the logging system. It receives log messages, + * formats them using std::format, and distributes them to all registered sinks. + * The logger coordinates multiple output destinations while maintaining thread + * safety and performance. + * + * Key features: + * - Thread-safe: Supports concurrent logging from multiple threads + * - Multiple sinks: Route logs to various destinations simultaneously + * - Runtime configuration: Modify log levels and sinks dynamically + * - Can be compiled out if needed + * - Modern C++20: Leverages std::format for type-safe formatting + * - Source tracking: Automatic capture of log origin locations + * + * While a single global logger suffices for most applications, multiple + * loggers can be created for subsystem-specific logging needs. + */ +class Logger +{ +private: + std::string _name; + std::vector _sinks; + mutable std::shared_mutex _sinkMutex; + LogLevel _minLevel = LogLevel::Trace; + + // Category filtering + std::unordered_set _disabledCategories; + mutable std::shared_mutex _categoryMutex; + + // Global logger instance + static Logger* s_globalLogger; + static std::mutex s_globalMutex; + +public: + explicit Logger(std::string name) : _name(std::move(name)) {} + + /** + * @brief Get the global logger instance + * + * Lazy-initialized on first access. Persists for program lifetime. 
+ * + * @return Reference to the global logger + * + * @code + * // Direct access + * Logger::global().info("System", "Application started"); + * + * // Preferred macro usage: + * ENTROPY_LOG_INFO("Application started"); + * @endcode + */ + static Logger& global(); + + /** + * @brief Set a custom global logger + * + * Replaces default logger. Useful for testing and custom implementations. + * + * @param logger Pointer to the new global logger (takes ownership) + */ + static void setGlobal(Logger* logger); + + /** + * @brief Add a sink to this logger + * + * Multiple sinks can be active simultaneously for various destinations. + * + * @param sink The sink to add (shared ownership) + * + * @code + * auto& logger = Logger::global(); + * logger.addSink(std::make_shared()); + * logger.addSink(std::make_shared("app.log")); + * // Logs now output to both console and file + * @endcode + */ + void addSink(LogSinkPtr sink); + + /** + * @brief Remove a sink from this logger + * + * Sink remains valid but no longer receives messages. + * + * @param sink The sink to remove + */ + void removeSink(const LogSinkPtr& sink); + + /** + * @brief Clear all sinks + * + * Messages will be processed but not output. Useful for reconfiguration. + */ + void clearSinks(); + + /** + * @brief Set minimum log level for this logger + * + * Messages below this level are discarded before reaching sinks. + * + * @param level The minimum level to process + */ + void setMinLevel(LogLevel level) { + _minLevel = level; + } + + /** + * @brief Get minimum log level + * + * @return The current minimum log level + */ + LogLevel getMinLevel() const { + return _minLevel; + } + + /** + * @brief Disable logging for a specific category + * + * Messages from this category will be filtered out regardless of level. 
+ * + * @param category The category name to disable + * + * @code + * Logger::global().disableCategory("ShaderAsset"); + * Logger::global().disableCategory("ShaderService"); + * @endcode + */ + void disableCategory(const std::string& category) { + std::unique_lock lock(_categoryMutex); + _disabledCategories.insert(category); + } /** - * @brief Main logger class that manages sinks and provides logging interface - * - * Logger serves as the central hub of the logging system. It receives log messages, - * formats them using std::format, and distributes them to all registered sinks. - * The logger coordinates multiple output destinations while maintaining thread - * safety and performance. - * - * Key features: - * - Thread-safe: Supports concurrent logging from multiple threads - * - Multiple sinks: Route logs to various destinations simultaneously - * - Runtime configuration: Modify log levels and sinks dynamically - * - Can be compiled out if needed - * - Modern C++20: Leverages std::format for type-safe formatting - * - Source tracking: Automatic capture of log origin locations - * - * While a single global logger suffices for most applications, multiple - * loggers can be created for subsystem-specific logging needs. + * @brief Re-enable logging for a specific category + * + * @param category The category name to enable */ - class Logger { - private: - std::string _name; - std::vector _sinks; - mutable std::shared_mutex _sinkMutex; - LogLevel _minLevel = LogLevel::Trace; - - // Category filtering - std::unordered_set _disabledCategories; - mutable std::shared_mutex _categoryMutex; - - // Global logger instance - static Logger* s_globalLogger; - static std::mutex s_globalMutex; - - public: - explicit Logger(std::string name) : _name(std::move(name)) {} - - /** - * @brief Get the global logger instance - * - * Lazy-initialized on first access. Persists for program lifetime. 
- * - * @return Reference to the global logger - * - * @code - * // Direct access - * Logger::global().info("System", "Application started"); - * - * // Preferred macro usage: - * ENTROPY_LOG_INFO("Application started"); - * @endcode - */ - static Logger& global(); - - /** - * @brief Set a custom global logger - * - * Replaces default logger. Useful for testing and custom implementations. - * - * @param logger Pointer to the new global logger (takes ownership) - */ - static void setGlobal(Logger* logger); - - /** - * @brief Add a sink to this logger - * - * Multiple sinks can be active simultaneously for various destinations. - * - * @param sink The sink to add (shared ownership) - * - * @code - * auto& logger = Logger::global(); - * logger.addSink(std::make_shared()); - * logger.addSink(std::make_shared("app.log")); - * // Logs now output to both console and file - * @endcode - */ - void addSink(LogSinkPtr sink); - - /** - * @brief Remove a sink from this logger - * - * Sink remains valid but no longer receives messages. - * - * @param sink The sink to remove - */ - void removeSink(const LogSinkPtr& sink); - - /** - * @brief Clear all sinks - * - * Messages will be processed but not output. Useful for reconfiguration. - */ - void clearSinks(); - - /** - * @brief Set minimum log level for this logger - * - * Messages below this level are discarded before reaching sinks. - * - * @param level The minimum level to process - */ - void setMinLevel(LogLevel level) { _minLevel = level; } - - /** - * @brief Get minimum log level - * - * @return The current minimum log level - */ - LogLevel getMinLevel() const { return _minLevel; } - - /** - * @brief Disable logging for a specific category - * - * Messages from this category will be filtered out regardless of level. 
- * - * @param category The category name to disable - * - * @code - * Logger::global().disableCategory("ShaderAsset"); - * Logger::global().disableCategory("ShaderService"); - * @endcode - */ - void disableCategory(const std::string& category) { - std::unique_lock lock(_categoryMutex); - _disabledCategories.insert(category); - } - - /** - * @brief Re-enable logging for a specific category - * - * @param category The category name to enable - */ - void enableCategory(const std::string& category) { - std::unique_lock lock(_categoryMutex); - _disabledCategories.erase(category); - } - - /** - * @brief Check if a category is enabled for logging - * - * @param category The category to check - * @return true if the category is enabled, false if disabled - */ - bool isCategoryEnabled(std::string_view category) const { - std::shared_lock lock(_categoryMutex); - return _disabledCategories.find(std::string(category)) == _disabledCategories.end(); - } - - /** - * @brief Clear all disabled categories - * - * Re-enables all previously disabled categories. - */ - void clearDisabledCategories() { - std::unique_lock lock(_categoryMutex); - _disabledCategories.clear(); - } - - /** - * @brief Core logging function with pre-formatted message - * - * Source location captured automatically using C++20. 
- * - * @param level Message severity level - * @param category Subsystem/module category - * @param message The log message - * @param location Source location (auto-captured) - */ - void log(LogLevel level, - std::string_view category, - const std::string& message, - const std::source_location& location = std::source_location::current()) { - - // Early exit if level is too low - if (level < _minLevel) return; - - // Early exit if category is disabled - if (!isCategoryEnabled(category)) return; - - // Create log entry - LogEntry entry(level, category, message, location); - - // Send to all sinks - writeToSinks(entry); - } - - /** - * @brief Core logging function with format string - * - * Uses std::format for type-safe message formatting. - * - * @tparam Args Format argument types (deduced automatically) - * @param level Message severity level - * @param category Subsystem/module category - * @param fmt Format string (std::format syntax) - * @param args Arguments to format - * @param location Source location (auto-captured) - * - * @code - * logger.log(LogLevel::Info, "Network", "Connected to {} on port {}", - * serverName, portNumber); - * @endcode - */ - template - void log(LogLevel level, - std::string_view category, - std::format_string fmt, - Args&&... args, - const std::source_location& location = std::source_location::current()) { - - // Early exit if level is too low - if (level < _minLevel) return; - - // Early exit if category is disabled - if (!isCategoryEnabled(category)) return; - - // Format the message - std::string message = std::format(fmt, std::forward(args)...); - - // Create log entry - LogEntry entry(level, category, std::move(message), location); - - // Send to all sinks - writeToSinks(entry); - } - - /** - * @brief Convenience methods for each log level - * - * Direct methods for each severity without specifying LogLevel. - * Supports both format strings and plain strings. 
- */ - template - void trace(std::string_view category, std::format_string fmt, Args&&... args, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Trace, category, fmt, std::forward(args)..., loc); - } - - void trace(std::string_view category, const std::string& message, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Trace, category, message, loc); - } - - template - void debug(std::string_view category, std::format_string fmt, Args&&... args, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Debug, category, fmt, std::forward(args)..., loc); - } - - void debug(std::string_view category, const std::string& message, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Debug, category, message, loc); - } - - template - void info(std::string_view category, std::format_string fmt, Args&&... args, + void enableCategory(const std::string& category) { + std::unique_lock lock(_categoryMutex); + _disabledCategories.erase(category); + } + + /** + * @brief Check if a category is enabled for logging + * + * @param category The category to check + * @return true if the category is enabled, false if disabled + */ + bool isCategoryEnabled(std::string_view category) const { + std::shared_lock lock(_categoryMutex); + return _disabledCategories.find(std::string(category)) == _disabledCategories.end(); + } + + /** + * @brief Clear all disabled categories + * + * Re-enables all previously disabled categories. + */ + void clearDisabledCategories() { + std::unique_lock lock(_categoryMutex); + _disabledCategories.clear(); + } + + /** + * @brief Core logging function with pre-formatted message + * + * Source location captured automatically using C++20. 
+ * + * @param level Message severity level + * @param category Subsystem/module category + * @param message The log message + * @param location Source location (auto-captured) + */ + void log(LogLevel level, std::string_view category, const std::string& message, + const std::source_location& location = std::source_location::current()) { + // Early exit if level is too low + if (level < _minLevel) return; + + // Early exit if category is disabled + if (!isCategoryEnabled(category)) return; + + // Create log entry + LogEntry entry(level, category, message, location); + + // Send to all sinks + writeToSinks(entry); + } + + /** + * @brief Core logging function with format string + * + * Uses std::format for type-safe message formatting. + * + * @tparam Args Format argument types (deduced automatically) + * @param level Message severity level + * @param category Subsystem/module category + * @param fmt Format string (std::format syntax) + * @param args Arguments to format + * @param location Source location (auto-captured) + * + * @code + * logger.log(LogLevel::Info, "Network", "Connected to {} on port {}", + * serverName, portNumber); + * @endcode + */ + template + void log(LogLevel level, std::string_view category, std::format_string fmt, Args&&... args, + const std::source_location& location = std::source_location::current()) { + // Early exit if level is too low + if (level < _minLevel) return; + + // Early exit if category is disabled + if (!isCategoryEnabled(category)) return; + + // Format the message + std::string message = std::format(fmt, std::forward(args)...); + + // Create log entry + LogEntry entry(level, category, std::move(message), location); + + // Send to all sinks + writeToSinks(entry); + } + + /** + * @brief Convenience methods for each log level + * + * Direct methods for each severity without specifying LogLevel. + * Supports both format strings and plain strings. 
+ */ + template + void trace(std::string_view category, std::format_string fmt, Args&&... args, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Trace, category, fmt, std::forward(args)..., loc); + } + + void trace(std::string_view category, const std::string& message, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Trace, category, message, loc); + } + + template + void debug(std::string_view category, std::format_string fmt, Args&&... args, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Debug, category, fmt, std::forward(args)..., loc); + } + + void debug(std::string_view category, const std::string& message, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Debug, category, message, loc); + } + + template + void info(std::string_view category, std::format_string fmt, Args&&... args, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Info, category, fmt, std::forward(args)..., loc); + } + + void info(std::string_view category, const std::string& message, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Info, category, message, loc); + } + + template + void warning(std::string_view category, std::format_string fmt, Args&&... args, const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Info, category, fmt, std::forward(args)..., loc); - } - - void info(std::string_view category, const std::string& message, + log(LogLevel::Warning, category, fmt, std::forward(args)..., loc); + } + + void warning(std::string_view category, const std::string& message, const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Info, category, message, loc); - } - - template - void warning(std::string_view category, std::format_string fmt, Args&&... 
args, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Warning, category, fmt, std::forward(args)..., loc); - } - - void warning(std::string_view category, const std::string& message, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Warning, category, message, loc); - } - - template - void error(std::string_view category, std::format_string fmt, Args&&... args, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Error, category, fmt, std::forward(args)..., loc); - } - - void error(std::string_view category, const std::string& message, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Error, category, message, loc); - } - - template - void fatal(std::string_view category, std::format_string fmt, Args&&... args, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Fatal, category, fmt, std::forward(args)..., loc); - // Fatal should flush all sinks - flush(); - } - - void fatal(std::string_view category, const std::string& message, - const std::source_location& loc = std::source_location::current()) { - log(LogLevel::Fatal, category, message, loc); - // Fatal should flush all sinks - flush(); - } - - /** - * @brief Flush all sinks - * - * Forces immediate write of buffered data. Called automatically after - * Fatal messages. - */ - void flush(); - - private: - void writeToSinks(const LogEntry& entry); - }; - -} // namespace Logging -} // namespace Core -} // namespace EntropyEngine + log(LogLevel::Warning, category, message, loc); + } + + template + void error(std::string_view category, std::format_string fmt, Args&&... 
args, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Error, category, fmt, std::forward(args)..., loc); + } + + void error(std::string_view category, const std::string& message, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Error, category, message, loc); + } + + template + void fatal(std::string_view category, std::format_string fmt, Args&&... args, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Fatal, category, fmt, std::forward(args)..., loc); + // Fatal should flush all sinks + flush(); + } + + void fatal(std::string_view category, const std::string& message, + const std::source_location& loc = std::source_location::current()) { + log(LogLevel::Fatal, category, message, loc); + // Fatal should flush all sinks + flush(); + } + + /** + * @brief Flush all sinks + * + * Forces immediate write of buffered data. Called automatically after + * Fatal messages. + */ + void flush(); + +private: + void writeToSinks(const LogEntry& entry); +}; + +} // namespace Logging +} // namespace Core +} // namespace EntropyEngine /** * @brief Convenience macros for simplified logging - * + * * These macros provide the easiest way to add logging to your code. * They automatically: * - Use the global logger instance * - Capture source location information * - Use function name as category (non-CAT variants) * - Support both format strings and plain strings - * + * * Available variants: * - ENTROPY_LOG_XXX: Uses current function name as category * - ENTROPY_LOG_XXX_CAT: Allows explicit category specification - * + * * @code * void processData() { * ENTROPY_LOG_DEBUG("Starting processing"); - * + * * if (data.empty()) { * ENTROPY_LOG_WARNING("No data to process"); * return; * } - * + * * ENTROPY_LOG_INFO_CAT("DataProcessor", "Processing {} items", data.size()); * } * @endcode */ -#define ENTROPY_LOG_TRACE(fmt, ...) 
\ - ::EntropyEngine::Core::Logging::Logger::global().trace(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_TRACE(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().trace(__func__, fmt, ##__VA_ARGS__) -#define ENTROPY_LOG_DEBUG(fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().debug(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_DEBUG(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().debug(__func__, fmt, ##__VA_ARGS__) -#define ENTROPY_LOG_INFO(fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().info(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_INFO(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().info(__func__, fmt, ##__VA_ARGS__) #define ENTROPY_LOG_WARNING(fmt, ...) \ ::EntropyEngine::Core::Logging::Logger::global().warning(__func__, fmt, ##__VA_ARGS__) -#define ENTROPY_LOG_ERROR(fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().error(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_ERROR(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().error(__func__, fmt, ##__VA_ARGS__) -#define ENTROPY_LOG_FATAL(fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().fatal(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_FATAL(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().fatal(__func__, fmt, ##__VA_ARGS__) // Category-specific macros for explicit category specification #define ENTROPY_LOG_TRACE_CAT(category, fmt, ...) \ @@ -413,4 +410,3 @@ namespace Logging { #define ENTROPY_LOG_FATAL_CAT(category, fmt, ...) 
\ ::EntropyEngine::Core::Logging::Logger::global().fatal(category, fmt, ##__VA_ARGS__) - diff --git a/src/TypeSystem/GenericHandle.h b/src/TypeSystem/GenericHandle.h index 2b57b8f..63a0082 100644 --- a/src/TypeSystem/GenericHandle.h +++ b/src/TypeSystem/GenericHandle.h @@ -9,288 +9,293 @@ #pragma once -#include #include +#include #include #include -namespace EntropyEngine { - namespace Core { - namespace TypeSystem { - /** - * @class GenericHandle - * @brief Base class for type-safe handle implementations with generation-based validation. - * - * Provides a handle validation system that prevents use-after-free bugs - * through generation counting. Each handle contains an index and generation packed into - * a single 64-bit value, plus an optional owner reference. - * - * Focuses solely on handle representation and basic validation - does not - * dictate storage mechanisms. Storage classes (pools, arrays, maps, etc.) are responsible - * for implementing their own validation logic using the handle's index and generation. 
- * - * Features: - * - Handle validation with owner reference support - * - Generation-based validation to detect stale handles - * - Type safety through templated derived classes - * - Support for up to 4 billion objects with 4 billion generations each - * - Storage-agnostic design for maximum flexibility - * - * The handle is packed as: [32-bit generation][32-bit index] - * - Index 0 with generation 0 is reserved as invalid handle - * - Maximum index: 4,294,967,295 (0xFFFFFFFF) - * - Maximum generation: 4,294,967,295 (0xFFFFFFFF) - * - * Example usage with custom storage: - * @code - * class MyObjectManager { - * struct Slot { MyObject obj; uint32_t generation; bool occupied; }; - * std::vector slots; - * - * bool isHandleValid(const MyObjectHandle& handle) const { - * uint32_t index = handle.getIndex(); - * return index < slots.size() && - * slots[index].occupied && - * slots[index].generation == handle.getGeneration(); - * } - * }; - * @endcode - */ - template - class GenericHandle { - protected: - OwnerType* _owner = nullptr; - uint64_t _data; - - static constexpr uint64_t INDEX_MASK = 0xFFFFFFFF; - static constexpr uint64_t GENERATION_MASK = 0xFFFFFFFF00000000; - static constexpr uint32_t GENERATION_SHIFT = 32; - static constexpr uint64_t INVALID_HANDLE = 0; - - /** - * @brief Packs index and generation into a single 64-bit value - */ - static constexpr uint64_t pack(uint32_t index, uint32_t generation) { - return (static_cast(generation) << GENERATION_SHIFT) | index; - } - - public: - /** - * @brief Default constructor creates an invalid handle - */ - constexpr GenericHandle() : _owner(nullptr), _data(INVALID_HANDLE) {} - - /** - * @brief Constructs a handle with the specified owner, index and generation - * @param owner Pointer to the owning container - * @param index Object index in the container - * @param generation Generation counter for validation - */ - constexpr GenericHandle(OwnerType* owner, uint32_t index, uint32_t generation) - : _owner(owner), 
_data(pack(index, generation)) {} - - /** - * @brief Constructs a handle with the specified owner and raw ID (for non-generation use) - * @param owner Pointer to the owning container - * @param id Raw 64-bit identifier - */ - constexpr GenericHandle(OwnerType* owner, uint64_t id) - : _owner(owner), _data(id) {} - - /** - * @brief Gets the index component of this handle - * @return The 32-bit index value - */ - constexpr uint32_t getIndex() const { - return static_cast(_data & INDEX_MASK); - } - - /** - * @brief Gets the generation component of this handle - * @return The 32-bit generation value - */ - constexpr uint32_t getGeneration() const { - return static_cast(_data >> GENERATION_SHIFT); - } - - /** - * @brief Checks if this handle is potentially valid - * @return true if the handle is not the invalid handle value and owner validation passes - * @note For handles without owners, this only checks if the handle is non-null - */ - constexpr bool isValid() const { - if constexpr (std::is_same_v) { - return _data != INVALID_HANDLE; - } else { - return _owner && _data != INVALID_HANDLE; - } - } - - /** - * @brief Invalidates this handle - */ - void invalidate() { - _data = INVALID_HANDLE; - } - - /** - * @brief Gets the raw packed data - * @return The 64-bit packed handle data - */ - constexpr uint64_t getRawData() const { - return _data; - } - - /** - * @brief Gets the raw data as a 64-bit ID (for non-generation use cases) - * @return The 64-bit ID value - */ - constexpr uint64_t getId() const { - return _data; - } - - /** - * @brief Gets the owner of this handle - * @return Pointer to the owning container - */ - constexpr OwnerType* getOwner() const { - return _owner; - } - - /** - * @brief Equality comparison - */ - constexpr bool operator==(const GenericHandle& other) const { - return _owner == other._owner && _data == other._data; - } - - /** - * @brief Inequality comparison - */ - constexpr bool operator!=(const GenericHandle& other) const { - return !(*this == 
other); - } - - /** - * @brief Less-than comparison for use in sorted containers - */ - constexpr bool operator<(const GenericHandle& other) const { - if (_owner != other._owner) { - return _owner < other._owner; - } - return _data < other._data; - } - - /** - * @brief Hash support for use in unordered containers - */ - struct Hash { - size_t operator()(const GenericHandle& handle) const { - auto h1 = std::hash{}(handle._owner); - auto h2 = std::hash{}(handle._data); - return h1 ^ (h2 << 1); - } - }; - }; - - /** - * @class TypedHandle - * @brief Type-safe handle template that derives from GenericHandle - * @tparam T The tag type this handle represents (usually an empty struct) - * @tparam OwnerType The type of the owning container (optional) - * - * This template provides type safety on top of GenericHandle, preventing - * accidental mixing of handles to different types of objects. The template - * parameter T is typically a tag struct that exists solely for type safety. - * - * The handle is storage-agnostic - it only provides the index/generation - * validation mechanism. Storage classes implement their own validation - * logic using the handle's getIndex() and getGeneration() methods. 
- * - * Example usage: - * @code - * struct EntityTag {}; - * struct ComponentTag {}; - * - * using EntityHandle = TypedHandle; - * using ComponentHandle = TypedHandle; - * - * EntityManager entityMgr; - * EntityHandle entity(&entityMgr, 5, 1); - * ComponentHandle component(&componentMgr, 5, 1); - * // entity == component would be a compile error (different types) - * - * // Usage with custom storage: - * class EntityManager { - * bool isHandleValid(const EntityHandle& handle) const { - * return validateWithGeneration(handle.getIndex(), handle.getGeneration()); - * } - * }; - * @endcode - */ - template - class TypedHandle : public GenericHandle { - public: - using Type = T; - - /** - * @brief Default constructor creates an invalid handle - */ - constexpr TypedHandle() : GenericHandle() {} - - /** - * @brief Constructs a typed handle with the specified owner, index and generation - */ - constexpr TypedHandle(OwnerType* owner, uint32_t index, uint32_t generation) - : GenericHandle(owner, index, generation) {} - - /** - * @brief Constructs a typed handle with the specified owner and raw ID - */ - constexpr TypedHandle(OwnerType* owner, uint64_t id) - : GenericHandle(owner, id) {} - - /** - * @brief Constructs from a generic handle (explicit to prevent accidental conversion) - */ - explicit constexpr TypedHandle(const GenericHandle& generic) - : GenericHandle(generic) {} - - /** - * @brief Creates an invalid handle of this type - */ - static constexpr TypedHandle invalid() { - return TypedHandle(); - } - - /** - * @brief Gets a debug identifier for this handle (for logging/debugging) - * @return Unique identifier combining index and generation - */ - uint64_t getDebugId() const { - return GenericHandle::getRawData(); - } - - /** - * @brief Hash support for use in unordered containers - */ - struct Hash { - size_t operator()(const TypedHandle& handle) const { - return typename GenericHandle::Hash{}(handle); - } - }; - }; +namespace EntropyEngine +{ +namespace Core +{ 
+namespace TypeSystem +{ +/** + * @class GenericHandle + * @brief Base class for type-safe handle implementations with generation-based validation. + * + * Provides a handle validation system that prevents use-after-free bugs + * through generation counting. Each handle contains an index and generation packed into + * a single 64-bit value, plus an optional owner reference. + * + * Focuses solely on handle representation and basic validation - does not + * dictate storage mechanisms. Storage classes (pools, arrays, maps, etc.) are responsible + * for implementing their own validation logic using the handle's index and generation. + * + * Features: + * - Handle validation with owner reference support + * - Generation-based validation to detect stale handles + * - Type safety through templated derived classes + * - Support for up to 4 billion objects with 4 billion generations each + * - Storage-agnostic design for maximum flexibility + * + * The handle is packed as: [32-bit generation][32-bit index] + * - Index 0 with generation 0 is reserved as invalid handle + * - Maximum index: 4,294,967,295 (0xFFFFFFFF) + * - Maximum generation: 4,294,967,295 (0xFFFFFFFF) + * + * Example usage with custom storage: + * @code + * class MyObjectManager { + * struct Slot { MyObject obj; uint32_t generation; bool occupied; }; + * std::vector slots; + * + * bool isHandleValid(const MyObjectHandle& handle) const { + * uint32_t index = handle.getIndex(); + * return index < slots.size() && + * slots[index].occupied && + * slots[index].generation == handle.getGeneration(); + * } + * }; + * @endcode + */ +template +class GenericHandle +{ +protected: + OwnerType* _owner = nullptr; + uint64_t _data; + + static constexpr uint64_t INDEX_MASK = 0xFFFFFFFF; + static constexpr uint64_t GENERATION_MASK = 0xFFFFFFFF00000000; + static constexpr uint32_t GENERATION_SHIFT = 32; + static constexpr uint64_t INVALID_HANDLE = 0; + + /** + * @brief Packs index and generation into a single 64-bit value + */ 
+ static constexpr uint64_t pack(uint32_t index, uint32_t generation) { + return (static_cast(generation) << GENERATION_SHIFT) | index; + } + +public: + /** + * @brief Default constructor creates an invalid handle + */ + constexpr GenericHandle() : _owner(nullptr), _data(INVALID_HANDLE) {} + + /** + * @brief Constructs a handle with the specified owner, index and generation + * @param owner Pointer to the owning container + * @param index Object index in the container + * @param generation Generation counter for validation + */ + constexpr GenericHandle(OwnerType* owner, uint32_t index, uint32_t generation) + : _owner(owner), _data(pack(index, generation)) {} + + /** + * @brief Constructs a handle with the specified owner and raw ID (for non-generation use) + * @param owner Pointer to the owning container + * @param id Raw 64-bit identifier + */ + constexpr GenericHandle(OwnerType* owner, uint64_t id) : _owner(owner), _data(id) {} + + /** + * @brief Gets the index component of this handle + * @return The 32-bit index value + */ + constexpr uint32_t getIndex() const { + return static_cast(_data & INDEX_MASK); + } + + /** + * @brief Gets the generation component of this handle + * @return The 32-bit generation value + */ + constexpr uint32_t getGeneration() const { + return static_cast(_data >> GENERATION_SHIFT); + } + + /** + * @brief Checks if this handle is potentially valid + * @return true if the handle is not the invalid handle value and owner validation passes + * @note For handles without owners, this only checks if the handle is non-null + */ + constexpr bool isValid() const { + if constexpr (std::is_same_v) { + return _data != INVALID_HANDLE; + } else { + return _owner && _data != INVALID_HANDLE; } } -} -// Add standard library hash support for TypedHandle -namespace std { - template - struct hash> { - size_t operator()(const EntropyEngine::Core::TypeSystem::TypedHandle& handle) const { - return typename 
EntropyEngine::Core::TypeSystem::TypedHandle<T, OwnerType>::Hash{}(handle);
+    /**
+     * @brief Invalidates this handle
+     */
+    void invalidate() {
+        _data = INVALID_HANDLE;
+    }
+
+    /**
+     * @brief Gets the raw packed data
+     * @return The 64-bit packed handle data
+     */
+    constexpr uint64_t getRawData() const {
+        return _data;
+    }
+
+    /**
+     * @brief Gets the raw data as a 64-bit ID (for non-generation use cases)
+     * @return The 64-bit ID value
+     */
+    constexpr uint64_t getId() const {
+        return _data;
+    }
+
+    /**
+     * @brief Gets the owner of this handle
+     * @return Pointer to the owning container
+     */
+    constexpr OwnerType* getOwner() const {
+        return _owner;
+    }
+
+    /**
+     * @brief Equality comparison
+     */
+    constexpr bool operator==(const GenericHandle& other) const {
+        return _owner == other._owner && _data == other._data;
+    }
+
+    /**
+     * @brief Inequality comparison
+     */
+    constexpr bool operator!=(const GenericHandle& other) const {
+        return !(*this == other);
+    }
+
+    /**
+     * @brief Less-than comparison for use in sorted containers
+     */
+    constexpr bool operator<(const GenericHandle& other) const {
+        if (_owner != other._owner) {
+            return _owner < other._owner;
+        }
+        return _data < other._data;
+    }
+
+    /**
+     * @brief Hash support for use in unordered containers
+     */
+    struct Hash
+    {
+        size_t operator()(const GenericHandle& handle) const {
+            auto h1 = std::hash<OwnerType*>{}(handle._owner);
+            auto h2 = std::hash<uint64_t>{}(handle._data);
+            return h1 ^ (h2 << 1);
+        }
+    };
+};
+
+/**
+ * @class TypedHandle
+ * @brief Type-safe handle template that derives from GenericHandle
+ * @tparam T The tag type this handle represents (usually an empty struct)
+ * @tparam OwnerType The type of the owning container (optional)
+ *
+ * This template provides type safety on top of GenericHandle, preventing
+ * accidental mixing of handles to different types of objects. The template
+ * parameter T is typically a tag struct that exists solely for type safety.
+ *
+ * The handle is storage-agnostic - it only provides the index/generation
+ * validation mechanism. Storage classes implement their own validation
+ * logic using the handle's getIndex() and getGeneration() methods.
+ *
+ * Example usage:
+ * @code
+ * struct EntityTag {};
+ * struct ComponentTag {};
+ *
+ * using EntityHandle = TypedHandle<EntityTag, EntityManager>;
+ * using ComponentHandle = TypedHandle<ComponentTag, ComponentManager>;
+ *
+ * EntityManager entityMgr;
+ * EntityHandle entity(&entityMgr, 5, 1);
+ * ComponentHandle component(&componentMgr, 5, 1);
+ * // entity == component would be a compile error (different types)
+ *
+ * // Usage with custom storage:
+ * class EntityManager {
+ *     bool isHandleValid(const EntityHandle& handle) const {
+ *         return validateWithGeneration(handle.getIndex(), handle.getGeneration());
+ *     }
+ * };
+ * @endcode
+ */
+template <typename T, typename OwnerType = void>
+class TypedHandle : public GenericHandle<OwnerType>
+{
+public:
+    using Type = T;
+
+    /**
+     * @brief Default constructor creates an invalid handle
+     */
+    constexpr TypedHandle() : GenericHandle<OwnerType>() {}
+
+    /**
+     * @brief Constructs a typed handle with the specified owner, index and generation
+     */
+    constexpr TypedHandle(OwnerType* owner, uint32_t index, uint32_t generation)
+        : GenericHandle<OwnerType>(owner, index, generation) {}
+
+    /**
+     * @brief Constructs a typed handle with the specified owner and raw ID
+     */
+    constexpr TypedHandle(OwnerType* owner, uint64_t id) : GenericHandle<OwnerType>(owner, id) {}
+
+    /**
+     * @brief Constructs from a generic handle (explicit to prevent accidental conversion)
+     */
+    explicit constexpr TypedHandle(const GenericHandle<OwnerType>& generic) : GenericHandle<OwnerType>(generic) {}
+
+    /**
+     * @brief Creates an invalid handle of this type
+     */
+    static constexpr TypedHandle invalid() {
+        return TypedHandle();
+    }
+
+    /**
+     * @brief Gets a debug identifier for this handle (for logging/debugging)
+     * @return Unique identifier combining index and generation
+     */
+    uint64_t getDebugId() const {
+        return GenericHandle<OwnerType>::getRawData();
+    }
+
+    /**
+     * @brief Hash support for
use in unordered containers
+     */
+    struct Hash
+    {
+        size_t operator()(const TypedHandle& handle) const {
+            return typename GenericHandle<OwnerType>::Hash{}(handle);
+        }
+    };
-};
+};
+} // namespace TypeSystem
+} // namespace Core
+} // namespace EntropyEngine
+// Add standard library hash support for TypedHandle
+namespace std
+{
+template <typename T, typename OwnerType>
+struct hash<EntropyEngine::Core::TypeSystem::TypedHandle<T, OwnerType>>
+{
+    size_t operator()(const EntropyEngine::Core::TypeSystem::TypedHandle<T, OwnerType>& handle) const {
+        return typename EntropyEngine::Core::TypeSystem::TypedHandle<T, OwnerType>::Hash{}(handle);
+    }
+};
+} // namespace std
diff --git a/src/TypeSystem/Reflection.h b/src/TypeSystem/Reflection.h
index dccddd0..17de3ac 100644
--- a/src/TypeSystem/Reflection.h
+++ b/src/TypeSystem/Reflection.h
@@ -1,11 +1,11 @@
 /**
  * @file Reflection.h
  * @brief Compile-time reflection system for Entropy Engine
- * 
+ *
  * This header provides a comprehensive reflection system that generates type information
 * at compile-time while maintaining runtime API compatibility. The system supports
 * field introspection, type metadata, and safe field value access.
- * + * * Key Features: * - Compile-time type registration via macros * - Runtime field introspection and value access @@ -24,16 +24,16 @@ * class MyClass { * public: * ENTROPY_REGISTER_TYPE(MyClass); - * + * * ENTROPY_FIELD(int, value); * ENTROPY_FIELD(double, ratio) = 0.5d; * ENTROPY_FIELD(std::string, name); * }; - * + * * // Access reflection information * const auto* typeInfo = TypeInfo::get(); * const auto& fields = typeInfo->getFields(); - * + * * MyClass instance; * for (const auto& field : fields) { * if (field.name == "value") { @@ -48,392 +48,415 @@ #pragma once -#include "TypeID.h" -#include -#include -#include +#include // For std::reverse +#include +#include #include #include -#include #include -#include // For std::reverse -#include +#include +#include +#include -namespace EntropyEngine { - namespace Core { - namespace TypeSystem { +#include "TypeID.h" - /** - * @brief Information about a reflected field in a type - * - * Contains metadata to access a field: name, type, and memory offset. - * Created automatically by ENTROPY_FIELD macro. - */ - struct FieldInfo { - /** - * @brief The name of the field as a string view - */ - std::string_view name; - - /** - * @brief Type identification for the field - */ - TypeID type; - - /** - * @brief Memory offset of the field within the containing object - */ - size_t offset; - }; +namespace EntropyEngine +{ +namespace Core +{ +namespace TypeSystem +{ - /** - * @brief Internal implementation details for the reflection system - */ - namespace detail { - /** - * @brief Placeholder for future constexpr field infrastructure - * - * This space is reserved for compile-time field accumulation - * systems. - */ - // Future: constexpr field infrastructure will go here +/** + * @brief Information about a reflected field in a type + * + * Contains metadata to access a field: name, type, and memory offset. + * Created automatically by ENTROPY_FIELD macro. 
+ */ +struct FieldInfo +{ + /** + * @brief The name of the field as a string view + */ + std::string_view name; - /** - * @brief Node in the linked list of field information - * - * Used during static initialization to collect field metadata - * registered via ENTROPY_FIELD macros. Forms a singly-linked - * list that gets converted to a vector at runtime. - * - * @note This is an implementation detail and may change in future versions - */ - struct FieldInfoNode { - const FieldInfo data; ///< Field metadata - FieldInfoNode* next = nullptr; ///< Pointer to next node in list - }; + /** + * @brief Type identification for the field + */ + TypeID type; - /** - * @brief Head pointer for the field registration linked list - * @tparam T The type whose fields are being registered - * - * Each type T gets its own static linked list head for collecting - * field information during static initialization. - */ - template - struct FieldListHead { - inline static FieldInfoNode* head = nullptr; - }; - } // namespace detail + /** + * @brief Memory offset of the field within the containing object + */ + size_t offset; +}; - /** - * @brief Compile-time type information collector - * @tparam T The type to collect information for - * - * This template provides access to type metadata by combining - * compile-time type name resolution with runtime field collection. - * Types using ENTROPY_REGISTER_TYPE will have their information - * generated at compile time where possible. 
- * - * @note This is an internal helper structure used by TypeInfo - */ - template - struct compile_time_type_info { - /** - * @brief Get the TypeID for this type - * @return TypeID object uniquely identifying type T - */ - static TypeID get_type_id() { return createTypeId(); } - - /** - * @brief Compile-time type name from ENTROPY_REGISTER_TYPE - */ - static constexpr std::string_view name = T::getStaticTypeName(); - - /** - * @brief Collect field information from the registration system - * @return Vector of FieldInfo objects for all registered fields - * - * Traverses linked list and reverses to match declaration order. - * O(n), called once per type. - */ - static std::vector get_fields() { - std::vector fields; - for (auto* node = detail::FieldListHead::head; node != nullptr; node = node->next) { - fields.push_back(node->data); - } - std::reverse(fields.begin(), fields.end()); - return fields; - } - }; +/** + * @brief Internal implementation details for the reflection system + */ +namespace detail +{ +/** + * @brief Placeholder for future constexpr field infrastructure + * + * This space is reserved for compile-time field accumulation + * systems. + */ +// Future: constexpr field infrastructure will go here - /** - * @brief Runtime type information container with field introspection - * - * Primary interface for runtime reflection. Features static instance - * caching, type-safe field access, and automatic offset calculation. - */ - class TypeInfo { - private: - TypeID m_id; ///< Unique type identifier - std::string_view m_name; ///< Human-readable type name - std::vector m_fields; ///< List of reflected fields +/** + * @brief Node in the linked list of field information + * + * Used during static initialization to collect field metadata + * registered via ENTROPY_FIELD macros. Forms a singly-linked + * list that gets converted to a vector at runtime. 
+ * + * @note This is an implementation detail and may change in future versions + */ +struct FieldInfoNode +{ + const FieldInfo data; ///< Field metadata + FieldInfoNode* next = nullptr; ///< Pointer to next node in list +}; - // Legacy runtime registration (for backward compatibility during transition) - inline static std::map> s_registry; - inline static std::map()>> s_factories; +/** + * @brief Head pointer for the field registration linked list + * @tparam T The type whose fields are being registered + * + * Each type T gets its own static linked list head for collecting + * field information during static initialization. + */ +template +struct FieldListHead +{ + inline static FieldInfoNode* head = nullptr; +}; +} // namespace detail - /** - * @brief Private constructor for TypeInfo instances - */ - TypeInfo(TypeID id, std::string_view name, std::vector&& fields) - : m_id(id), m_name(name), m_fields(std::move(fields)) {} +/** + * @brief Compile-time type information collector + * @tparam T The type to collect information for + * + * This template provides access to type metadata by combining + * compile-time type name resolution with runtime field collection. + * Types using ENTROPY_REGISTER_TYPE will have their information + * generated at compile time where possible. 
+ * + * @note This is an internal helper structure used by TypeInfo + */ +template +struct compile_time_type_info +{ + /** + * @brief Get the TypeID for this type + * @return TypeID object uniquely identifying type T + */ + static TypeID get_type_id() { + return createTypeId(); + } - /** - * @brief Create TypeInfo from compile-time type information - * @tparam T The type to create TypeInfo for - * @return Constructed TypeInfo instance - */ - template - static TypeInfo create_from_constexpr() { - return TypeInfo{compile_time_type_info::get_type_id(), - compile_time_type_info::name, - compile_time_type_info::get_fields()}; - } + /** + * @brief Compile-time type name from ENTROPY_REGISTER_TYPE + */ + static constexpr std::string_view name = T::getStaticTypeName(); - public: - /** - * @brief Get the unique type identifier - * @return TypeID for this type - */ - TypeID getID() const { return m_id; } - - /** - * @brief Get the human-readable type name - * @return Type name as string_view (zero allocation) - */ - std::string_view getName() const { return m_name; } - - /** - * @brief Get all reflected fields for this type - * @return Const reference to vector of FieldInfo objects - * - * Fields are returned in declaration order. The vector is - * constructed once per type and cached for efficiency. - */ - const std::vector& getFields() const { return m_fields; } + /** + * @brief Collect field information from the registration system + * @return Vector of FieldInfo objects for all registered fields + * + * Traverses linked list and reverses to match declaration order. + * O(n), called once per type. 
+ */ + static std::vector get_fields() { + std::vector fields; + for (auto* node = detail::FieldListHead::head; node != nullptr; node = node->next) { + fields.push_back(node->data); + } + std::reverse(fields.begin(), fields.end()); + return fields; + } +}; - /** - * @brief Get TypeInfo for a specific type T - * @tparam T The type to get reflection information for - * @return Pointer to TypeInfo instance, or nullptr if not registered - * - * This is the primary entry point for accessing type reflection information. - * For types registered with ENTROPY_REGISTER_TYPE, this function uses a - * compile-time path with static instance caching. Legacy types - * fall back to the runtime registration system. - * - * @code - * // Get reflection info for a registered type - * const auto* info = TypeInfo::get(); - * if (info) { - * std::cout << "Type: " << info->getName() << std::endl; - * std::cout << "Fields: " << info->getFields().size() << std::endl; - * } - * @endcode - */ - template - static const TypeInfo* get() { - // Use compile-time approach for types with getStaticTypeName - if constexpr (requires { T::getStaticTypeName(); }) { - static const TypeInfo instance = create_from_constexpr(); - return &instance; - } - - // Fallback to legacy runtime registration for old types - const auto typeId = createTypeId(); - if (const auto it = s_registry.find(typeId); it != s_registry.end()) { - return it->second.get(); - } +/** + * @brief Runtime type information container with field introspection + * + * Primary interface for runtime reflection. Features static instance + * caching, type-safe field access, and automatic offset calculation. 
+ */ +class TypeInfo +{ +private: + TypeID m_id; ///< Unique type identifier + std::string_view m_name; ///< Human-readable type name + std::vector m_fields; ///< List of reflected fields - if (const auto it = s_factories.find(typeId); it != s_factories.end()) { - std::unique_ptr newInstance = it->second(); - const TypeInfo* ptr = newInstance.get(); - s_registry[typeId] = std::move(newInstance); - return ptr; - } - return nullptr; - } + // Legacy runtime registration (for backward compatibility during transition) + inline static std::map> s_registry; + inline static std::map()>> s_factories; - /** - * @brief Safely retrieve a field value from an object instance - * @tparam T The expected type of the field value - * @param obj Pointer to the object instance (must not be null) - * @param field FieldInfo describing the field to access - * @return Optional containing the field value, or nullopt if type mismatch - * - * This function provides type-safe access to field values using the - * field offset information. Type safety is enforced by comparing the - * requested type T with the field's registered type. - * - * Safety Features: - * - Type validation prevents incorrect casts - * - Uses std::optional to handle type mismatches gracefully - * - Direct memory access - * - * @warning The object pointer must be valid and point to an instance - * of the type that owns the field. No bounds checking is performed. - * - * @code - * MyClass instance; - * const auto* typeInfo = TypeInfo::get(); - * - * for (const auto& field : typeInfo->getFields()) { - * if (field.name == "myIntField") { - * auto value = TypeInfo::get_field_value(&instance, field); - * if (value) { - * std::cout << "Field value: " << *value << std::endl; - * } else { - * std::cout << "Type mismatch!" 
<< std::endl; - * } - * } - * } - * @endcode - */ - template - static std::optional get_field_value(const void* obj, const FieldInfo& field) { - if (createTypeId() != field.type) { - return std::nullopt; - } - const char* obj_bytes = static_cast(obj); - return *reinterpret_cast(obj_bytes + field.offset); - } + /** + * @brief Private constructor for TypeInfo instances + */ + TypeInfo(TypeID id, std::string_view name, std::vector&& fields) + : m_id(id), m_name(name), m_fields(std::move(fields)) {} - template - friend struct _EntropyTypeRegistrar; - }; + /** + * @brief Create TypeInfo from compile-time type information + * @tparam T The type to create TypeInfo for + * @return Constructed TypeInfo instance + */ + template + static TypeInfo create_from_constexpr() { + return TypeInfo{compile_time_type_info::get_type_id(), compile_time_type_info::name, + compile_time_type_info::get_fields()}; + } + +public: + /** + * @brief Get the unique type identifier + * @return TypeID for this type + */ + TypeID getID() const { + return m_id; + } + + /** + * @brief Get the human-readable type name + * @return Type name as string_view (zero allocation) + */ + std::string_view getName() const { + return m_name; + } + + /** + * @brief Get all reflected fields for this type + * @return Const reference to vector of FieldInfo objects + * + * Fields are returned in declaration order. The vector is + * constructed once per type and cached for efficiency. + */ + const std::vector& getFields() const { + return m_fields; + } + + /** + * @brief Get TypeInfo for a specific type T + * @tparam T The type to get reflection information for + * @return Pointer to TypeInfo instance, or nullptr if not registered + * + * This is the primary entry point for accessing type reflection information. + * For types registered with ENTROPY_REGISTER_TYPE, this function uses a + * compile-time path with static instance caching. Legacy types + * fall back to the runtime registration system. 
+ * + * @code + * // Get reflection info for a registered type + * const auto* info = TypeInfo::get(); + * if (info) { + * std::cout << "Type: " << info->getName() << std::endl; + * std::cout << "Fields: " << info->getFields().size() << std::endl; + * } + * @endcode + */ + template + static const TypeInfo* get() { + // Use compile-time approach for types with getStaticTypeName + if constexpr (requires { T::getStaticTypeName(); }) { + static const TypeInfo instance = create_from_constexpr(); + return &instance; + } - /** - * @brief Legacy type registrar for backward compatibility - * @tparam T The type to register using the old runtime system - * - * Fallback for types without compile-time support. Will be deprecated. - */ - template - struct _EntropyTypeRegistrar { - _EntropyTypeRegistrar() { - // Only register if type doesn't support the new compile-time approach - if constexpr (!requires { T::getStaticTypeName(); }) { - auto typeId = createTypeId(); - TypeInfo::s_factories[typeId] = [typeId]() { - std::vector fields; - for (auto* node = detail::FieldListHead::head; node != nullptr; node = node->next) { - fields.push_back(node->data); - } - std::reverse(fields.begin(), fields.end()); - return std::unique_ptr(new TypeInfo(typeId, "Unknown", std::move(fields))); - }; - } + // Fallback to legacy runtime registration for old types + const auto typeId = createTypeId(); + if (const auto it = s_registry.find(typeId); it != s_registry.end()) { + return it->second.get(); + } + + if (const auto it = s_factories.find(typeId); it != s_factories.end()) { + std::unique_ptr newInstance = it->second(); + const TypeInfo* ptr = newInstance.get(); + s_registry[typeId] = std::move(newInstance); + return ptr; + } + return nullptr; + } + + /** + * @brief Safely retrieve a field value from an object instance + * @tparam T The expected type of the field value + * @param obj Pointer to the object instance (must not be null) + * @param field FieldInfo describing the field to access + * 
@return Optional containing the field value, or nullopt if type mismatch + * + * This function provides type-safe access to field values using the + * field offset information. Type safety is enforced by comparing the + * requested type T with the field's registered type. + * + * Safety Features: + * - Type validation prevents incorrect casts + * - Uses std::optional to handle type mismatches gracefully + * - Direct memory access + * + * @warning The object pointer must be valid and point to an instance + * of the type that owns the field. No bounds checking is performed. + * + * @code + * MyClass instance; + * const auto* typeInfo = TypeInfo::get(); + * + * for (const auto& field : typeInfo->getFields()) { + * if (field.name == "myIntField") { + * auto value = TypeInfo::get_field_value(&instance, field); + * if (value) { + * std::cout << "Field value: " << *value << std::endl; + * } else { + * std::cout << "Type mismatch!" << std::endl; + * } + * } + * } + * @endcode + */ + template + static std::optional get_field_value(const void* obj, const FieldInfo& field) { + if (createTypeId() != field.type) { + return std::nullopt; + } + const char* obj_bytes = static_cast(obj); + return *reinterpret_cast(obj_bytes + field.offset); + } + + template + friend struct _EntropyTypeRegistrar; +}; + +/** + * @brief Legacy type registrar for backward compatibility + * @tparam T The type to register using the old runtime system + * + * Fallback for types without compile-time support. Will be deprecated. 
+ */ +template +struct _EntropyTypeRegistrar +{ + _EntropyTypeRegistrar() { + // Only register if type doesn't support the new compile-time approach + if constexpr (!requires { T::getStaticTypeName(); }) { + auto typeId = createTypeId(); + TypeInfo::s_factories[typeId] = [typeId]() { + std::vector fields; + for (auto* node = detail::FieldListHead::head; node != nullptr; node = node->next) { + fields.push_back(node->data); } + std::reverse(fields.begin(), fields.end()); + return std::unique_ptr(new TypeInfo(typeId, "Unknown", std::move(fields))); }; + } + } +}; - /** - * @def ENTROPY_REGISTER_TYPE(TypeName) - * @brief Register a type for compile-time reflection - * @param TypeName The name of the type to register (unquoted) - * - * This macro must be placed in the public section of any class that - * wants to participate in the reflection system. It provides: - * - Compile-time type name access via getStaticTypeName() - * - Instance method type() for getting TypeID - * - Automatic integration with the TypeInfo system - * - * Features: - * - Compile-time type name access - * - Automatic TypeInfo generation with static caching - * - Full namespace qualification to avoid naming conflicts - * - * Usage: - * @code - * class MyClass { - * public: - * ENTROPY_REGISTER_TYPE(MyClass); - * - * // Your class members here... 
- * }; - * @endcode - * - * Generated members: - * - `static constexpr std::string_view getStaticTypeName()` - * - `TypeID type() const` - * - `using OwnerType = TypeName` (private, for field registration) - */ - #define ENTROPY_REGISTER_TYPE(TypeName) \ - private: \ - using OwnerType = TypeName; \ - public: \ - static constexpr std::string_view getStaticTypeName() { return #TypeName; } \ - ::EntropyEngine::Core::TypeSystem::TypeID type() const { return ::EntropyEngine::Core::TypeSystem::createTypeId(); } - - /** - * @def ENTROPY_FIELD(Type, Name) - * @brief Register a field for runtime reflection - * @param Type The type of the field (fully qualified if needed) - * @param Name The name of the field (unquoted) - * - * This macro must be placed where you would normally declare a class member. - * It simultaneously declares the member variable and registers it for reflection. - * The macro must be used within a class that has ENTROPY_REGISTER_TYPE. - * - * Features: - * - Automatic field offset calculation using offsetof() - * - Type-safe field registration with full type information - * - Integration with the linked list collection system - * - Maintains declaration order in reflection metadata - * - * Requirements: - * - Must be used in a class with ENTROPY_REGISTER_TYPE - * - Field names must be valid C++ identifiers - * - Types must be complete at the point of registration - * - * Usage: - * @code - * class MyClass { - * public: - * ENTROPY_REGISTER_TYPE(MyClass); - * - * ENTROPY_FIELD(int, health); - * ENTROPY_FIELD(std::string, name); - * ENTROPY_FIELD(glm::vec3, position); - * - * private: - * ENTROPY_FIELD(bool, isActive); // Private fields also supported - * }; - * @endcode - * - * Generated components: - * - The actual member variable: `Type Name` - * - Static registration helper structures (private) - * - Automatic insertion into the field linked list - * - * @warning Field types containing commas (like `std::map`) may require - * careful handling or 
typedef declarations - */ - #define ENTROPY_FIELD(Type, Name) \ - private: \ - struct _EntropyFieldRegistrar_##Name { \ - _EntropyFieldRegistrar_##Name() { \ - static ::EntropyEngine::Core::TypeSystem::detail::FieldInfoNode node { \ - { #Name, ::EntropyEngine::Core::TypeSystem::createTypeId(), offsetof(OwnerType, Name) } \ - }; \ - node.next = ::EntropyEngine::Core::TypeSystem::detail::FieldListHead::head; \ - ::EntropyEngine::Core::TypeSystem::detail::FieldListHead::head = &node; \ - } \ - }; \ - inline static _EntropyFieldRegistrar_##Name _entropy_field_registrar_##Name; \ - public: \ - Type Name +/** + * @def ENTROPY_REGISTER_TYPE(TypeName) + * @brief Register a type for compile-time reflection + * @param TypeName The name of the type to register (unquoted) + * + * This macro must be placed in the public section of any class that + * wants to participate in the reflection system. It provides: + * - Compile-time type name access via getStaticTypeName() + * - Instance method type() for getting TypeID + * - Automatic integration with the TypeInfo system + * + * Features: + * - Compile-time type name access + * - Automatic TypeInfo generation with static caching + * - Full namespace qualification to avoid naming conflicts + * + * Usage: + * @code + * class MyClass { + * public: + * ENTROPY_REGISTER_TYPE(MyClass); + * + * // Your class members here... 
+ * }; + * @endcode + * + * Generated members: + * - `static constexpr std::string_view getStaticTypeName()` + * - `TypeID type() const` + * - `using OwnerType = TypeName` (private, for field registration) + */ +#define ENTROPY_REGISTER_TYPE(TypeName) \ +private: \ + using OwnerType = TypeName; \ + \ +public: \ + static constexpr std::string_view getStaticTypeName() { \ + return #TypeName; \ + } \ + ::EntropyEngine::Core::TypeSystem::TypeID type() const { \ + return ::EntropyEngine::Core::TypeSystem::createTypeId(); \ + } - } // namespace TypeSystem - } // namespace Core -} // namespace EntropyEngine +/** + * @def ENTROPY_FIELD(Type, Name) + * @brief Register a field for runtime reflection + * @param Type The type of the field (fully qualified if needed) + * @param Name The name of the field (unquoted) + * + * This macro must be placed where you would normally declare a class member. + * It simultaneously declares the member variable and registers it for reflection. + * The macro must be used within a class that has ENTROPY_REGISTER_TYPE. 
+ * + * Features: + * - Automatic field offset calculation using offsetof() + * - Type-safe field registration with full type information + * - Integration with the linked list collection system + * - Maintains declaration order in reflection metadata + * + * Requirements: + * - Must be used in a class with ENTROPY_REGISTER_TYPE + * - Field names must be valid C++ identifiers + * - Types must be complete at the point of registration + * + * Usage: + * @code + * class MyClass { + * public: + * ENTROPY_REGISTER_TYPE(MyClass); + * + * ENTROPY_FIELD(int, health); + * ENTROPY_FIELD(std::string, name); + * ENTROPY_FIELD(glm::vec3, position); + * + * private: + * ENTROPY_FIELD(bool, isActive); // Private fields also supported + * }; + * @endcode + * + * Generated components: + * - The actual member variable: `Type Name` + * - Static registration helper structures (private) + * - Automatic insertion into the field linked list + * + * @warning Field types containing commas (like `std::map`) may require + * careful handling or typedef declarations + */ +#define ENTROPY_FIELD(Type, Name) \ +private: \ + struct _EntropyFieldRegistrar_##Name \ + { \ + _EntropyFieldRegistrar_##Name() { \ + static ::EntropyEngine::Core::TypeSystem::detail::FieldInfoNode node{ \ + {#Name, ::EntropyEngine::Core::TypeSystem::createTypeId(), offsetof(OwnerType, Name)}}; \ + node.next = ::EntropyEngine::Core::TypeSystem::detail::FieldListHead::head; \ + ::EntropyEngine::Core::TypeSystem::detail::FieldListHead::head = &node; \ + } \ + }; \ + inline static _EntropyFieldRegistrar_##Name _entropy_field_registrar_##Name; \ + \ +public: \ + Type Name +} // namespace TypeSystem +} // namespace Core +} // namespace EntropyEngine diff --git a/src/TypeSystem/TypeID.h b/src/TypeSystem/TypeID.h index 21510c8..48ecdf9 100644 --- a/src/TypeSystem/TypeID.h +++ b/src/TypeSystem/TypeID.h @@ -1,7 +1,7 @@ /** * @file TypeID.h * @brief Cross-platform type identification system for Entropy Engine - * + * * This header 
provides a stable, hashable type identification system using boost::type_index. * TypeID objects can be used for type comparison, hashing, and runtime type identification * while maintaining consistent behavior across different platforms and compilers. @@ -10,10 +10,10 @@ #pragma once #include -#include #include -#include #include +#include +#include #ifndef ENTROPY_ENABLE_RTTI #define ENTROPY_ENABLE_RTTI 0 @@ -23,181 +23,190 @@ #define ENTROPY_TYPEID_INCLUDE_NAME 0 #endif -namespace EntropyEngine { - namespace Core { - namespace TypeSystem { +namespace EntropyEngine +{ +namespace Core +{ +namespace TypeSystem +{ - /** - * @brief A cross-platform type identifier with stable hashing and comparison - * - * TypeID provides a consistent way to identify types at runtime across different - * platforms and compilers. It uses boost::type_index internally to generate - * stable hash codes and human-readable type names. - * - * Features: - * - Stable hash codes across compilation units - * - Human-readable type names with template parameters (optionally compiled in) - * - Supports comparison operations - * - Compatible with standard containers (via std::hash specialization) - * - * @note TypeID objects are immutable and safe to use across thread boundaries - * - * Example usage: - * @code - * auto intTypeId = createTypeId(); - * auto floatTypeId = createTypeId(); - * - * if (intTypeId == floatTypeId) { - * // Never executed - different types - * } - * @endcode - */ - struct TypeID { - /** - * @brief 64-bit canonical hash identifier for the type - * - * Derived from boost::type_index::hash_code() and normalized to - * uint64_t for cross-ABI stability within a build. - */ - uint64_t id; - - /** - * @brief Human-readable name of the type - * - * Contains the canonical type name including template parameters. - * For example: "glm::vec<3, float>" or "std::vector". 
- */ - std::string name; +/** + * @brief A cross-platform type identifier with stable hashing and comparison + * + * TypeID provides a consistent way to identify types at runtime across different + * platforms and compilers. It uses boost::type_index internally to generate + * stable hash codes and human-readable type names. + * + * Features: + * - Stable hash codes across compilation units + * - Human-readable type names with template parameters (optionally compiled in) + * - Supports comparison operations + * - Compatible with standard containers (via std::hash specialization) + * + * @note TypeID objects are immutable and safe to use across thread boundaries + * + * Example usage: + * @code + * auto intTypeId = createTypeId(); + * auto floatTypeId = createTypeId(); + * + * if (intTypeId == floatTypeId) { + * // Never executed - different types + * } + * @endcode + */ +struct TypeID +{ + /** + * @brief 64-bit canonical hash identifier for the type + * + * Derived from boost::type_index::hash_code() and normalized to + * uint64_t for cross-ABI stability within a build. + */ + uint64_t id; - /** - * @brief Three-way comparison operator for ordering TypeIDs - * @param other The TypeID to compare against - * @return std::strong_ordering result based on hash comparison - */ - auto operator<=>(const TypeID& other) const { return id <=> other.id; } - - /** - * @brief Equality comparison operator - * @param other The TypeID to compare against - * @return true if both TypeIDs represent the same type - */ - bool operator==(const TypeID& other) const { return id == other.id; } + /** + * @brief Human-readable name of the type + * + * Contains the canonical type name including template parameters. + * For example: "glm::vec<3, float>" or "std::vector". 
+ */ + std::string name; - /** - * @brief Get the human-readable type name - * @return The pretty-printed name of the type with template parameters - * - * This method returns the same value as the `name` member but provides - * a method-based interface for consistency with other APIs. - */ - [[nodiscard]] std::string prettyName() const { - return name; - } - }; + /** + * @brief Three-way comparison operator for ordering TypeIDs + * @param other The TypeID to compare against + * @return std::strong_ordering result based on hash comparison + */ + auto operator<=>(const TypeID& other) const { + return id <=> other.id; + } - /** - * @brief Create a TypeID for a given type T - * @tparam T The type to create an identifier for - * @return TypeID object uniquely identifying type T - * @noexcept This function never throws exceptions - * - * This function generates a stable TypeID for any given type using boost::type_index. - * The resulting TypeID will be identical for the same type across different - * compilation units and function calls. - * - * Template parameters and typedefs are resolved to their canonical forms: - * - `std::string` becomes the underlying template instantiation - * - `glm::quat` becomes `glm::qua` - * - Template parameters are preserved in the name - * - * Performance characteristics: - * - O(1) hash generation - * - Small string allocation for type name - * - Inlined for zero function call overhead - * - * @code - * // Basic types - * auto intId = createTypeId(); - * auto floatId = createTypeId(); - * - * // Template types - * auto vectorId = createTypeId>(); - * auto quatId = createTypeId(); // Shows as "glm::qua" - * - * // Custom types - * auto customId = createTypeId(); - * @endcode - */ - // Cached, allocation-free type id for T. This avoids RTTI/string work in hot paths. 
- template - [[nodiscard]] inline const TypeID& typeIdOf() noexcept { - const auto index = boost::typeindex::type_id(); + /** + * @brief Equality comparison operator + * @param other The TypeID to compare against + * @return true if both TypeIDs represent the same type + */ + bool operator==(const TypeID& other) const { + return id == other.id; + } + + /** + * @brief Get the human-readable type name + * @return The pretty-printed name of the type with template parameters + * + * This method returns the same value as the `name` member but provides + * a method-based interface for consistency with other APIs. + */ + [[nodiscard]] std::string prettyName() const { + return name; + } +}; + +/** + * @brief Create a TypeID for a given type T + * @tparam T The type to create an identifier for + * @return TypeID object uniquely identifying type T + * @noexcept This function never throws exceptions + * + * This function generates a stable TypeID for any given type using boost::type_index. + * The resulting TypeID will be identical for the same type across different + * compilation units and function calls. + * + * Template parameters and typedefs are resolved to their canonical forms: + * - `std::string` becomes the underlying template instantiation + * - `glm::quat` becomes `glm::qua` + * - Template parameters are preserved in the name + * + * Performance characteristics: + * - O(1) hash generation + * - Small string allocation for type name + * - Inlined for zero function call overhead + * + * @code + * // Basic types + * auto intId = createTypeId(); + * auto floatId = createTypeId(); + * + * // Template types + * auto vectorId = createTypeId>(); + * auto quatId = createTypeId(); // Shows as "glm::qua" + * + * // Custom types + * auto customId = createTypeId(); + * @endcode + */ +// Cached, allocation-free type id for T. This avoids RTTI/string work in hot paths. 
+template +[[nodiscard]] inline const TypeID& typeIdOf() noexcept { + const auto index = boost::typeindex::type_id(); #if ENTROPY_TYPEID_INCLUDE_NAME - static const TypeID k{ static_cast(index.hash_code()), index.pretty_name() }; + static const TypeID k{static_cast(index.hash_code()), index.pretty_name()}; #else - static const TypeID k{ static_cast(index.hash_code()), std::string() }; + static const TypeID k{static_cast(index.hash_code()), std::string()}; #endif - return k; - } + return k; +} - template - [[nodiscard]] inline TypeID createTypeId() noexcept { - // Return the cached instance by value (cheap copy: two words + small string empty) - return typeIdOf(); - } +template +[[nodiscard]] inline TypeID createTypeId() noexcept { + // Return the cached instance by value (cheap copy: two words + small string empty) + return typeIdOf(); +} - /** - * @brief Create a TypeID for the dynamic type of a given object reference - * @tparam T The static type of the object (can be a base class) - * @param obj Reference to the object to inspect at runtime - * @return TypeID representing the dynamic type of obj - */ +/** + * @brief Create a TypeID for the dynamic type of a given object reference + * @tparam T The static type of the object (can be a base class) + * @param obj Reference to the object to inspect at runtime + * @return TypeID representing the dynamic type of obj + */ #if ENTROPY_ENABLE_RTTI - template - [[nodiscard]] inline TypeID createTypeIdRuntime(const T& obj) noexcept { - const auto& index = boost::typeindex::type_id_runtime(obj); - std::string pretty_name = index.pretty_name(); - return { index.hash_code(), std::move(pretty_name) }; - } +template +[[nodiscard]] inline TypeID createTypeIdRuntime(const T& obj) noexcept { + const auto& index = boost::typeindex::type_id_runtime(obj); + std::string pretty_name = index.pretty_name(); + return {index.hash_code(), std::move(pretty_name)}; +} #endif - } // namespace TypeSystem - } // namespace Core -} // namespace 
EntropyEngine +} // namespace TypeSystem +} // namespace Core +} // namespace EntropyEngine /** * @brief Standard library hash specialization for TypeID - * + * * This specialization allows TypeID objects to be used as keys in standard * containers like std::unordered_map and std::unordered_set. */ -namespace std { +namespace std +{ +/** + * @brief Hash function specialization for EntropyEngine::Core::TypeSystem::TypeID + * + * Provides a hash function for TypeID objects by using the pre-computed + * hash value from boost::type_index. This ensures consistent hashing behavior. + * + * Example usage: + * @code + * std::unordered_map typeNames; + * typeNames[createTypeId()] = "Integer Type"; + * + * std::unordered_set registeredTypes; + * registeredTypes.insert(createTypeId()); + * @endcode + */ +template <> +struct hash +{ /** - * @brief Hash function specialization for EntropyEngine::Core::TypeSystem::TypeID - * - * Provides a hash function for TypeID objects by using the pre-computed - * hash value from boost::type_index. This ensures consistent hashing behavior. 
- * - * Example usage: - * @code - * std::unordered_map typeNames; - * typeNames[createTypeId()] = "Integer Type"; - * - * std::unordered_set registeredTypes; - * registeredTypes.insert(createTypeId()); - * @endcode + * @brief Compute hash value for a TypeID + * @param typeId The TypeID to hash + * @return Pre-computed hash value from boost::type_index + * @noexcept This operation never throws */ - template <> - struct hash { - /** - * @brief Compute hash value for a TypeID - * @param typeId The TypeID to hash - * @return Pre-computed hash value from boost::type_index - * @noexcept This operation never throws - */ - size_t operator()(const EntropyEngine::Core::TypeSystem::TypeID& typeId) const noexcept { - return static_cast(typeId.id); - } - }; -} // namespace std - + size_t operator()(const EntropyEngine::Core::TypeSystem::TypeID& typeId) const noexcept { + return static_cast(typeId.id); + } +}; +} // namespace std diff --git a/src/VirtualFileSystem/DirectoryHandle.cpp b/src/VirtualFileSystem/DirectoryHandle.cpp index b918730..04b3c36 100644 --- a/src/VirtualFileSystem/DirectoryHandle.cpp +++ b/src/VirtualFileSystem/DirectoryHandle.cpp @@ -3,13 +3,15 @@ * @brief Implementation of DirectoryHandle */ #include "DirectoryHandle.h" -#include "VirtualFileSystem.h" + #include -namespace EntropyEngine::Core::IO { +#include "VirtualFileSystem.h" + +namespace EntropyEngine::Core::IO +{ -DirectoryHandle::DirectoryHandle(VirtualFileSystem* vfs, std::string path) - : _vfs(vfs) { +DirectoryHandle::DirectoryHandle(VirtualFileSystem* vfs, std::string path) : _vfs(vfs) { // Do not resolve or attach a backend here; DirectoryHandle is a dumb value handle. // Backend attachment and normalized key computation are performed by VirtualFileSystem::createDirectoryHandle. 
@@ -35,7 +37,7 @@ FileOperationHandle DirectoryHandle::create(bool createParents) const { assert(_backend && "DirectoryHandle must have backend (constructed by VFS)"); // Use backend's createDirectory, which should handle createParents appropriately // For LocalFileSystemBackend, this calls std::filesystem::create_directories (always creates parents) - (void)createParents; // Currently not used - backend decides behavior + (void)createParents; // Currently not used - backend decides behavior return _backend->createDirectory(_meta.path); } @@ -43,7 +45,7 @@ FileOperationHandle DirectoryHandle::remove(bool recursive) const { assert(_backend && "DirectoryHandle must have backend (constructed by VFS)"); // Backend's removeDirectory should handle recursive flag appropriately // For LocalFileSystemBackend, this calls std::filesystem::remove_all (always recursive) - (void)recursive; // Currently not used - backend decides behavior + (void)recursive; // Currently not used - backend decides behavior return _backend->removeDirectory(_meta.path); } @@ -60,4 +62,4 @@ FileOperationHandle DirectoryHandle::getMetadata() const { return _backend->getMetadata(_meta.path); } -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/DirectoryHandle.h b/src/VirtualFileSystem/DirectoryHandle.h index 3c332c5..a06bd56 100644 --- a/src/VirtualFileSystem/DirectoryHandle.h +++ b/src/VirtualFileSystem/DirectoryHandle.h @@ -7,15 +7,17 @@ * equality and hashing are backend-aware via a normalized key. 
*/ #pragma once -#include #include +#include + #include "FileOperationHandle.h" #include "IFileSystemBackend.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -class VirtualFileSystem; // fwd -class IFileSystemBackend; // fwd +class VirtualFileSystem; // fwd +class IFileSystemBackend; // fwd /** * @brief Copyable handle to a directory path routed through a backend @@ -39,9 +41,11 @@ class IFileSystemBackend; // fwd * dh.remove(true).wait(); // recursive * @endcode */ -class DirectoryHandle { +class DirectoryHandle +{ public: - struct Metadata { + struct Metadata + { std::string path; // full path as provided std::string directory; // parent directory (may be empty) std::string name; // directory name @@ -59,8 +63,8 @@ class DirectoryHandle { */ explicit DirectoryHandle(VirtualFileSystem* vfs, std::string path); DirectoryHandle() = delete; -public: +public: // Directory operations /** * @brief Creates the directory at this path @@ -108,43 +112,50 @@ class DirectoryHandle { * @brief Returns static metadata captured at handle construction * @return Reference to directory metadata (existence at creation time, etc.) 
*/ - const Metadata& metadata() const noexcept { return _meta; } + const Metadata& metadata() const noexcept { + return _meta; + } /** * @brief Backend-aware normalized key for identity/locking * @return Normalized key string used for equality */ - const std::string& normalizedKey() const noexcept { return _normKey; } - + const std::string& normalizedKey() const noexcept { + return _normKey; + } // Equality based on backend identity and normalized key friend bool operator==(const DirectoryHandle& a, const DirectoryHandle& b) noexcept { return a._backend.get() == b._backend.get() && a._normKey == b._normKey; } - friend bool operator!=(const DirectoryHandle& a, const DirectoryHandle& b) noexcept { return !(a == b); } + friend bool operator!=(const DirectoryHandle& a, const DirectoryHandle& b) noexcept { + return !(a == b); + } private: VirtualFileSystem* _vfs; std::shared_ptr _backend; // Backend for this directory (ref-counted for safety) - Metadata _meta; // associated metadata for this handle - std::string _normKey; // backend-normalized key captured at creation + Metadata _meta; // associated metadata for this handle + std::string _normKey; // backend-normalized key captured at creation friend class VirtualFileSystem; // Allow hasher to access private members without exposing opaque pointers publicly friend struct std::hash; }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO // Hash support for DirectoryHandle -namespace std { - template<> - struct hash { - size_t operator()(const EntropyEngine::Core::IO::DirectoryHandle& dh) const noexcept { - // Combine backend identity pointer and normalized key - size_t h1 = hash()(dh._backend.get()); - size_t h2 = hash()(dh.normalizedKey()); - return h1 ^ (h2 << 1); - } - }; -} +namespace std +{ +template <> +struct hash +{ + size_t operator()(const EntropyEngine::Core::IO::DirectoryHandle& dh) const noexcept { + // Combine backend identity pointer and normalized key + size_t h1 = 
hash()(dh._backend.get()); + size_t h2 = hash()(dh.normalizedKey()); + return h1 ^ (h2 << 1); + } +}; +} // namespace std diff --git a/src/VirtualFileSystem/FileHandle.cpp b/src/VirtualFileSystem/FileHandle.cpp index 0f96868..d7a7db6 100644 --- a/src/VirtualFileSystem/FileHandle.cpp +++ b/src/VirtualFileSystem/FileHandle.cpp @@ -1,19 +1,20 @@ #include "FileHandle.h" -#include "VirtualFileSystem.h" -#include "IFileSystemBackend.h" -#include "LocalFileSystemBackend.h" -#include "FileStream.h" + #include -#include #include +#include -using EntropyEngine::Core::Concurrency::ExecutionType; +#include "FileStream.h" +#include "IFileSystemBackend.h" +#include "LocalFileSystemBackend.h" +#include "VirtualFileSystem.h" -namespace EntropyEngine::Core::IO { +using EntropyEngine::Core::Concurrency::ExecutionType; +namespace EntropyEngine::Core::IO +{ -FileHandle::FileHandle(VirtualFileSystem* vfs, std::string path) - : _vfs(vfs) { +FileHandle::FileHandle(VirtualFileSystem* vfs, std::string path) : _vfs(vfs) { _meta.path = std::move(path); std::filesystem::path pp(_meta.path); _meta.directory = pp.has_parent_path() ? pp.parent_path().string() : std::string(); @@ -57,16 +58,17 @@ FileOperationHandle FileHandle::readLineBinary(size_t lineNumber, uint8_t delimi return FileOperationHandle::immediate(FileOpStatus::Failed); } auto backend = _backend; - return _vfs->submit(_meta.path, [backend, lineNumber, delimiter](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&){ - ReadOptions ro{}; ro.binary = true; // read all bytes + return _vfs->submit(_meta.path, [backend, lineNumber, delimiter](FileOperationHandle::OpState& s, + const std::string& p, const ExecContext&) { + ReadOptions ro{}; + ro.binary = true; // read all bytes auto rh = backend->readFile(p, ro); rh.wait(); if (rh.status() != FileOpStatus::Complete && rh.status() != FileOpStatus::Partial) { // Surface backend error if any const auto& err = rh.errorInfo(); s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, - err.message.empty() ? std::string("Failed to read for readLineBinary") : err.message, - err.path); + err.message.empty() ? std::string("Failed to read for readLineBinary") : err.message, err.path); s.complete(FileOpStatus::Failed); return; } @@ -75,11 +77,20 @@ FileOperationHandle FileHandle::readLineBinary(size_t lineNumber, uint8_t delimi std::vector line; for (size_t i = 0; i < buf.size(); ++i) { if (buf[i] == delimiter) { - if (idx == lineNumber) break; else { line.clear(); ++idx; continue; } + if (idx == lineNumber) + break; + else { + line.clear(); + ++idx; + continue; + } } line.push_back(buf[i]); } - if (idx != lineNumber) { s.complete(FileOpStatus::Partial); return; } + if (idx != lineNumber) { + s.complete(FileOpStatus::Partial); + return; + } s.bytes.assign(line.begin(), line.end()); s.complete(FileOpStatus::Complete); }); @@ -87,35 +98,14 @@ FileOperationHandle FileHandle::readLineBinary(size_t lineNumber, uint8_t delimi FileOperationHandle FileHandle::writeAll(std::span bytes) const { if (_backend && _vfs) { - WriteOptions opts; opts.truncate = true; - auto data = std::vector(bytes.begin(), bytes.end()); - return _vfs->submitSerialized(_meta.path, [opts, data=std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto byteSpan = std::span(data.data(), data.size()); - auto inner = backend->writeFile(p, byteSpan, opts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); - } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); - } - }); - } - return FileOperationHandle::immediate(FileOpStatus::Failed); -} - -FileOperationHandle FileHandle::writeAll(std::span bytes, const WriteOptions& opts) const { - if (_backend && _vfs) { + WriteOptions opts; + opts.truncate = true; auto data = std::vector(bytes.begin(), bytes.end()); - return _vfs->submitSerialized(_meta.path, [opts, data=std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto byteSpan = std::span(data.data(), data.size()); - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteFile(s, p, byteSpan, opts); - } else { + return _vfs->submitSerialized( + _meta.path, + [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto byteSpan = std::span(data.data(), data.size()); auto inner = backend->writeFile(p, byteSpan, opts); inner.wait(); auto st = inner.status(); @@ -124,68 +114,105 @@ FileOperationHandle FileHandle::writeAll(std::span bytes, const W s.complete(st); } else { const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); s.complete(FileOpStatus::Failed); } - } - }); + }); + } + return FileOperationHandle::immediate(FileOpStatus::Failed); +} + +FileOperationHandle FileHandle::writeAll(std::span bytes, const WriteOptions& opts) const { + if (_backend && _vfs) { + auto data = std::vector(bytes.begin(), bytes.end()); + return _vfs->submitSerialized( + _meta.path, + [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto byteSpan = std::span(data.data(), data.size()); + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteFile(s, p, byteSpan, opts); + } else { + auto inner = backend->writeFile(p, byteSpan, opts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } + } + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } FileOperationHandle FileHandle::writeRange(uint64_t offset, std::span bytes) const { - WriteOptions opts; opts.offset = offset; opts.truncate = false; + WriteOptions opts; + opts.offset = offset; + opts.truncate = false; if (_backend && _vfs) { auto data = std::vector(bytes.begin(), bytes.end()); - return _vfs->submitSerialized(_meta.path, [opts, data=std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto byteSpan = std::span(data.data(), data.size()); - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteFile(s, p, byteSpan, opts); - } else { - auto inner = backend->writeFile(p, byteSpan, opts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); + return _vfs->submitSerialized( + _meta.path, + [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto byteSpan = std::span(data.data(), data.size()); + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteFile(s, p, byteSpan, opts); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->writeFile(p, byteSpan, opts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } -FileOperationHandle FileHandle::writeRange(uint64_t offset, std::span bytes, const WriteOptions& opts) const { +FileOperationHandle FileHandle::writeRange(uint64_t offset, std::span bytes, + const WriteOptions& opts) const { if (_backend && _vfs) { WriteOptions wopts = opts; wopts.offset = offset; wopts.truncate = false; auto data = std::vector(bytes.begin(), bytes.end()); - return _vfs->submitSerialized(_meta.path, [wopts, data=std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto byteSpan = std::span(data.data(), data.size()); - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteFile(s, p, byteSpan, wopts); - } else { - auto inner = backend->writeFile(p, byteSpan, wopts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [wopts, data = std::move(data)](FileOperationHandle::OpState& s, + std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto byteSpan = std::span(data.data(), data.size()); + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteFile(s, p, byteSpan, wopts); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->writeFile(p, byteSpan, wopts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } @@ -193,51 +220,59 @@ FileOperationHandle FileHandle::writeRange(uint64_t offset, std::spansubmitSerialized(_meta.path, [lineNumber, lineCopy=std::move(lineCopy)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteLine(s, p, lineNumber, lineCopy); - } else { - auto inner = backend->writeLine(p, lineNumber, lineCopy); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [lineNumber, lineCopy = std::move(lineCopy)](FileOperationHandle::OpState& s, + std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteLine(s, p, lineNumber, lineCopy); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->writeLine(p, lineNumber, lineCopy); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } FileOperationHandle FileHandle::writeAll(std::string_view text) const { if (_backend && _vfs) { - WriteOptions opts; opts.truncate = true; + WriteOptions opts; + opts.truncate = true; auto textCopy = std::string(text); - return _vfs->submitSerialized(_meta.path, [opts, textCopy=std::move(textCopy)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto spanBytes = std::span(reinterpret_cast(textCopy.data()), textCopy.size()); - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteFile(s, p, spanBytes, opts); - } else { - auto inner = backend->writeFile(p, spanBytes, opts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [opts, textCopy = std::move(textCopy)](FileOperationHandle::OpState& s, + std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto spanBytes = + std::span(reinterpret_cast(textCopy.data()), textCopy.size()); + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteFile(s, p, spanBytes, opts); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->writeFile(p, spanBytes, opts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } @@ -245,30 +280,35 @@ FileOperationHandle FileHandle::writeAll(std::string_view text) const { FileOperationHandle FileHandle::writeAll(std::string_view text, const WriteOptions& opts) const { if (_backend && _vfs) { auto textCopy = std::string(text); - return _vfs->submitSerialized(_meta.path, [opts, textCopy=std::move(textCopy)](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - auto spanBytes = std::span(reinterpret_cast(textCopy.data()), textCopy.size()); - if (auto* local = dynamic_cast(backend.get())) { - local->doWriteFile(s, p, spanBytes, opts); - } else { - auto inner = backend->writeFile(p, spanBytes, opts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [opts, textCopy = std::move(textCopy)](FileOperationHandle::OpState& s, + std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + auto spanBytes = + std::span(reinterpret_cast(textCopy.data()), textCopy.size()); + if (auto* local = dynamic_cast(backend.get())) { + local->doWriteFile(s, p, spanBytes, opts); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->writeFile(p, spanBytes, opts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } -FileOperationHandle FileHandle::writeLine(size_t lineNumber, std::string_view line, const WriteOptions& /*opts*/) const { +FileOperationHandle FileHandle::writeLine(size_t lineNumber, std::string_view line, + const WriteOptions& /*opts*/) const { // Currently forwards to default writeLine; WriteOptions are ignored for line-oriented writes. // Future: route via WriteBatch::commit(opts) for per-op control. return writeLine(lineNumber, line); @@ -276,46 +316,50 @@ FileOperationHandle FileHandle::writeLine(size_t lineNumber, std::string_view li FileOperationHandle FileHandle::createEmpty() const { if (_backend && _vfs) { - return _vfs->submitSerialized(_meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - if (auto* local = dynamic_cast(backend.get())) { - local->doCreateFile(s, p); - } else { - auto inner = backend->createFile(p); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + if (auto* local = dynamic_cast(backend.get())) { + local->doCreateFile(s, p); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->createFile(p); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } FileOperationHandle FileHandle::remove() const { if (_backend && _vfs) { - return _vfs->submitSerialized(_meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, const std::string& p, const ExecContext&) mutable { - if (auto* local = dynamic_cast(backend.get())) { - local->doDeleteFile(s, p); - } else { - auto inner = backend->deleteFile(p); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.complete(st); + return _vfs->submitSerialized( + _meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, + const std::string& p, const ExecContext&) mutable { + if (auto* local = dynamic_cast(backend.get())) { + local->doDeleteFile(s, p); } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? FileError::IOError : err.code, - err.message, err.path, err.systemError); - s.complete(FileOpStatus::Failed); + auto inner = backend->deleteFile(p); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, + err.systemError); + s.complete(FileOpStatus::Failed); + } } - } - }); + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } @@ -331,7 +375,7 @@ std::unique_ptr FileHandle::openReadStream() const { } std::unique_ptr FileHandle::openWriteStream(bool append) const { - (void)append; // append semantics can be handled by backend via options in the future + (void)append; // append semantics can be handled by backend via options in the future if (_vfs) { StreamOptions opts; opts.mode = StreamOptions::Write; @@ -358,5 +402,4 @@ std::unique_ptr FileHandle::openBufferedStream(size_t buffer return nullptr; } - -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileHandle.h b/src/VirtualFileSystem/FileHandle.h index fb3ef21..ef14916 100644 --- a/src/VirtualFileSystem/FileHandle.h +++ b/src/VirtualFileSystem/FileHandle.h @@ -1,38 +1,40 @@ /** * @file FileHandle.h * @brief Value-semantic handle for performing file operations through VFS - * + * * FileHandle provides a convenient, copyable reference to a file path routed through a * VirtualFileSystem backend. Use it to read/write text or bytes, manipulate lines, and * open streams. Equality and hashing are backend-aware via a normalized key. */ #pragma once -#include -#include +#include #include -#include #include +#include +#include #include -#include +#include + #include "FileOperationHandle.h" #include "FileStream.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -class VirtualFileSystem; // fwd -class IFileSystemBackend; // fwd -struct WriteOptions; // fwd +class VirtualFileSystem; // fwd +class IFileSystemBackend; // fwd +struct WriteOptions; // fwd /** * @brief Copyable handle to a file path routed through a backend - * + * * Construct via VirtualFileSystem::createFileHandle(). 
Operations are asynchronous; * call wait() on the returned FileOperationHandle to block, or chain operations. * * Design note: FileHandle is a dumb handle that delegates all I/O and policy decisions * to the routed backend through the VirtualFileSystem. It avoids filesystem probing and * contains no backend-specific logic; semantics are defined by the backend implementation. - * + * * @code * WorkContractGroup group(2000); * VirtualFileSystem vfs(&group); @@ -42,25 +44,27 @@ struct WriteOptions; // fwd * ENTROPY_LOG_INFO(r.contentsText()); * @endcode */ -class FileHandle { +class FileHandle +{ public: - struct Metadata { - std::string path; // full path as provided - std::string directory; // parent directory (may be empty) - std::string filename; // file name with extension - std::string extension; // extension including leading dot if present - bool exists = false; // whether file exists at construction time - uintmax_t size = 0; // size in bytes if regular file, else 0 - bool canRead = false; // readable by someone (owner/group/others) - bool canWrite = false; // writable by someone - bool canExecute = false; // executable by someone - std::optional owner; // platform-specific; may be empty + struct Metadata + { + std::string path; // full path as provided + std::string directory; // parent directory (may be empty) + std::string filename; // file name with extension + std::string extension; // extension including leading dot if present + bool exists = false; // whether file exists at construction time + uintmax_t size = 0; // size in bytes if regular file, else 0 + bool canRead = false; // readable by someone (owner/group/others) + bool canWrite = false; // writable by someone + bool canExecute = false; // executable by someone + std::optional owner; // platform-specific; may be empty }; private: /** * @brief Constructs a handle bound to a VirtualFileSystem and path - * + * * Use VirtualFileSystem::createFileHandle() to obtain instances. 
The handle is copyable and * cheap to pass by value. Operations are asynchronous; call wait() on returned handles. * Only VirtualFileSystem may construct FileHandle to ensure a backend is attached. @@ -70,11 +74,10 @@ class FileHandle { explicit FileHandle(VirtualFileSystem* vfs, std::string path); public: - // Reads /** * @brief Reads the entire file into memory - * + * * Asynchronously reads the full contents as bytes. Call contentsText() or contentsBytes() * on the returned handle after wait(). * @return Handle representing the read operation @@ -108,7 +111,7 @@ class FileHandle { // Writes /** * @brief Writes the full text to the file (overwrites by default) - * + * * Uses LF/CRLF policy as implemented by the backend. Use WriteBatch for line-wise edits. * @param text UTF-8 text to write * @return Handle for the asynchronous write @@ -141,7 +144,7 @@ class FileHandle { FileOperationHandle writeRange(uint64_t offset, std::span bytes, const WriteOptions& opts) const; /** * @brief Replaces a single line by index (0-based) - * + * * Extends the file with blank lines if the index is beyond EOF. Line endings are preserved * according to backend policy. 
* @param lineNumber Line to overwrite @@ -159,13 +162,13 @@ class FileHandle { * @brief Creates an empty file or truncates existing to zero * @return Handle for the asynchronous creation/truncation */ - FileOperationHandle createEmpty() const; // create or truncate to zero length + FileOperationHandle createEmpty() const; // create or truncate to zero length /** * @brief Deletes the file if it exists (idempotent) * @return Handle for the asynchronous delete operation */ - FileOperationHandle remove() const; // delete file if exists (idempotent) - + FileOperationHandle remove() const; // delete file if exists (idempotent) + // Streaming API - FileHandle acts as factory for streams /** * @brief Opens an unbuffered read-only stream @@ -194,41 +197,50 @@ class FileHandle { * @brief Returns static metadata captured at handle construction * @return Reference to file metadata (existence, size at creation time, etc.) */ - const Metadata& metadata() const noexcept { return _meta; } + const Metadata& metadata() const noexcept { + return _meta; + } /** * @brief Backend-aware normalized key for identity/locking * @return Normalized key string used for equality and advisory locks */ - const std::string& normalizedKey() const noexcept { return _normKey; } + const std::string& normalizedKey() const noexcept { + return _normKey; + } // Equality based on backend identity and normalized key friend bool operator==(const FileHandle& a, const FileHandle& b) noexcept { return a._backend.get() == b._backend.get() && a._normKey == b._normKey; } - friend bool operator!=(const FileHandle& a, const FileHandle& b) noexcept { return !(a == b); } + friend bool operator!=(const FileHandle& a, const FileHandle& b) noexcept { + return !(a == b); + } private: VirtualFileSystem* _vfs; std::shared_ptr _backend; // Backend for this file (ref-counted for safety) - Metadata _meta; // associated metadata for this handle - std::string _normKey; // backend-normalized key captured at creation + Metadata 
_meta; // associated metadata for this handle + std::string _normKey; // backend-normalized key captured at creation friend class VirtualFileSystem; }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO // Hash support for FileHandle -namespace std { - template<> - struct hash { - size_t operator()(const EntropyEngine::Core::IO::FileHandle& h) const noexcept { - // Combine backend pointer and normalized key - // Note: To avoid accessing private members, hash only the normalized key. - // This satisfies the requirement that equal objects have equal hashes, though it may increase collisions across backends. - size_t seed = std::hash{}(h.normalizedKey()); - return seed; - } - }; -} +namespace std +{ +template <> +struct hash +{ + size_t operator()(const EntropyEngine::Core::IO::FileHandle& h) const noexcept { + // Combine backend pointer and normalized key + // Note: To avoid accessing private members, hash only the normalized key. + // This satisfies the requirement that equal objects have equal hashes, though it may increase collisions across + // backends. 
+ size_t seed = std::hash{}(h.normalizedKey()); + return seed; + } +}; +} // namespace std diff --git a/src/VirtualFileSystem/FileOperationHandle.cpp b/src/VirtualFileSystem/FileOperationHandle.cpp index 8840345..8884ef6 100644 --- a/src/VirtualFileSystem/FileOperationHandle.cpp +++ b/src/VirtualFileSystem/FileOperationHandle.cpp @@ -1,17 +1,20 @@ #include "FileOperationHandle.h" -#include "IFileSystemBackend.h" + #include -namespace EntropyEngine::Core::IO { +#include "IFileSystemBackend.h" + +namespace EntropyEngine::Core::IO +{ void FileOperationHandle::wait() const { if (!_s) return; - + // Fast path - already complete if (_s->isComplete.load(std::memory_order_acquire)) { return; } - + // Slow path - wait for completion with cooperative progress pumping std::unique_lock lock(_s->completionMutex); while (!_s->isComplete.load(std::memory_order_acquire)) { @@ -25,9 +28,8 @@ void FileOperationHandle::wait() const { // Swallow exceptions in progress to avoid breaking wait semantics } lock.lock(); - _s->completionCV.wait_for(lock, std::chrono::milliseconds(1), [this]{ - return _s->isComplete.load(std::memory_order_acquire); - }); + _s->completionCV.wait_for(lock, std::chrono::milliseconds(1), + [this] { return _s->isComplete.load(std::memory_order_acquire); }); } } @@ -65,12 +67,12 @@ std::string FileOperationHandle::contentsText() const { uint64_t FileOperationHandle::bytesWritten() const { if (!_s) return 0ULL; - + // Ensure operation is complete before accessing data if (!_s->isComplete.load(std::memory_order_acquire)) { wait(); } - + return _s->wrote; } @@ -135,4 +137,4 @@ std::shared_ptr FileOperationHandle::makeState() { FileOperationHandle::FileOperationHandle(std::shared_ptr s) : _s(std::move(s)) {} -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileOperationHandle.h b/src/VirtualFileSystem/FileOperationHandle.h index 2b20e03..b4440df 100644 --- a/src/VirtualFileSystem/FileOperationHandle.h 
+++ b/src/VirtualFileSystem/FileOperationHandle.h @@ -1,22 +1,30 @@ #pragma once -#include -#include -#include -#include #include -#include -#include +#include #include -#include #include +#include +#include +#include +#include #include +#include +#include +#include #include -#include -#include +#include -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -enum class FileOpStatus { Pending, Running, Partial, Complete, Failed }; +enum class FileOpStatus +{ + Pending, + Running, + Partial, + Complete, + Failed +}; /** * Public error taxonomy surfaced by VFS operations. @@ -30,7 +38,8 @@ enum class FileOpStatus { Pending, Running, Partial, Complete, Failed }; * - Timeout: bounded waits exceeded (advisory lock or backend scope) * - Conflict: contention detected (backend Busy without fallback) */ -enum class FileError { +enum class FileError +{ None = 0, FileNotFound, AccessDenied, @@ -50,7 +59,8 @@ enum class FileError { * Conflict, Timeout from advisory locking, or FileNotFound determined by preconditions, * systemError may be empty; message should remain informative. 
*/ -struct FileErrorInfo { +struct FileErrorInfo +{ FileError code = FileError::None; std::string message; std::optional systemError; @@ -58,7 +68,8 @@ struct FileErrorInfo { }; // File metadata - defined here so OpState can use it -struct FileMetadata { +struct FileMetadata +{ std::string path; bool exists = false; bool isDirectory = false; @@ -73,15 +84,17 @@ struct FileMetadata { }; // Directory entry with metadata - defined here so OpState can use it -struct DirectoryEntry { - std::string name; // Just the filename, not full path - std::string fullPath; // Complete absolute path - FileMetadata metadata; // Full metadata for this entry +struct DirectoryEntry +{ + std::string name; // Just the filename, not full path + std::string fullPath; // Complete absolute path + FileMetadata metadata; // Full metadata for this entry bool isSymlink = false; std::optional symlinkTarget; }; -class FileOperationHandle { +class FileOperationHandle +{ public: FileOperationHandle() = default; @@ -124,7 +137,8 @@ class FileOperationHandle { * return FileOperationHandle(state); * @endcode */ - struct OpState { + struct OpState + { std::atomic st{FileOpStatus::Pending}; mutable std::mutex completionMutex; mutable std::condition_variable completionCV; @@ -134,13 +148,13 @@ class FileOperationHandle { std::function progress; // Result data - only valid after completion - std::vector bytes; // for reads - uint64_t wrote = 0; // for writes - FileErrorInfo error; // error details if failed - std::string text; // for text preview/read operations - std::optional metadata; // for metadata queries + std::vector bytes; // for reads + uint64_t wrote = 0; // for writes + FileErrorInfo error; // error details if failed + std::string text; // for text preview/read operations + std::optional metadata; // for metadata queries std::vector directoryEntries; // for directory listings - std::vector metadataBatch; // for batch metadata queries + std::vector metadataBatch; // for batch metadata queries 
void complete(FileOpStatus final) noexcept { { @@ -151,9 +165,8 @@ class FileOperationHandle { completionCV.notify_all(); } - void setError(FileError code, const std::string& msg, - const std::string& path = "", - std::optional ec = std::nullopt) { + void setError(FileError code, const std::string& msg, const std::string& path = "", + std::optional ec = std::nullopt) { error.code = code; error.message = msg; error.path = path; @@ -183,4 +196,4 @@ class FileOperationHandle { friend class FileWatchManager; }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileStream.cpp b/src/VirtualFileSystem/FileStream.cpp index 8c3bb2a..1e32a9d 100644 --- a/src/VirtualFileSystem/FileStream.cpp +++ b/src/VirtualFileSystem/FileStream.cpp @@ -1,26 +1,25 @@ #include "FileStream.h" + #include #include -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ BufferedFileStream::BufferedFileStream(std::unique_ptr inner, size_t bufferSize) - : _inner(std::move(inner)) - , _readBuffer(bufferSize) - , _writeBuffer(bufferSize) { -} + : _inner(std::move(inner)), _readBuffer(bufferSize), _writeBuffer(bufferSize) {} IoResult BufferedFileStream::read(std::span buffer) { IoResult result; - + if (!good()) { result.error = FileError::IOError; return result; } - + size_t totalRead = 0; size_t remaining = buffer.size(); - + while (remaining > 0) { // If read buffer is empty, fill it if (_readPos >= _readSize) { @@ -30,20 +29,18 @@ IoResult BufferedFileStream::read(std::span buffer) { break; } } - + // Copy from read buffer to output size_t available = _readSize - _readPos; size_t toCopy = std::min(available, remaining); - - std::memcpy(buffer.data() + totalRead, - _readBuffer.data() + _readPos, - toCopy); - + + std::memcpy(buffer.data() + totalRead, _readBuffer.data() + _readPos, toCopy); + _readPos += toCopy; totalRead += toCopy; remaining -= toCopy; } - + result.bytesTransferred = totalRead; result.complete = 
(totalRead == buffer.size()) || eof(); return result; @@ -51,15 +48,15 @@ IoResult BufferedFileStream::read(std::span buffer) { IoResult BufferedFileStream::write(std::span data) { IoResult result; - + if (!good()) { result.error = FileError::IOError; return result; } - + size_t totalWritten = 0; size_t remaining = data.size(); - + while (remaining > 0) { // If write buffer is full, flush it if (_writePos >= _writeBuffer.size()) { @@ -70,21 +67,19 @@ IoResult BufferedFileStream::write(std::span data) { return result; } } - + // Copy to write buffer size_t available = _writeBuffer.size() - _writePos; size_t toCopy = std::min(available, remaining); - - std::memcpy(_writeBuffer.data() + _writePos, - data.data() + totalWritten, - toCopy); - + + std::memcpy(_writeBuffer.data() + _writePos, data.data() + totalWritten, toCopy); + _writePos += toCopy; totalWritten += toCopy; remaining -= toCopy; _dirty = true; } - + result.bytesTransferred = totalWritten; result.complete = (totalWritten == data.size()); return result; @@ -95,11 +90,11 @@ bool BufferedFileStream::seek(int64_t offset, std::ios_base::seekdir dir) { if (_dirty) { flushWriteBuffer(); } - + // Invalidate read buffer _readPos = 0; _readSize = 0; - + return _inner->seek(offset, dir); } @@ -107,17 +102,17 @@ int64_t BufferedFileStream::tell() const { // Account for buffered position int64_t basePos = _inner->tell(); if (basePos < 0) return basePos; - + // Adjust for buffered reads if (_readSize > 0) { basePos -= (_readSize - _readPos); } - + // Adjust for buffered writes if (_writePos > 0) { basePos += _writePos; } - + return basePos; } @@ -161,4 +156,4 @@ void BufferedFileStream::fillReadBuffer() { _readPos = 0; } -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileStream.h b/src/VirtualFileSystem/FileStream.h index 4845d4a..c3c9db1 100644 --- a/src/VirtualFileSystem/FileStream.h +++ b/src/VirtualFileSystem/FileStream.h 
@@ -1,57 +1,66 @@ #pragma once -#include #include #include #include +#include + #include "FileOperationHandle.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ // Result structure for I/O operations -struct IoResult { +struct IoResult +{ size_t bytesTransferred = 0; bool complete = false; std::optional error; - - bool success() const { return !error.has_value(); } + + bool success() const { + return !error.has_value(); + } }; // Pure interface for file streaming -class FileStream { +class FileStream +{ public: virtual ~FileStream() = default; - + // Core I/O operations // Read into buffer, returns actual bytes read virtual IoResult read(std::span buffer) = 0; // Write data, returns actual bytes written virtual IoResult write(std::span data) = 0; - + // Positioning virtual bool seek(int64_t offset, std::ios_base::seekdir dir = std::ios_base::beg) = 0; virtual int64_t tell() const = 0; - + // Stream state virtual bool good() const = 0; virtual bool eof() const = 0; virtual bool fail() const = 0; - + // Flush any buffered data virtual void flush() = 0; - + // Close the stream (called automatically by destructor) virtual void close() = 0; - + // Get underlying file path if applicable - virtual std::string path() const { return ""; } + virtual std::string path() const { + return ""; + } }; // Buffered stream wrapper - adds buffering to any stream -class BufferedFileStream : public FileStream { +class BufferedFileStream : public FileStream +{ public: BufferedFileStream(std::unique_ptr inner, size_t bufferSize = 8192); - + IoResult read(std::span buffer) override; IoResult write(std::span data) override; bool seek(int64_t offset, std::ios_base::seekdir dir) override; @@ -61,8 +70,10 @@ class BufferedFileStream : public FileStream { bool fail() const override; void flush() override; void close() override; - std::string path() const override { return _inner->path(); } - + std::string path() const override { + return _inner->path(); + } + private: 
std::unique_ptr _inner; std::vector _readBuffer; @@ -71,9 +82,9 @@ class BufferedFileStream : public FileStream { size_t _readSize = 0; size_t _writePos = 0; bool _dirty = false; - + void flushWriteBuffer(); void fillReadBuffer(); }; -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileWatch.cpp b/src/VirtualFileSystem/FileWatch.cpp index c5e771c..eb8dbc3 100644 --- a/src/VirtualFileSystem/FileWatch.cpp +++ b/src/VirtualFileSystem/FileWatch.cpp @@ -3,20 +3,16 @@ * @brief Implementation of FileWatch EntropyObject */ #include "FileWatch.h" -#include "FileWatchManager.h" + #include "../Logging/Logger.h" +#include "FileWatchManager.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -FileWatch::FileWatch(FileWatchManager* owner, const std::string& path, - FileWatchCallback callback, const WatchOptions& options) - : _owner(owner) - , _path(path) - , _callback(std::move(callback)) - , _options(options) - , _efswId(0) - , _watching(false) { -} +FileWatch::FileWatch(FileWatchManager* owner, const std::string& path, FileWatchCallback callback, + const WatchOptions& options) + : _owner(owner), _path(path), _callback(std::move(callback)), _options(options), _efswId(0), _watching(false) {} FileWatch::~FileWatch() noexcept { // Ensure watch is stopped before destruction @@ -26,7 +22,7 @@ FileWatch::~FileWatch() noexcept { void FileWatch::stop() { bool wasWatching = _watching.exchange(false, std::memory_order_acq_rel); if (!wasWatching) { - return; // Already stopped + return; // Already stopped } // Remove from efsw through manager @@ -37,4 +33,4 @@ void FileWatch::stop() { ENTROPY_LOG_INFO("Stopped file watch for: " + _path); } -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileWatch.h b/src/VirtualFileSystem/FileWatch.h index 027ebc4..e9b00aa 100644 --- a/src/VirtualFileSystem/FileWatch.h 
+++ b/src/VirtualFileSystem/FileWatch.h @@ -3,20 +3,23 @@ * @brief File system watch object using EntropyObject handle facilities */ #pragma once -#include "../Core/EntropyObject.h" +#include +#include +#include +#include #include #include -#include -#include -#include -#include + +#include "../Core/EntropyObject.h" // Forward declare efsw types -namespace efsw { - using WatchID = long; +namespace efsw +{ +using WatchID = long; } -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ // Forward declarations class FileWatchManager; @@ -24,7 +27,8 @@ class FileWatchManager; /** * @brief Types of file system events */ -enum class FileWatchEvent { +enum class FileWatchEvent +{ Created, ///< New file or directory created Modified, ///< File content or directory structure changed Deleted, ///< File or directory deleted @@ -34,11 +38,12 @@ enum class FileWatchEvent { /** * @brief Information about a file system event */ -struct FileWatchInfo { - std::string path; ///< Path of the file/directory that changed - FileWatchEvent event; ///< Type of event that occurred - std::optional oldPath; ///< Previous path (for Renamed events) - std::chrono::system_clock::time_point timestamp; ///< When the event occurred +struct FileWatchInfo +{ + std::string path; ///< Path of the file/directory that changed + FileWatchEvent event; ///< Type of event that occurred + std::optional oldPath; ///< Previous path (for Renamed events) + std::chrono::system_clock::time_point timestamp; ///< When the event occurred }; /** @@ -49,11 +54,12 @@ using FileWatchCallback = std::function; /** * @brief Options for configuring file system watches */ -struct WatchOptions { - bool recursive = true; ///< Watch subdirectories recursively - bool followSymlinks = false; ///< Follow symbolic links when watching - std::vector includePatterns; ///< Include only files matching these patterns (*.cpp, *.h) - std::vector excludePatterns; ///< Exclude files matching these patterns (.git/*, *.tmp) 
+struct WatchOptions +{ + bool recursive = true; ///< Watch subdirectories recursively + bool followSymlinks = false; ///< Follow symbolic links when watching + std::vector includePatterns; ///< Include only files matching these patterns (*.cpp, *.h) + std::vector excludePatterns; ///< Exclude files matching these patterns (.git/*, *.tmp) }; /** @@ -74,13 +80,14 @@ struct WatchOptions { * watch->release(); // Decrement refcount (may delete if 0) * @endcode */ -class FileWatch : public EntropyObject { +class FileWatch : public EntropyObject +{ public: /** * @brief Constructs a file watch (internal - use FileWatchManager::createWatch) */ - FileWatch(FileWatchManager* owner, const std::string& path, - FileWatchCallback callback, const WatchOptions& options); + FileWatch(FileWatchManager* owner, const std::string& path, FileWatchCallback callback, + const WatchOptions& options); ~FileWatch() noexcept override; @@ -93,30 +100,38 @@ class FileWatch : public EntropyObject { /** * @brief Checks if this watch is currently active */ - bool isWatching() const noexcept { return _watching.load(std::memory_order_acquire); } + bool isWatching() const noexcept { + return _watching.load(std::memory_order_acquire); + } /** * @brief Gets the path being watched */ - const std::string& path() const noexcept { return _path; } + const std::string& path() const noexcept { + return _path; + } /** * @brief Gets the watch options */ - const WatchOptions& options() const noexcept { return _options; } + const WatchOptions& options() const noexcept { + return _options; + } // EntropyObject overrides - const char* className() const noexcept override { return "FileWatch"; } + const char* className() const noexcept override { + return "FileWatch"; + } private: - FileWatchManager* _owner; ///< Owning manager - std::string _path; ///< Watched path - FileWatchCallback _callback; ///< User callback - WatchOptions _options; ///< Watch configuration - efsw::WatchID _efswId = 0; ///< efsw watch ID (0 = 
invalid) - std::atomic _watching{false}; ///< true if actively watching + FileWatchManager* _owner; ///< Owning manager + std::string _path; ///< Watched path + FileWatchCallback _callback; ///< User callback + WatchOptions _options; ///< Watch configuration + efsw::WatchID _efswId = 0; ///< efsw watch ID (0 = invalid) + std::atomic _watching{false}; ///< true if actively watching friend class FileWatchManager; }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileWatchManager.cpp b/src/VirtualFileSystem/FileWatchManager.cpp index 4780185..051e428 100644 --- a/src/VirtualFileSystem/FileWatchManager.cpp +++ b/src/VirtualFileSystem/FileWatchManager.cpp @@ -3,12 +3,15 @@ * @brief Implementation of FileWatchManager using EntropyObject handle stamping */ #include "FileWatchManager.h" -#include "VirtualFileSystem.h" -#include "../Logging/Logger.h" -#include + #include +#include -namespace EntropyEngine::Core::IO { +#include "../Logging/Logger.h" +#include "VirtualFileSystem.h" + +namespace EntropyEngine::Core::IO +{ // Simple glob matching helper static bool matchGlob(const std::string& str, const std::string& pattern) { @@ -42,17 +45,17 @@ static bool matchGlob(const std::string& str, const std::string& pattern) { /** * @brief efsw listener implementation that dispatches to FileWatchManager */ -class FileWatchListener : public efsw::FileWatchListener { +class FileWatchListener : public efsw::FileWatchListener +{ public: explicit FileWatchListener(FileWatchManager* manager) : _manager(manager) {} - void handleFileAction(efsw::WatchID watchId, const std::string& dir, - const std::string& filename, efsw::Action action, - std::string oldFilename) override { + void handleFileAction(efsw::WatchID watchId, const std::string& dir, const std::string& filename, + efsw::Action action, std::string oldFilename) override { // Find which slot this watch belongs to uint32_t slotIndex = 
_manager->findSlotByEfswId(watchId); if (slotIndex == UINT32_MAX) { - return; // Watch no longer exists + return; // Watch no longer exists } // Convert efsw action to our event type @@ -71,7 +74,7 @@ class FileWatchListener : public efsw::FileWatchListener { event = FileWatchEvent::Renamed; break; default: - return; // Unknown action + return; // Unknown action } // Build full path @@ -107,9 +110,7 @@ class FileWatchListener : public efsw::FileWatchListener { // FileWatchManager implementation FileWatchManager::FileWatchManager(VirtualFileSystem* vfs) - : _vfs(vfs) - , _listener(std::make_unique(this)) { -} + : _vfs(vfs), _listener(std::make_unique(this)) {} FileWatchManager::~FileWatchManager() { // Phase 1: Stop all watches and collect them (holding _slotMutex) @@ -134,7 +135,7 @@ FileWatchManager::~FileWatchManager() { // Phase 3: Release watch references (no locks needed, refcount is atomic) for (FileWatch* watch : watchesToRelease) { - watch->release(); // Release the manager's reference + watch->release(); // Release the manager's reference } // Phase 4: Clear the ID map @@ -144,9 +145,8 @@ FileWatchManager::~FileWatchManager() { } } -FileWatch* FileWatchManager::createWatch(const std::string& path, - FileWatchCallback callback, - const WatchOptions& options) { +FileWatch* FileWatchManager::createWatch(const std::string& path, FileWatchCallback callback, + const WatchOptions& options) { uint32_t index; uint32_t generation; FileWatch* watch = nullptr; @@ -200,7 +200,7 @@ FileWatch* FileWatchManager::createWatch(const std::string& path, std::lock_guard lock(_slotMutex); freeSlot(index); HandleAccess::clear(*watch); - watch->release(); // Delete the watch + watch->release(); // Delete the watch return nullptr; } @@ -213,7 +213,8 @@ FileWatch* FileWatchManager::createWatch(const std::string& path, _efswIdToSlot[efswId] = index; } - ENTROPY_LOG_INFO("Created file watch for: " + path + " (slot " + std::to_string(index) + ", efswId " + std::to_string(efswId) + 
")"); + ENTROPY_LOG_INFO("Created file watch for: " + path + " (slot " + std::to_string(index) + ", efswId " + + std::to_string(efswId) + ")"); // Return with refcount=1 (caller owns the reference) return watch; @@ -225,7 +226,7 @@ void FileWatchManager::destroyWatch(FileWatch* watch) { } watch->stop(); - watch->release(); // Decrement refcount (may delete) + watch->release(); // Decrement refcount (may delete) } bool FileWatchManager::isValid(const FileWatch* watch) const { @@ -243,9 +244,7 @@ bool FileWatchManager::isValid(const FileWatch* watch) const { uint32_t index = watch->handleIndex(); uint32_t generation = watch->handleGeneration(); - return index < _slots.size() && - _slots[index].occupied && - _slots[index].generation == generation && + return index < _slots.size() && _slots[index].occupied && _slots[index].generation == generation && _slots[index].watch == watch; } @@ -275,23 +274,23 @@ void FileWatchManager::freeSlot(uint32_t index) { WatchSlot& slot = _slots[index]; slot.occupied = false; slot.watch = nullptr; - slot.generation++; // Increment generation to invalidate existing handles + slot.generation++; // Increment generation to invalidate existing handles } void FileWatchManager::ensureWatcherInitialized() { if (_watcher) { - return; // Already initialized + return; // Already initialized } _watcher = std::make_unique(); - _watcher->watch(); // Start watching + _watcher->watch(); // Start watching } bool FileWatchManager::matchesFilters(const std::string& path, const WatchOptions& options) const { // Check exclude patterns first for (const auto& pattern : options.excludePatterns) { if (matchGlob(path, pattern)) { - return false; // Excluded + return false; // Excluded } } @@ -303,11 +302,11 @@ bool FileWatchManager::matchesFilters(const std::string& path, const WatchOption // Check include patterns for (const auto& pattern : options.includePatterns) { if (matchGlob(path, pattern)) { - return true; // Included + return true; // Included } } - 
return false; // Not in include list + return false; // Not in include list } void FileWatchManager::onFileEvent(uint32_t slotIndex, const FileWatchInfo& info) { @@ -315,19 +314,19 @@ void FileWatchManager::onFileEvent(uint32_t slotIndex, const FileWatchInfo& info std::unique_lock lock(_slotMutex); if (slotIndex >= _slots.size() || !_slots[slotIndex].occupied) { - return; // Slot no longer valid + return; // Slot no longer valid } WatchSlot& slot = _slots[slotIndex]; FileWatch* watch = slot.watch; if (!watch || !watch->isWatching()) { - return; // Watch stopped + return; // Watch stopped } // Check filters if (!matchesFilters(info.path, watch->options())) { - return; // Filtered out + return; // Filtered out } // Copy callback (so we can release lock before invoking) @@ -336,10 +335,11 @@ void FileWatchManager::onFileEvent(uint32_t slotIndex, const FileWatchInfo& info // Dispatch to WorkContractGroup via VFS for thread safety if (_vfs && callback) { - _vfs->submit(info.path, [callback, info](FileOperationHandle::OpState& s, const std::string&, const ExecContext&) { - callback(info); - s.complete(FileOpStatus::Complete); - }); + _vfs->submit(info.path, + [callback, info](FileOperationHandle::OpState& s, const std::string&, const ExecContext&) { + callback(info); + s.complete(FileOpStatus::Complete); + }); } } @@ -352,7 +352,7 @@ uint32_t FileWatchManager::findSlotByEfswId(efsw::WatchID efswId) const { return it->second; } - return UINT32_MAX; // Not found + return UINT32_MAX; // Not found } void FileWatchManager::removeEfswWatch(FileWatch* watch) { @@ -395,4 +395,4 @@ void FileWatchManager::removeEfswWatch(FileWatch* watch) { } } -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/FileWatchManager.h b/src/VirtualFileSystem/FileWatchManager.h index 59e96ce..1c5ea4b 100644 --- a/src/VirtualFileSystem/FileWatchManager.h +++ b/src/VirtualFileSystem/FileWatchManager.h @@ -4,17 +4,20 @@ */ #pragma once 
#include -#include #include +#include + #include "FileWatch.h" // Forward declare efsw types -namespace efsw { - class FileWatcher; - class FileWatchListener; -} +namespace efsw +{ +class FileWatcher; +class FileWatchListener; +} // namespace efsw -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ // Forward declarations class VirtualFileSystem; @@ -29,7 +32,8 @@ class VirtualFileSystem; * * Thread safety: All public methods are thread-safe via mutex. */ -class FileWatchManager { +class FileWatchManager +{ public: explicit FileWatchManager(VirtualFileSystem* vfs); ~FileWatchManager(); @@ -47,9 +51,7 @@ class FileWatchManager { * @param options Watch configuration options * @return FileWatch* with refcount=1 (caller owns the reference), or nullptr if failed */ - FileWatch* createWatch(const std::string& path, - FileWatchCallback callback, - const WatchOptions& options); + FileWatch* createWatch(const std::string& path, FileWatchCallback callback, const WatchOptions& options); /** * @brief Stops a watch and releases the reference @@ -69,23 +71,24 @@ class FileWatchManager { /** * @brief Storage slot for a FileWatch */ - struct WatchSlot { - FileWatch* watch = nullptr; ///< Pointer to the watch object (or nullptr if free) - uint32_t generation = 0; ///< Generation counter for validation - bool occupied = false; ///< true if slot is in use + struct WatchSlot + { + FileWatch* watch = nullptr; ///< Pointer to the watch object (or nullptr if free) + uint32_t generation = 0; ///< Generation counter for validation + bool occupied = false; ///< true if slot is in use }; - VirtualFileSystem* _vfs; ///< Parent VFS (for thread dispatch) - std::unique_ptr _watcher; ///< efsw file watcher instance (lazy-initialized) - std::unique_ptr _listener; ///< efsw event listener - std::vector _slots; ///< Slot-based storage - mutable std::mutex _slotMutex; ///< Protects slots and watcher + VirtualFileSystem* _vfs; ///< Parent VFS (for thread dispatch) + 
std::unique_ptr _watcher; ///< efsw file watcher instance (lazy-initialized) + std::unique_ptr _listener; ///< efsw event listener + std::vector _slots; ///< Slot-based storage + mutable std::mutex _slotMutex; ///< Protects slots and watcher // Separate map for efsw ID to slot index lookups // This prevents lock-order-inversion: efsw callbacks (which hold efsw's internal mutex) // need to map IDs to slots, but we can't hold _slotMutex when calling efsw methods. - mutable std::mutex _efswIdMapMutex; ///< Protects _efswIdToSlot map - std::unordered_map _efswIdToSlot; ///< Maps efsw::WatchID to slot index + mutable std::mutex _efswIdMapMutex; ///< Protects _efswIdToSlot map + std::unordered_map _efswIdToSlot; ///< Maps efsw::WatchID to slot index /** * @brief Allocates a new slot for a watch @@ -124,8 +127,8 @@ class FileWatchManager { */ void removeEfswWatch(FileWatch* watch); - friend class FileWatchListener; // efsw listener needs access - friend class FileWatch; // FileWatch needs access to removeEfswWatch + friend class FileWatchListener; // efsw listener needs access + friend class FileWatch; // FileWatch needs access to removeEfswWatch }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/IFileSystemBackend.h b/src/VirtualFileSystem/IFileSystemBackend.h index 53599ab..efcd6e6 100644 --- a/src/VirtualFileSystem/IFileSystemBackend.h +++ b/src/VirtualFileSystem/IFileSystemBackend.h @@ -1,37 +1,44 @@ /** * @file IFileSystemBackend.h * @brief Backend interface for VirtualFileSystem - * + * * Implementations provide concrete file operations (local filesystem, remote stores, etc.). * VFS routes operations to a backend selected by path mounting. Backends may override * normalizeKey() to define identity/locking keys and can ignore options they do not support. 
*/ #pragma once -#include +#include +#include #include -#include #include -#include -#include -#include +#include +#include #include +#include + #include "FileOperationHandle.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ // Forward declarations class FileStream; class VirtualFileSystem; -} +} // namespace EntropyEngine::Core::IO -namespace EntropyEngine::Core::Concurrency { class WorkContractGroup; } +namespace EntropyEngine::Core::Concurrency +{ +class WorkContractGroup; +} -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ // Explicit execution context passed through VFS submit paths and backend hooks // Backends should execute inline when ctx.group equals the owning VFS WorkContractGroup // to avoid same-group nested scheduling; otherwise they may schedule via the VFS group. -struct ExecContext { +struct ExecContext +{ EntropyEngine::Core::Concurrency::WorkContractGroup* group = nullptr; }; @@ -42,7 +49,8 @@ struct ExecContext { * @param length Optional max bytes to read (reads to EOF if not set) * @param binary Open in binary mode (platform newline translation off) */ -struct ReadOptions { +struct ReadOptions +{ uint64_t offset = 0; std::optional length; bool binary = true; @@ -57,19 +65,20 @@ struct ReadOptions { * @param createParentDirs Per-op override to create parent directories * @param ensureFinalNewline Force presence/absence of final newline for whole-file rewrites */ -struct WriteOptions { +struct WriteOptions +{ uint64_t offset = 0; bool append = false; bool createIfMissing = true; bool truncate = false; - std::optional createParentDirs; // per-operation override; nullopt => use VFS default - std::optional ensureFinalNewline; // for whole-file rewrites; nullopt => preserve prior - bool fsync = false; // Force data to disk (durability guarantee, Unix/POSIX only) + std::optional createParentDirs; // per-operation override; nullopt => use VFS default + std::optional ensureFinalNewline; // for 
whole-file rewrites; nullopt => preserve prior + bool fsync = false; // Force data to disk (durability guarantee, Unix/POSIX only) // Optional cross-process serialization via sibling lock file (compatible with atomic replace) - std::optional useLockFile; // If true, acquire + lockSuffix as exclusive lock - std::optional lockTimeout; // Timeout for acquiring lock (overrides VFS default) - std::optional lockSuffix; // Suffix for lock file (default from VFS config) + std::optional useLockFile; // If true, acquire + lockSuffix as exclusive lock + std::optional lockTimeout; // Timeout for acquiring lock (overrides VFS default) + std::optional lockSuffix; // Suffix for lock file (default from VFS config) }; /** @@ -78,8 +87,14 @@ struct WriteOptions { * @param buffered If true, backend may buffer; BufferedFileStream provides explicit buffering * @param bufferSize Suggested buffer size when applicable */ -struct StreamOptions { - enum Mode { Read, Write, ReadWrite }; +struct StreamOptions +{ + enum Mode + { + Read, + Write, + ReadWrite + }; Mode mode = Read; bool buffered = true; size_t bufferSize = 65536; // 64KB default (Phase 2 optimization) @@ -90,7 +105,8 @@ struct StreamOptions { * @brief Capabilities advertised by a backend * @note VFS may adjust behavior based on these (e.g., advisory locking, atomic writes) */ -struct BackendCapabilities { +struct BackendCapabilities +{ bool supportsStreaming = true; bool supportsRandomAccess = true; bool supportsDirectories = true; @@ -115,17 +131,24 @@ struct BackendCapabilities { * @param sortBy Sort order for results (none, name, size, modified) * @param maxResults Maximum number of results (for pagination; 0 = unlimited) */ -struct ListDirectoryOptions { - enum SortOrder { None, ByName, BySize, ByModifiedTime }; +struct ListDirectoryOptions +{ + enum SortOrder + { + None, + ByName, + BySize, + ByModifiedTime + }; bool recursive = false; bool followSymlinks = true; size_t maxDepth = SIZE_MAX; - std::optional globPattern; // 
Simple glob matching (*.txt, file?.dat, etc) + std::optional globPattern; // Simple glob matching (*.txt, file?.dat, etc) std::function filter; // Optional filter callback - bool includeHidden = false; // Show hidden files/directories - SortOrder sortBy = None; // Sort order for results - size_t maxResults = 0; // Max results for pagination (0 = unlimited) + bool includeHidden = false; // Show hidden files/directories + SortOrder sortBy = None; // Sort order for results + size_t maxResults = 0; // Max results for pagination (0 = unlimited) }; // Options for batch metadata queries @@ -135,7 +158,8 @@ struct ListDirectoryOptions { * @param includeExtendedAttributes Include extended attributes if supported * @param cacheTTL Optional cache TTL (0 = no caching) */ -struct BatchMetadataOptions { +struct BatchMetadataOptions +{ std::vector paths; bool includeExtendedAttributes = false; std::chrono::seconds cacheTTL = std::chrono::seconds(0); // 0 = no caching @@ -149,10 +173,11 @@ struct BatchMetadataOptions { * @param useReflink Use copy-on-write cloning if available * @param progressCallback Optional progress callback; return false to cancel */ -struct CopyOptions { +struct CopyOptions +{ bool overwriteExisting = false; bool preserveAttributes = true; - bool useReflink = true; // Use copy-on-write if available (Linux, APFS) + bool useReflink = true; // Use copy-on-write if available (Linux, APFS) std::function progressCallback; // Return false to cancel }; @@ -162,16 +187,18 @@ struct CopyOptions { * @param chunkSize Preferred chunk size in bytes * @param progressCallback Optional progress callback; return false to cancel */ -struct ProgressOptions { - size_t chunkSize = 1024 * 1024; // 1MB default +struct ProgressOptions +{ + size_t chunkSize = 1024 * 1024; // 1MB default std::function progressCallback; // Return false to cancel }; // Backend interface -class IFileSystemBackend { +class IFileSystemBackend +{ public: virtual ~IFileSystemBackend() = default; - + // Core 
file operations /** * @brief Reads file contents @@ -197,7 +224,8 @@ class IFileSystemBackend { * @note Special files (FIFO, device, socket) are rejected with FileError::InvalidPath on Unix. * @note Set options.fsync=true for durability guarantee (Unix/POSIX only; forces data to disk). */ - virtual FileOperationHandle writeFile(const std::string& path, std::span data, WriteOptions options = {}) = 0; + virtual FileOperationHandle writeFile(const std::string& path, std::span data, + WriteOptions options = {}) = 0; /** * @brief Deletes a file * @param path Target path @@ -212,7 +240,7 @@ class IFileSystemBackend { * @return Handle representing the create operation */ virtual FileOperationHandle createFile(const std::string& path) = 0; - + // Metadata operations /** * @brief Retrieves metadata for a file @@ -230,9 +258,9 @@ class IFileSystemBackend { // Batch metadata query (Phase 2) virtual FileOperationHandle getMetadataBatch(const BatchMetadataOptions& options) { (void)options; - return FileOperationHandle{}; // Default: not supported + return FileOperationHandle{}; // Default: not supported } - + // Directory operations (optional) /** * @brief Creates a directory at the given path @@ -251,8 +279,8 @@ class IFileSystemBackend { * @return A FileOperationHandle; status() will be Pending for default impl */ virtual FileOperationHandle createDirectory(const std::string& path) { - (void)path; // Suppress unused warning - return FileOperationHandle{}; // Default: not supported + (void)path; // Suppress unused warning + return FileOperationHandle{}; // Default: not supported } /** * @brief Removes a directory at the given path @@ -272,8 +300,8 @@ class IFileSystemBackend { * @return A FileOperationHandle; status() will be Pending for default impl */ virtual FileOperationHandle removeDirectory(const std::string& path) { - (void)path; // Suppress unused warning - return FileOperationHandle{}; // Default: not supported + (void)path; // Suppress unused warning + return 
FileOperationHandle{}; // Default: not supported } /** * @brief Lists entries in the given directory @@ -294,10 +322,11 @@ class IFileSystemBackend { * @return A FileOperationHandle; status() will be Pending for default impl */ virtual FileOperationHandle listDirectory(const std::string& path, ListDirectoryOptions options = {}) { - (void)path; (void)options; // Suppress unused warnings - return FileOperationHandle{}; // Default: not supported + (void)path; + (void)options; // Suppress unused warnings + return FileOperationHandle{}; // Default: not supported } - + // Stream support /** * @brief Opens a stream for the given path @@ -306,7 +335,7 @@ class IFileSystemBackend { * @return Unique pointer to FileStream, or null on failure */ virtual std::unique_ptr openStream(const std::string& path, StreamOptions options = {}) = 0; - + // Line operations /** * @brief Reads a single line by index (0-based) @@ -325,16 +354,22 @@ class IFileSystemBackend { virtual FileOperationHandle writeLine(const std::string& path, size_t lineNumber, std::string_view line) = 0; // Copy/Move operations (Phase 2) - virtual FileOperationHandle copyFile(const std::string& src, const std::string& dst, const CopyOptions& options = {}) { - (void)src; (void)dst; (void)options; - return FileOperationHandle{}; // Default: not supported + virtual FileOperationHandle copyFile(const std::string& src, const std::string& dst, + const CopyOptions& options = {}) { + (void)src; + (void)dst; + (void)options; + return FileOperationHandle{}; // Default: not supported } - virtual FileOperationHandle moveFile(const std::string& src, const std::string& dst, bool overwriteExisting = false) { - (void)src; (void)dst; (void)overwriteExisting; - return FileOperationHandle{}; // Default: not supported + virtual FileOperationHandle moveFile(const std::string& src, const std::string& dst, + bool overwriteExisting = false) { + (void)src; + (void)dst; + (void)overwriteExisting; + return FileOperationHandle{}; // Default: 
not supported } - + // Backend info virtual BackendCapabilities getCapabilities() const = 0; virtual std::string getBackendType() const = 0; @@ -342,7 +377,9 @@ class IFileSystemBackend { // Backend-aware path normalization for identity/locking keys // Default: pass-through (no normalization). // Backends should override to implement their own canonicalization (e.g., case-insensitive on Windows local FS). - virtual std::string normalizeKey(const std::string& path) const { return path; } + virtual std::string normalizeKey(const std::string& path) const { + return path; + } // Backend-provided write-scope acquisition with explicit status and timeout options /** @@ -357,38 +394,43 @@ class IFileSystemBackend { // NOTE: Status::NotSupported indicates the backend lacks a native scoping primitive. // VFS will fall back to its in-process advisory lock. This status is slated for deprecation // in a future pass; backends should plan to implement acquireWriteScope. - struct AcquireWriteScopeResult { - enum class Status { + struct AcquireWriteScopeResult + { + enum class Status + { Acquired, Busy, TimedOut, NotSupported, Error } status = Status::NotSupported; - std::unique_ptr token{nullptr, [](void*){}}; // Opaque RAII token - std::error_code errorCode{}; // Provider/system error code if any - std::string message; // Human-readable context - std::chrono::milliseconds suggestedBackoff{0}; // Hint for retry/backoff on Busy + std::unique_ptr token{nullptr, [](void*) {}}; // Opaque RAII token + std::error_code errorCode{}; // Provider/system error code if any + std::string message; // Human-readable context + std::chrono::milliseconds suggestedBackoff{0}; // Hint for retry/backoff on Busy }; - struct AcquireScopeOptions { - std::optional timeout; // nullopt => backend default - bool nonBlocking; // true => do not wait + struct AcquireScopeOptions + { + std::optional timeout; // nullopt => backend default + bool nonBlocking; // true => do not wait AcquireScopeOptions() : 
timeout(std::nullopt), nonBlocking(false) {} }; // Default implementation: not supported, VFS may fall back to advisory lock virtual AcquireWriteScopeResult acquireWriteScope(const std::string& path, AcquireScopeOptions options = {}) { - (void)path; (void)options; - return AcquireWriteScopeResult{}; // NotSupported by default + (void)path; + (void)options; + return AcquireWriteScopeResult{}; // NotSupported by default } - // Set the parent VFS for callbacks - void setVirtualFileSystem(VirtualFileSystem* vfs) { _vfs = vfs; } - + void setVirtualFileSystem(VirtualFileSystem* vfs) { + _vfs = vfs; + } + protected: VirtualFileSystem* _vfs = nullptr; }; -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/LocalFileSystemBackend.cpp b/src/VirtualFileSystem/LocalFileSystemBackend.cpp index 9b63772..8023660 100644 --- a/src/VirtualFileSystem/LocalFileSystemBackend.cpp +++ b/src/VirtualFileSystem/LocalFileSystemBackend.cpp @@ -1,268 +1,267 @@ #include "LocalFileSystemBackend.h" -#include "VirtualFileSystem.h" -#include "FileStream.h" -#include -#include -#include -#include -#include -#include + #include #include #include -#include -#include #include +#include +#include +#include +#include +#include +#include +#include +#include #include "CoreCommon.h" +#include "FileStream.h" +#include "VirtualFileSystem.h" #if defined(_WIN32) #include #endif #if defined(__unix__) || defined(__APPLE__) -#include // flock() #include // open(), fcntl() -#include // close(), access(), fsync() +#include // flock() #include // stat(), chmod() +#include // close(), access(), fsync() #endif -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ #if defined(_WIN32) - static std::string toWinLongPath(const std::string& path) { - if (path.rfind("\\\\?\\", 0) == 0) return path; - if (path.rfind("\\\\", 0) == 0) { - // UNC path: \\server\\share -> \\?\\UNC\\server\\share - return 
std::string("\\\\?\\UNC\\") + path.substr(2); - } - if (path.size() >= 2 && std::isalpha(static_cast(path[0])) && path[1] == ':') { - return std::string("\\\\?\\") + path; - } - return path; +static std::string toWinLongPath(const std::string& path) { + if (path.rfind("\\\\?\\", 0) == 0) return path; + if (path.rfind("\\\\", 0) == 0) { + // UNC path: \\server\\share -> \\?\\UNC\\server\\share + return std::string("\\\\?\\UNC\\") + path.substr(2); } + if (path.size() >= 2 && std::isalpha(static_cast(path[0])) && path[1] == ':') { + return std::string("\\\\?\\") + path; + } + return path; +} #endif // Helper function for simple glob pattern matching // Supports: * (any sequence), ? (single char) -namespace { - bool matchGlob(const std::string& str, const std::string& pattern) { - size_t s = 0, p = 0; - size_t starIdx = std::string::npos, matchIdx = 0; - - while (s < str.size()) { - if (p < pattern.size() && (pattern[p] == '?' || pattern[p] == str[s])) { - // Match single character or exact match - ++s; - ++p; - } else if (p < pattern.size() && pattern[p] == '*') { - // Star matches zero or more characters - starIdx = p; - matchIdx = s; - ++p; - } else if (starIdx != std::string::npos) { - // Backtrack to last star - p = starIdx + 1; - ++matchIdx; - s = matchIdx; - } else { - // No match - return false; - } - } - - // Skip remaining stars in pattern - while (p < pattern.size() && pattern[p] == '*') { +namespace +{ +bool matchGlob(const std::string& str, const std::string& pattern) { + size_t s = 0, p = 0; + size_t starIdx = std::string::npos, matchIdx = 0; + + while (s < str.size()) { + if (p < pattern.size() && (pattern[p] == '?' 
|| pattern[p] == str[s])) { + // Match single character or exact match + ++s; + ++p; + } else if (p < pattern.size() && pattern[p] == '*') { + // Star matches zero or more characters + starIdx = p; + matchIdx = s; ++p; + } else if (starIdx != std::string::npos) { + // Backtrack to last star + p = starIdx + 1; + ++matchIdx; + s = matchIdx; + } else { + // No match + return false; } + } - return p == pattern.size(); + // Skip remaining stars in pattern + while (p < pattern.size() && pattern[p] == '*') { + ++p; } - // Map errno to FileError with platform-specific handling - // Mapping notes: - // - ENOSPC/EDQUOT → DiskFull (out of space or quota exceeded) - // - EACCES/EPERM → AccessDenied (permission denied) - // - ENOENT → FileNotFound (missing file/dir) - // - EINVAL/ENAMETOOLONG/(EISDIR on POSIX) → InvalidPath (malformed or wrong type) - // - ENET*/ETIMEDOUT (POSIX) → NetworkError (remote transport issues) - // - default → IOError (local I/O failures) - // systemError in FileErrorInfo should carry std::error_code when available. - FileError mapErrnoToFileError(int err) { - switch (err) { - case ENOSPC: + return p == pattern.size(); +} + +// Map errno to FileError with platform-specific handling +// Mapping notes: +// - ENOSPC/EDQUOT → DiskFull (out of space or quota exceeded) +// - EACCES/EPERM → AccessDenied (permission denied) +// - ENOENT → FileNotFound (missing file/dir) +// - EINVAL/ENAMETOOLONG/(EISDIR on POSIX) → InvalidPath (malformed or wrong type) +// - ENET*/ETIMEDOUT (POSIX) → NetworkError (remote transport issues) +// - default → IOError (local I/O failures) +// systemError in FileErrorInfo should carry std::error_code when available. 
+FileError mapErrnoToFileError(int err) { + switch (err) { + case ENOSPC: #if defined(__unix__) || defined(__APPLE__) - case EDQUOT: // Disk quota exceeded (POSIX) + case EDQUOT: // Disk quota exceeded (POSIX) #endif - return FileError::DiskFull; - case EACCES: - case EPERM: - return FileError::AccessDenied; - case ENOENT: - return FileError::FileNotFound; - case EINVAL: - case ENAMETOOLONG: + return FileError::DiskFull; + case EACCES: + case EPERM: + return FileError::AccessDenied; + case ENOENT: + return FileError::FileNotFound; + case EINVAL: + case ENAMETOOLONG: #if defined(__unix__) || defined(__APPLE__) - case EISDIR: + case EISDIR: #endif - return FileError::InvalidPath; + return FileError::InvalidPath; #if defined(__unix__) || defined(__APPLE__) - case ENETUNREACH: - case ENETDOWN: - case ETIMEDOUT: - return FileError::NetworkError; + case ENETUNREACH: + case ENETDOWN: + case ETIMEDOUT: + return FileError::NetworkError; #endif - default: - return FileError::IOError; - } + default: + return FileError::IOError; } +} - // Check if path points to a special file (FIFO, device, socket) - bool isSpecialFile(const std::filesystem::path& p) { - std::error_code ec; - auto status = std::filesystem::status(p, ec); - if (ec) return false; +// Check if path points to a special file (FIFO, device, socket) +bool isSpecialFile(const std::filesystem::path& p) { + std::error_code ec; + auto status = std::filesystem::status(p, ec); + if (ec) return false; - return std::filesystem::is_block_file(status) || - std::filesystem::is_character_file(status) || - std::filesystem::is_fifo(status) || - std::filesystem::is_socket(status); - } + return std::filesystem::is_block_file(status) || std::filesystem::is_character_file(status) || + std::filesystem::is_fifo(status) || std::filesystem::is_socket(status); +} - // Create secure temporary file path - std::filesystem::path createSecureTempPath(const std::filesystem::path& dir, - const std::string& base) { +// Create secure temporary 
file path +std::filesystem::path createSecureTempPath(const std::filesystem::path& dir, const std::string& base) { #if defined(__unix__) || defined(__APPLE__) - // Use mkstemp for secure temp file creation (avoid mutating std::string buffer) - std::string tmpl = (dir / (base + ".XXXXXX")).string(); - std::vector buf(tmpl.begin(), tmpl.end()); - buf.push_back('\0'); - int fd = ::mkstemp(buf.data()); - if (fd < 0) { - // Fallback to random if mkstemp fails - return dir / (base + ".tmp" + std::to_string(std::random_device{}())); - } - ::close(fd); // We'll reopen with fstream - return std::filesystem::path(buf.data()); -#else - // Windows: use random device + // Use mkstemp for secure temp file creation (avoid mutating std::string buffer) + std::string tmpl = (dir / (base + ".XXXXXX")).string(); + std::vector buf(tmpl.begin(), tmpl.end()); + buf.push_back('\0'); + int fd = ::mkstemp(buf.data()); + if (fd < 0) { + // Fallback to random if mkstemp fails return dir / (base + ".tmp" + std::to_string(std::random_device{}())); -#endif } + ::close(fd); // We'll reopen with fstream + return std::filesystem::path(buf.data()); +#else + // Windows: use random device + return dir / (base + ".tmp" + std::to_string(std::random_device{}())); +#endif +} - // Check current process permissions (platform-specific) - bool checkCurrentProcessPermissions(const std::filesystem::path& p, - bool& canRead, bool& canWrite, bool& canExec) { +// Check current process permissions (platform-specific) +bool checkCurrentProcessPermissions(const std::filesystem::path& p, bool& canRead, bool& canWrite, bool& canExec) { #if defined(__unix__) || defined(__APPLE__) - // Use access() for current process permissions - canRead = (::access(p.c_str(), R_OK) == 0); - canWrite = (::access(p.c_str(), W_OK) == 0); - canExec = (::access(p.c_str(), X_OK) == 0); - return true; + // Use access() for current process permissions + canRead = (::access(p.c_str(), R_OK) == 0); + canWrite = (::access(p.c_str(), W_OK) == 0); 
+ canExec = (::access(p.c_str(), X_OK) == 0); + return true; #else - // Windows: use std::filesystem permissions (checks if anyone can access) - std::error_code ec; - auto st = std::filesystem::status(p, ec); - if (ec) { - canRead = canWrite = canExec = false; - return false; - } - auto perms = st.permissions(); - auto has = [&](std::filesystem::perms perm) { - return (perms & perm) != std::filesystem::perms::none; - }; - canRead = has(std::filesystem::perms::owner_read) || - has(std::filesystem::perms::group_read) || - has(std::filesystem::perms::others_read); - canWrite = has(std::filesystem::perms::owner_write) || - has(std::filesystem::perms::group_write) || - has(std::filesystem::perms::others_write); - canExec = has(std::filesystem::perms::owner_exec) || - has(std::filesystem::perms::group_exec) || - has(std::filesystem::perms::others_exec); - return true; -#endif + // Windows: use std::filesystem permissions (checks if anyone can access) + std::error_code ec; + auto st = std::filesystem::status(p, ec); + if (ec) { + canRead = canWrite = canExec = false; + return false; } + auto perms = st.permissions(); + auto has = [&](std::filesystem::perms perm) { return (perms & perm) != std::filesystem::perms::none; }; + canRead = has(std::filesystem::perms::owner_read) || has(std::filesystem::perms::group_read) || + has(std::filesystem::perms::others_read); + canWrite = has(std::filesystem::perms::owner_write) || has(std::filesystem::perms::group_write) || + has(std::filesystem::perms::others_write); + canExec = has(std::filesystem::perms::owner_exec) || has(std::filesystem::perms::group_exec) || + has(std::filesystem::perms::others_exec); + return true; +#endif } +} // namespace // POSIX lock-file helpers and directory fsync for durability -namespace { +namespace +{ #if defined(__unix__) || defined(__APPLE__) - struct PosixLockFile { - int fd = -1; - std::filesystem::path path; - ~PosixLockFile() { - if (fd >= 0) { - // Best-effort unlock then close - ::flock(fd, 
LOCK_UN); - ::close(fd); - } +struct PosixLockFile +{ + int fd = -1; + std::filesystem::path path; + ~PosixLockFile() { + if (fd >= 0) { + // Best-effort unlock then close + ::flock(fd, LOCK_UN); + ::close(fd); } - }; + } +}; - static std::unique_ptr acquireSiblingLockPOSIX( - const std::filesystem::path& dst, - std::chrono::milliseconds timeout, - const std::string& suffix, - std::error_code& ecOut, - bool& timedOut) - { - using clock = std::chrono::steady_clock; - timedOut = false; - ecOut.clear(); - auto lockPath = dst.string() + suffix; - int fd = ::open(lockPath.c_str(), O_CREAT | O_CLOEXEC | O_RDWR, 0600); - if (fd < 0) { ecOut = std::error_code(errno, std::generic_category()); return nullptr; } - auto token = std::make_unique(); - token->fd = fd; - token->path = lockPath; - - auto until = clock::now() + timeout; - for (;;) { - if (::flock(fd, LOCK_EX | LOCK_NB) == 0) { - return token; - } - int e = errno; - if (e != EWOULDBLOCK && e != EAGAIN) { - ecOut = std::error_code(e, std::generic_category()); - return nullptr; - } - if (clock::now() >= until) { - timedOut = true; - return nullptr; - } - std::this_thread::sleep_for(std::chrono::milliseconds(10)); +static std::unique_ptr acquireSiblingLockPOSIX(const std::filesystem::path& dst, + std::chrono::milliseconds timeout, + const std::string& suffix, std::error_code& ecOut, + bool& timedOut) { + using clock = std::chrono::steady_clock; + timedOut = false; + ecOut.clear(); + auto lockPath = dst.string() + suffix; + int fd = ::open(lockPath.c_str(), O_CREAT | O_CLOEXEC | O_RDWR, 0600); + if (fd < 0) { + ecOut = std::error_code(errno, std::generic_category()); + return nullptr; + } + auto token = std::make_unique(); + token->fd = fd; + token->path = lockPath; + + auto until = clock::now() + timeout; + for (;;) { + if (::flock(fd, LOCK_EX | LOCK_NB) == 0) { + return token; } + int e = errno; + if (e != EWOULDBLOCK && e != EAGAIN) { + ecOut = std::error_code(e, std::generic_category()); + return nullptr; + } + if 
(clock::now() >= until) { + timedOut = true; + return nullptr; + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); } +} - static bool fsyncParentDirectoryPOSIX(const std::filesystem::path& p, std::error_code& ecOut) { - ecOut.clear(); - auto parent = p.parent_path(); - if (parent.empty()) return true; - int dfd = ::open(parent.c_str(), O_RDONLY); - if (dfd < 0) { ecOut = std::error_code(errno, std::generic_category()); return false; } - bool ok = (::fsync(dfd) == 0); - int e = ok ? 0 : errno; - ::close(dfd); - if (!ok) ecOut = std::error_code(e, std::generic_category()); - return ok; +static bool fsyncParentDirectoryPOSIX(const std::filesystem::path& p, std::error_code& ecOut) { + ecOut.clear(); + auto parent = p.parent_path(); + if (parent.empty()) return true; + int dfd = ::open(parent.c_str(), O_RDONLY); + if (dfd < 0) { + ecOut = std::error_code(errno, std::generic_category()); + return false; } -#endif + bool ok = (::fsync(dfd) == 0); + int e = ok ? 0 : errno; + ::close(dfd); + if (!ok) ecOut = std::error_code(e, std::generic_category()); + return ok; } +#endif +} // namespace // Concrete FileStream implementation for local files -class LocalFileStream : public FileStream { +class LocalFileStream : public FileStream +{ private: mutable std::fstream _stream; std::string _path; StreamOptions::Mode _mode; mutable bool _failFlag = false; - + public: - LocalFileStream(const std::string& path, StreamOptions::Mode mode) - : _path(path), _mode(mode) { + LocalFileStream(const std::string& path, StreamOptions::Mode mode) : _path(path), _mode(mode) { std::ios_base::openmode flags = std::ios::binary; - + switch (mode) { case StreamOptions::Read: flags |= std::ios::in; @@ -274,60 +273,60 @@ class LocalFileStream : public FileStream { flags |= std::ios::in | std::ios::out; break; } - + _stream.open(path, flags); if (!_stream.is_open()) { _failFlag = true; } } - + ~LocalFileStream() override { if (_stream.is_open()) { _stream.close(); } } - + IoResult 
read(std::span buffer) override { IoResult result; - + if (!good() || buffer.empty()) { result.error = FileError::IOError; return result; } - + _stream.read(reinterpret_cast(buffer.data()), buffer.size()); result.bytesTransferred = static_cast(_stream.gcount()); result.complete = (_stream.gcount() == static_cast(buffer.size())) || _stream.eof(); - + if (_stream.bad()) { result.error = FileError::IOError; } - + return result; } - + IoResult write(std::span data) override { IoResult result; - + if (!good() || data.empty()) { result.error = FileError::IOError; return result; } - + auto posBefore = _stream.tellp(); _stream.write(reinterpret_cast(data.data()), data.size()); auto posAfter = _stream.tellp(); - + if (_stream.good()) { result.bytesTransferred = static_cast(posAfter - posBefore); result.complete = (result.bytesTransferred == data.size()); } else { result.error = FileError::IOError; } - + return result; } - + bool seek(int64_t offset, std::ios_base::seekdir dir) override { if (_mode == StreamOptions::Read || _mode == StreamOptions::ReadWrite) { _stream.seekg(offset, dir); @@ -337,7 +336,7 @@ class LocalFileStream : public FileStream { } return _stream.good(); } - + int64_t tell() const override { if (_mode == StreamOptions::Read || _mode == StreamOptions::ReadWrite) { return static_cast(_stream.tellg()); @@ -347,20 +346,32 @@ class LocalFileStream : public FileStream { } return -1; } - - bool good() const override { return _stream.good() && !_failFlag; } - bool eof() const override { return _stream.eof(); } - bool fail() const override { return _stream.fail() || _failFlag; } - void flush() override { _stream.flush(); } - void close() override { _stream.close(); } - std::string path() const override { return _path; } + + bool good() const override { + return _stream.good() && !_failFlag; + } + bool eof() const override { + return _stream.eof(); + } + bool fail() const override { + return _stream.fail() || _failFlag; + } + void flush() override { + 
_stream.flush(); + } + void close() override { + _stream.close(); + } + std::string path() const override { + return _path; + } }; // LocalFileSystemBackend implementation -LocalFileSystemBackend::LocalFileSystemBackend() { -} +LocalFileSystemBackend::LocalFileSystemBackend() {} -FileOperationHandle LocalFileSystemBackend::submitWork(const std::string& path, +FileOperationHandle LocalFileSystemBackend::submitWork( + const std::string& path, std::function work, const ExecContext& ctx) { if (!_vfs) { @@ -374,7 +385,9 @@ FileOperationHandle LocalFileSystemBackend::submitWork(const std::string& path, // If we are already executing within the same WorkContractGroup, run inline to avoid nested submission if (ctx.group == _vfs->_group) { auto st = std::make_shared(); - st->progress = [grp=_vfs->_group]() { if (grp) grp->executeAllBackgroundWork(); }; + st->progress = [grp = _vfs->_group]() { + if (grp) grp->executeAllBackgroundWork(); + }; try { st->st.store(FileOpStatus::Running, std::memory_order_release); work(*st, path, ctx); @@ -395,22 +408,21 @@ FileOperationHandle LocalFileSystemBackend::submitWork(const std::string& path, } // Otherwise use VFS's submit method to execute work asynchronously - return _vfs->submit(path, [work, ctx](FileOperationHandle::OpState& s, const std::string& p, const ExecContext& /*outer*/){ - work(s, p, ctx); - }); + return _vfs->submit(path, [work, ctx](FileOperationHandle::OpState& s, const std::string& p, + const ExecContext& /*outer*/) { work(s, p, ctx); }); } -FileOperationHandle LocalFileSystemBackend::submitWork(const std::string& path, - std::function work) { +FileOperationHandle LocalFileSystemBackend::submitWork( + const std::string& path, std::function work) { // Delegate to context-aware overload with no executing group (top-level call) - ExecContext ctx{ nullptr }; - return submitWork(path, - [work](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&){ work(s, p); }, - ctx); + ExecContext ctx{nullptr}; + 
return submitWork( + path, [work](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&) { work(s, p); }, ctx); } // Synchronous operations for FileHandle via submitSerialized -void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const std::string& p, std::span data, WriteOptions options) { +void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const std::string& p, + std::span data, WriteOptions options) { // Check for special files (FIFO, device, socket) that shouldn't be written via file operations if (isSpecialFile(p)) { s.setError(FileError::InvalidPath, "Cannot perform file operations on special files (FIFO, device, socket)", p); @@ -439,7 +451,14 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const }); // Optional cross-process serialization via lock directory - struct LockDirGuard { std::filesystem::path path; ~LockDirGuard(){ std::error_code rec; if(!path.empty()) std::filesystem::remove(path, rec);} }; + struct LockDirGuard + { + std::filesystem::path path; + ~LockDirGuard() { + std::error_code rec; + if (!path.empty()) std::filesystem::remove(path, rec); + } + }; std::optional lockGuard; { bool useLock = options.useLockFile.value_or(_vfs ? _vfs->_cfg.defaultUseLockFile : false); @@ -467,17 +486,25 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const auto ms = std::chrono::duration_cast(now).count(); info << "ts=" << ms << "\n"; } - } catch (...) { /* ignore */ } + } catch (...) 
{ /* ignore */ + } break; } if (std::filesystem::exists(lockPath)) { // Check for stale lock based on age >= timeout - std::error_code tec2; auto lwt = std::filesystem::last_write_time(lockPath, tec2); + std::error_code tec2; + auto lwt = std::filesystem::last_write_time(lockPath, tec2); if (!tec2) { - auto age = std::chrono::system_clock::now() - std::chrono::time_point_cast(lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now()); + auto age = + std::chrono::system_clock::now() - + std::chrono::time_point_cast( + lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now()); if (age >= timeout) { - std::error_code rec; std::filesystem::remove_all(lockPath, rec); - if (!rec) { continue; } + std::error_code rec; + std::filesystem::remove_all(lockPath, rec); + if (!rec) { + continue; + } } } // else: not stale yet; fall through to timeout sleep @@ -488,7 +515,10 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const return; } if (std::chrono::steady_clock::now() - start >= timeout) { - s.setError(FileError::Timeout, std::string("Lock-file acquisition timed out after ") + std::to_string(timeout.count()) + " ms (lock=" + lockPath.string() + ")", p); + s.setError(FileError::Timeout, + std::string("Lock-file acquisition timed out after ") + std::to_string(timeout.count()) + + " ms (lock=" + lockPath.string() + ")", + p); s.complete(FileOpStatus::Failed); return; } @@ -548,12 +578,22 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const switch (werr) { case ERROR_INVALID_NAME: case ERROR_FILENAME_EXCED_RANGE: - case ERROR_PATH_NOT_FOUND: code = FileError::InvalidPath; break; + case ERROR_PATH_NOT_FOUND: + code = FileError::InvalidPath; + break; case ERROR_SHARING_VIOLATION: - case ERROR_LOCK_VIOLATION: code = FileError::Conflict; break; - case ERROR_ACCESS_DENIED: code = FileError::AccessDenied; break; - case ERROR_DISK_FULL: code = FileError::DiskFull; 
break; - default: code = FileError::IOError; break; + case ERROR_LOCK_VIOLATION: + code = FileError::Conflict; + break; + case ERROR_ACCESS_DENIED: + code = FileError::AccessDenied; + break; + case ERROR_DISK_FULL: + code = FileError::DiskFull; + break; + default: + code = FileError::IOError; + break; } s.setError(code, "Cannot open file for writing", p, std::error_code((int)werr, std::system_category())); s.complete(FileOpStatus::Failed); @@ -565,7 +605,8 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const if (stEc) { s.setError(FileError::InvalidPath, "Invalid path or unsupported filename", p, stEc); } else { - s.setError(FileError::AccessDenied, "Cannot open file for writing", p, std::error_code(saved_errno, std::generic_category())); + s.setError(FileError::AccessDenied, "Cannot open file for writing", p, + std::error_code(saved_errno, std::generic_category())); } } s.complete(FileOpStatus::Failed); @@ -580,13 +621,13 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const size_t wrote = data.size(); if (options.ensureFinalNewline.value_or(false) && !options.append && (options.truncate || options.offset == 0)) { - #if defined(_WIN32) +#if defined(_WIN32) const char* eol = "\r\n"; const size_t eolLen = 2; - #else +#else const char* eol = "\n"; const size_t eolLen = 1; - #endif +#endif bool endsWithLF = (!data.empty() && reinterpret_cast(data.data())[data.size() - 1] == '\n'); if (!endsWithLF) { out.write(eol, static_cast(eolLen)); @@ -612,7 +653,8 @@ void LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const if (::fcntl(fd, F_FULLFSYNC) != 0) { const int sync_errno = errno; ::close(fd); - s.setError(FileError::IOError, "F_FULLFSYNC failed", p, std::error_code(sync_errno, std::generic_category())); + s.setError(FileError::IOError, "F_FULLFSYNC failed", p, + std::error_code(sync_errno, std::generic_category())); s.complete(FileOpStatus::Failed); return; } @@ -627,7 +669,8 @@ void 
LocalFileSystemBackend::doWriteFile(FileOperationHandle::OpState& s, const if (::fdatasync(fd) != 0) { const int sync_errno = errno; ::close(fd); - s.setError(FileError::IOError, "fdatasync failed", p, std::error_code(sync_errno, std::generic_category())); + s.setError(FileError::IOError, "fdatasync failed", p, + std::error_code(sync_errno, std::generic_category())); s.complete(FileOpStatus::Failed); return; } @@ -681,14 +724,16 @@ void LocalFileSystemBackend::doCreateFile(FileOperationHandle::OpState& s, const std::ofstream out(osPath, std::ios::out | std::ios::binary); if (!out) { const int saved_errno = errno; // Capture errno immediately - s.setError(FileError::AccessDenied, "Cannot create file", p, std::error_code(saved_errno, std::generic_category())); + s.setError(FileError::AccessDenied, "Cannot create file", p, + std::error_code(saved_errno, std::generic_category())); s.complete(FileOpStatus::Failed); } else { s.complete(FileOpStatus::Complete); } } -void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const std::string& p, size_t lineNumber, std::string_view line) { +void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const std::string& p, size_t lineNumber, + std::string_view line) { std::error_code ec; const bool createParents = _vfs ? 
_vfs->_cfg.defaultCreateParentDirs : false; if (createParents) { @@ -723,44 +768,90 @@ void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const const auto& osTemp = tempPath; #endif std::ofstream out(osTemp, std::ios::out | std::ios::binary | std::ios::trunc); - if (!out) { s.setError(FileError::IOError, "Failed to create temp file for writeLine", p); s.complete(FileOpStatus::Failed); return; } + if (!out) { + s.setError(FileError::IOError, "Failed to create temp file for writeLine", p); + s.complete(FileOpStatus::Failed); + return; + } std::string eol; #if defined(_WIN32) eol = "\r\n"; #else eol = "\n"; #endif - size_t current = 0; bool wroteReplacement = false; std::string buf; + size_t current = 0; + bool wroteReplacement = false; + std::string buf; if (in) { while (std::getline(in, buf)) { if (!buf.empty() && buf.back() == '\r') buf.pop_back(); - if (current == lineNumber) { out.write(line.data(), static_cast(line.size())); out.write(eol.data(), static_cast(eol.size())); wroteReplacement = true; } - else { out.write(buf.data(), static_cast(buf.size())); out.write(eol.data(), static_cast(eol.size())); } + if (current == lineNumber) { + out.write(line.data(), static_cast(line.size())); + out.write(eol.data(), static_cast(eol.size())); + wroteReplacement = true; + } else { + out.write(buf.data(), static_cast(buf.size())); + out.write(eol.data(), static_cast(eol.size())); + } ++current; } } if (!wroteReplacement) { - while (current < lineNumber) { out.write(eol.data(), static_cast(eol.size())); ++current; } + while (current < lineNumber) { + out.write(eol.data(), static_cast(eol.size())); + ++current; + } out.write(line.data(), static_cast(line.size())); // Preserve final newline policy: if original had a trailing newline, keep one out.write(eol.data(), static_cast(eol.size())); } - out.flush(); out.close(); + out.flush(); + out.close(); #if defined(_WIN32) // Windows atomic replace with retries - bool replaced = false; const int maxRetries = 
10; const DWORD retryDelayMs = 25; std::wstring wTarget = std::filesystem::path(p).wstring(); std::wstring wTemp = std::filesystem::path(tempPath).wstring(); + bool replaced = false; + const int maxRetries = 10; + const DWORD retryDelayMs = 25; + std::wstring wTarget = std::filesystem::path(p).wstring(); + std::wstring wTemp = std::filesystem::path(tempPath).wstring(); for (int i = 0; i < maxRetries; ++i) { - if (MoveFileExW(wTemp.c_str(), wTarget.c_str(), MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH)) { replaced = true; break; } - DWORD error = GetLastError(); if (error == ERROR_SHARING_VIOLATION || error == ERROR_ACCESS_DENIED || error == ERROR_LOCK_VIOLATION) { Sleep(retryDelayMs); continue; } else { break; } + if (MoveFileExW(wTemp.c_str(), wTarget.c_str(), + MOVEFILE_REPLACE_EXISTING | MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH)) { + replaced = true; + break; + } + DWORD error = GetLastError(); + if (error == ERROR_SHARING_VIOLATION || error == ERROR_ACCESS_DENIED || error == ERROR_LOCK_VIOLATION) { + Sleep(retryDelayMs); + continue; + } else { + break; + } + } + if (!replaced) { + std::error_code rec; + std::filesystem::remove(tempPath, rec); + s.setError(FileError::IOError, "Failed to replace file (streaming writeLine)", p); + s.complete(FileOpStatus::Failed); + return; } - if (!replaced) { std::error_code rec; std::filesystem::remove(tempPath, rec); s.setError(FileError::IOError, "Failed to replace file (streaming writeLine)", p); s.complete(FileOpStatus::Failed); return; } #else - std::error_code rec; std::filesystem::rename(tempPath, p, rec); if (rec) { std::filesystem::remove(tempPath, rec); s.setError(FileError::IOError, "Failed to replace file (streaming writeLine)", p, rec); s.complete(FileOpStatus::Failed); return; } + std::error_code rec; + std::filesystem::rename(tempPath, p, rec); + if (rec) { + std::filesystem::remove(tempPath, rec); + s.setError(FileError::IOError, "Failed to replace file (streaming writeLine)", 
p, rec); + s.complete(FileOpStatus::Failed); + return; + } #endif // Report bytes written - std::error_code sec; auto finalSize = std::filesystem::file_size(p, sec); s.wrote = sec ? 0 : finalSize; - s.complete(FileOpStatus::Complete); return; - } while(false); + std::error_code sec; + auto finalSize = std::filesystem::file_size(p, sec); + s.wrote = sec ? 0 : finalSize; + s.complete(FileOpStatus::Complete); + return; + } while (false); #endif auto targetPath = std::filesystem::path(p); @@ -771,8 +862,10 @@ void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const #if defined(__unix__) || defined(__APPLE__) std::unique_ptr lockToken; if (_vfs && _vfs->_cfg.defaultUseLockFile) { - std::error_code lockEc; bool timedOut = false; - lockToken = acquireSiblingLockPOSIX(targetPath, _vfs->_cfg.lockAcquireTimeout, _vfs->_cfg.lockSuffix, lockEc, timedOut); + std::error_code lockEc; + bool timedOut = false; + lockToken = + acquireSiblingLockPOSIX(targetPath, _vfs->_cfg.lockAcquireTimeout, _vfs->_cfg.lockSuffix, lockEc, timedOut); if (!lockToken) { if (timedOut) { s.setError(FileError::Timeout, "Lock acquisition timed out", p); @@ -800,7 +893,8 @@ void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const { std::ifstream inBin(p, std::ios::in | std::ios::binary); if (inBin) { - std::ostringstream ss; ss << inBin.rdbuf(); + std::ostringstream ss; + ss << inBin.rdbuf(); data = ss.str(); if (data.empty() || data.back() == '\n') { originalFinalNewline = true; @@ -808,14 +902,22 @@ void LocalFileSystemBackend::doWriteLine(FileOperationHandle::OpState& s, const size_t crlf = 0, lf = 0; for (size_t i = 0; i < data.size(); ++i) { if (data[i] == '\n') { - if (i > 0 && data[i-1] == '\r') ++crlf; else ++lf; + if (i > 0 && data[i - 1] == '\r') + ++crlf; + else + ++lf; } } - if (crlf > lf) eol = "\r\n"; else if (lf > crlf) eol = "\n"; else eol = platformDefaultEol; + if (crlf > lf) + eol = "\r\n"; + else if (lf > crlf) + eol = "\n"; + else + eol = 
platformDefaultEol; } else { // File does not exist yet: adopt platform default EOL and default to final newline present eol = platformDefaultEol; - originalFinalNewline = true; // backend default for new files: end with a newline + originalFinalNewline = true; // backend default for new files: end with a newline } } if (eol.empty()) eol = platformDefaultEol; @@ -917,16 +1019,17 @@ FileOperationHandle LocalFileSystemBackend::readFile(const std::string& path, Re return submitWork(path, [options](FileOperationHandle::OpState& s, const std::string& p) { // Check for special files (FIFO, device, socket) if (isSpecialFile(p)) { - s.setError(FileError::InvalidPath, "Cannot perform file operations on special files (FIFO, device, socket)", p); + s.setError(FileError::InvalidPath, "Cannot perform file operations on special files (FIFO, device, socket)", + p); s.complete(FileOpStatus::Failed); return; } - #if defined(_WIN32) +#if defined(_WIN32) auto osPathIn = toWinLongPath(p); - #else +#else const auto& osPathIn = p; - #endif +#endif std::ifstream in(osPathIn, std::ios::in | std::ios::binary); if (!in) { const int saved_errno = errno; // Capture errno immediately @@ -944,28 +1047,40 @@ FileOperationHandle LocalFileSystemBackend::readFile(const std::string& path, Re switch (werr) { case ERROR_INVALID_NAME: case ERROR_FILENAME_EXCED_RANGE: - case ERROR_PATH_NOT_FOUND: code = FileError::InvalidPath; break; + case ERROR_PATH_NOT_FOUND: + code = FileError::InvalidPath; + break; case ERROR_SHARING_VIOLATION: - case ERROR_LOCK_VIOLATION: code = FileError::Conflict; break; - case ERROR_ACCESS_DENIED: code = FileError::AccessDenied; break; - case ERROR_DISK_FULL: code = FileError::DiskFull; break; - default: code = FileError::IOError; break; + case ERROR_LOCK_VIOLATION: + code = FileError::Conflict; + break; + case ERROR_ACCESS_DENIED: + code = FileError::AccessDenied; + break; + case ERROR_DISK_FULL: + code = FileError::DiskFull; + break; + default: + code = FileError::IOError; + 
break; } - s.setError(code, "Cannot open file for reading", p, std::error_code((int)werr, std::system_category())); + s.setError(code, "Cannot open file for reading", p, + std::error_code((int)werr, std::system_category())); } else #endif { - s.setError(FileError::AccessDenied, "Cannot open file for reading", p, std::error_code(saved_errno, std::generic_category())); + s.setError(FileError::AccessDenied, "Cannot open file for reading", p, + std::error_code(saved_errno, std::generic_category())); } } s.complete(FileOpStatus::Failed); return; } - + if (options.offset > 0) { in.seekg(options.offset, std::ios::beg); } - + if (options.length.has_value()) { const size_t requested = options.length.value(); s.bytes.resize(requested); @@ -983,30 +1098,31 @@ FileOperationHandle LocalFileSystemBackend::readFile(const std::string& path, Re s.bytes.resize(toRead); in.read(reinterpret_cast(s.bytes.data()), toRead); } - + s.complete(FileOpStatus::Complete); }); } -FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, std::span data, WriteOptions options) { +FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, std::span data, + WriteOptions options) { // Route backend writes through VFS submitSerialized to honor advisory fallback policy and scope mapping. // Delegate the actual I/O to doWriteFile, which executes synchronously under the serialized section. 
if (_vfs) { auto buf = std::vector(data.begin(), data.end()); - return _vfs->submitSerialized(path, - [this, buf = std::move(buf), options] - (FileOperationHandle::OpState& s, std::shared_ptr /*backend*/, const std::string& p, const ExecContext& /*ctx*/) mutable { + return _vfs->submitSerialized( + path, [this, buf = std::move(buf), options](FileOperationHandle::OpState& s, + std::shared_ptr /*backend*/, + const std::string& p, const ExecContext& /*ctx*/) mutable { this->doWriteFile(s, p, std::span(buf.data(), buf.size()), options); - } - ); + }); } // Fallback: no VFS associated (rare). Execute synchronously via submitWork and doWriteFile. - return submitWork(path, [this, data = std::vector(data.begin(), data.end()), options] - (FileOperationHandle::OpState& s, const std::string& p) mutable { - + return submitWork(path, [this, data = std::vector(data.begin(), data.end()), options]( + FileOperationHandle::OpState& s, const std::string& p) mutable { // Check for special files (FIFO, device, socket) if (isSpecialFile(p)) { - s.setError(FileError::InvalidPath, "Cannot perform file operations on special files (FIFO, device, socket)", p); + s.setError(FileError::InvalidPath, "Cannot perform file operations on special files (FIFO, device, socket)", + p); s.complete(FileOpStatus::Failed); return; } @@ -1072,7 +1188,8 @@ FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, s if (stEc) { s.setError(FileError::InvalidPath, "Invalid path or unsupported filename", p, stEc); } else { - s.setError(FileError::AccessDenied, "Cannot open file for writing", p, std::error_code(saved_errno, std::generic_category())); + s.setError(FileError::AccessDenied, "Cannot open file for writing", p, + std::error_code(saved_errno, std::generic_category())); } } s.complete(FileOpStatus::Failed); @@ -1089,14 +1206,15 @@ FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, s // If ensureFinalNewline is requested and this is a whole-file write 
(truncate or offset==0 and not append), // add a platform-default newline if the payload does not already end with '\n'. - if (options.ensureFinalNewline.value_or(false) && !options.append && (options.truncate || options.offset == 0)) { - #if defined(_WIN32) + if (options.ensureFinalNewline.value_or(false) && !options.append && + (options.truncate || options.offset == 0)) { +#if defined(_WIN32) const char* eol = "\r\n"; const size_t eolLen = 2; - #else +#else const char* eol = "\n"; const size_t eolLen = 1; - #endif +#endif bool endsWithLF = (!data.empty() && reinterpret_cast(data.data())[data.size() - 1] == '\n'); if (!endsWithLF) { out.write(eol, static_cast(eolLen)); @@ -1122,7 +1240,8 @@ FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, s if (::fcntl(fd, F_FULLFSYNC) != 0) { const int sync_errno = errno; ::close(fd); - s.setError(FileError::IOError, "F_FULLFSYNC failed", p, std::error_code(sync_errno, std::generic_category())); + s.setError(FileError::IOError, "F_FULLFSYNC failed", p, + std::error_code(sync_errno, std::generic_category())); s.complete(FileOpStatus::Failed); return; } @@ -1168,10 +1287,9 @@ FileOperationHandle LocalFileSystemBackend::writeFile(const std::string& path, s FileOperationHandle LocalFileSystemBackend::deleteFile(const std::string& path) { return submitWork(path, [](FileOperationHandle::OpState& s, const std::string& p) { - std::error_code ec; std::filesystem::remove(p, ec); - + if (ec && std::filesystem::exists(p)) { s.setError(FileError::IOError, "Failed to delete file", p, ec); s.complete(FileOpStatus::Failed); @@ -1183,7 +1301,6 @@ FileOperationHandle LocalFileSystemBackend::deleteFile(const std::string& path) FileOperationHandle LocalFileSystemBackend::createFile(const std::string& path) { return submitWork(path, [this](FileOperationHandle::OpState& s, const std::string& p) { - std::error_code ec; const bool createParents = _vfs ? 
_vfs->_cfg.defaultCreateParentDirs : false; if (createParents) { @@ -1197,7 +1314,7 @@ FileOperationHandle LocalFileSystemBackend::createFile(const std::string& path) } } } - + std::ofstream out(p, std::ios::out | std::ios::binary | std::ios::trunc); if (!out) { std::error_code stEc; @@ -1205,7 +1322,8 @@ FileOperationHandle LocalFileSystemBackend::createFile(const std::string& path) if (stEc) { s.setError(FileError::InvalidPath, "Invalid path or unsupported filename", p, stEc); } else { - s.setError(FileError::AccessDenied, "Cannot create file", p, std::error_code(errno, std::generic_category())); + s.setError(FileError::AccessDenied, "Cannot create file", p, + std::error_code(errno, std::generic_category())); } s.complete(FileOpStatus::Failed); } else { @@ -1255,8 +1373,7 @@ FileOperationHandle LocalFileSystemBackend::getMetadata(const std::string& path) if (!ec) { // Convert file_time_type to system_clock::time_point auto sctp = std::chrono::time_point_cast( - lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now() - ); + lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now()); meta.lastModified = sctp; } } @@ -1283,13 +1400,10 @@ FileOperationHandle LocalFileSystemBackend::getMetadataBatch(const BatchMetadata meta.path = path; WIN32_FIND_DATAW findData; - HANDLE hFind = FindFirstFileExW( - std::filesystem::path(path).wstring().c_str(), - FindExInfoBasic, // Don't retrieve short names - faster! - &findData, - FindExSearchNameMatch, - nullptr, - FIND_FIRST_EX_LARGE_FETCH // Optimize for batch queries + HANDLE hFind = FindFirstFileExW(std::filesystem::path(path).wstring().c_str(), + FindExInfoBasic, // Don't retrieve short names - faster! 
+ &findData, FindExSearchNameMatch, nullptr, + FIND_FIRST_EX_LARGE_FETCH // Optimize for batch queries ); if (hFind == INVALID_HANDLE_VALUE) { @@ -1300,8 +1414,7 @@ FileOperationHandle LocalFileSystemBackend::getMetadataBatch(const BatchMetadata meta.exists = true; meta.isDirectory = (findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; - meta.isRegularFile = !meta.isDirectory && - (findData.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) != 0; + meta.isRegularFile = !meta.isDirectory && (findData.dwFileAttributes & FILE_ATTRIBUTE_NORMAL) != 0; meta.isSymlink = (findData.dwFileAttributes & FILE_ATTRIBUTE_REPARSE_POINT) != 0; // Calculate file size @@ -1394,7 +1507,7 @@ FileOperationHandle LocalFileSystemBackend::createDirectory(const std::string& p return submitWork(path, [](FileOperationHandle::OpState& s, const std::string& p) { std::error_code ec; std::filesystem::create_directories(p, ec); - + if (ec) { s.setError(FileError::IOError, "Cannot create directory", p, ec); s.complete(FileOpStatus::Failed); @@ -1408,7 +1521,7 @@ FileOperationHandle LocalFileSystemBackend::removeDirectory(const std::string& p return submitWork(path, [](FileOperationHandle::OpState& s, const std::string& p) { std::error_code ec; std::filesystem::remove_all(p, ec); - + if (ec) { s.setError(FileError::IOError, "Cannot remove directory", p, ec); s.complete(FileOpStatus::Failed); @@ -1472,25 +1585,19 @@ FileOperationHandle LocalFileSystemBackend::listDirectory(const std::string& pat // Get permissions auto perms = status.permissions(); - auto has = [&](std::filesystem::perms perm) { - return (perms & perm) != std::filesystem::perms::none; - }; - meta.readable = has(std::filesystem::perms::owner_read) || - has(std::filesystem::perms::group_read) || - has(std::filesystem::perms::others_read); - meta.writable = has(std::filesystem::perms::owner_write) || - has(std::filesystem::perms::group_write) || - has(std::filesystem::perms::others_write); - meta.executable = 
has(std::filesystem::perms::owner_exec) || - has(std::filesystem::perms::group_exec) || - has(std::filesystem::perms::others_exec); + auto has = [&](std::filesystem::perms perm) { return (perms & perm) != std::filesystem::perms::none; }; + meta.readable = has(std::filesystem::perms::owner_read) || has(std::filesystem::perms::group_read) || + has(std::filesystem::perms::others_read); + meta.writable = has(std::filesystem::perms::owner_write) || has(std::filesystem::perms::group_write) || + has(std::filesystem::perms::others_write); + meta.executable = has(std::filesystem::perms::owner_exec) || has(std::filesystem::perms::group_exec) || + has(std::filesystem::perms::others_exec); // Get last modified time auto lwt = fsEntry.last_write_time(ec); if (!ec) { auto sctp = std::chrono::time_point_cast( - lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now() - ); + lwt - std::filesystem::file_time_type::clock::now() + std::chrono::system_clock::now()); meta.lastModified = sctp; } @@ -1567,9 +1674,8 @@ FileOperationHandle LocalFileSystemBackend::listDirectory(const std::string& pat if (options.sortBy != ListDirectoryOptions::None) { switch (options.sortBy) { case ListDirectoryOptions::ByName: - std::sort(entries.begin(), entries.end(), [](const DirectoryEntry& a, const DirectoryEntry& b) { - return a.name < b.name; - }); + std::sort(entries.begin(), entries.end(), + [](const DirectoryEntry& a, const DirectoryEntry& b) { return a.name < b.name; }); break; case ListDirectoryOptions::BySize: std::sort(entries.begin(), entries.end(), [](const DirectoryEntry& a, const DirectoryEntry& b) { @@ -1609,18 +1715,18 @@ std::unique_ptr LocalFileSystemBackend::openStream(const std::string FileOperationHandle LocalFileSystemBackend::readLine(const std::string& path, size_t lineNumber) { return submitWork(path, [lineNumber](FileOperationHandle::OpState& s, const std::string& p) { - #if defined(_WIN32) +#if defined(_WIN32) auto osPathIn2 = toWinLongPath(p); - 
#else +#else const auto& osPathIn2 = p; - #endif +#endif std::ifstream in(osPathIn2, std::ios::in); if (!in) { s.setError(FileError::FileNotFound, "File not found or cannot be opened", p); s.complete(FileOpStatus::Failed); return; } - + std::string line; size_t currentLine = 0; while (std::getline(in, line)) { @@ -1636,17 +1742,17 @@ FileOperationHandle LocalFileSystemBackend::readLine(const std::string& path, si } currentLine++; } - + s.complete(FileOpStatus::Partial); // Line not found }); } -FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, size_t lineNumber, std::string_view line) { - return submitWork(path, [this, lineNumber, line = std::string(line)] - (FileOperationHandle::OpState& s, const std::string& p) mutable { - +FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, size_t lineNumber, + std::string_view line) { + return submitWork(path, [this, lineNumber, line = std::string(line)](FileOperationHandle::OpState& s, + const std::string& p) mutable { std::error_code ec; - + // Ensure destination parent directory exists if configured const bool createParents = _vfs ? 
_vfs->_cfg.defaultCreateParentDirs : false; if (createParents) { @@ -1660,13 +1766,13 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s } } } - + // Generate temporary filename in the same directory as destination for atomic replace auto targetPath = std::filesystem::path(p); auto dir = targetPath.parent_path(); auto base = targetPath.filename().string(); auto tempPath = createSecureTempPath(dir, base); - + // Determine existing file content and line-ending style std::string data; bool originalFinalNewline = false; @@ -1679,7 +1785,8 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s { std::ifstream inBin(p, std::ios::in | std::ios::binary); if (inBin) { - std::ostringstream ss; ss << inBin.rdbuf(); + std::ostringstream ss; + ss << inBin.rdbuf(); data = ss.str(); if (!data.empty()) { if (data.back() == '\n') { @@ -1690,16 +1797,24 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s size_t crlf = 0, lf = 0; for (size_t i = 0; i < data.size(); ++i) { if (data[i] == '\n') { - if (i > 0 && data[i-1] == '\r') ++crlf; else ++lf; + if (i > 0 && data[i - 1] == '\r') + ++crlf; + else + ++lf; } } - if (crlf > lf) eol = "\r\n"; else if (lf > crlf) eol = "\n"; else eol = platformDefaultEol; + if (crlf > lf) + eol = "\r\n"; + else if (lf > crlf) + eol = "\n"; + else + eol = platformDefaultEol; } else { eol = platformDefaultEol; } } if (eol.empty()) eol = platformDefaultEol; - + // Parse existing lines without EOLs std::vector linesVec; if (!data.empty()) { @@ -1720,7 +1835,7 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s linesVec.push_back(std::move(cur)); } } - + // Apply writeLine semantics if (lineNumber < linesVec.size()) { linesVec[lineNumber] = line; @@ -1733,7 +1848,7 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s linesVec.emplace_back(line); } } - + // Write to temp file using chosen EOL and 
preserving original final-newline presence { std::ofstream out(tempPath, std::ios::out | std::ios::trunc | std::ios::binary); @@ -1742,7 +1857,8 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s s.complete(FileOpStatus::Failed); return; } - // Decide final newline presence: if original had no content, default to true; otherwise preserve prior policy + // Decide final newline presence: if original had no content, default to true; otherwise preserve prior + // policy const bool finalNewline = data.empty() ? true : originalFinalNewline; for (size_t i = 0; i < linesVec.size(); ++i) { out.write(linesVec[i].data(), static_cast(linesVec[i].size())); @@ -1763,34 +1879,35 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s return; } } - - // Atomic replace/rename - #if defined(_WIN32) - // Use Windows-specific atomic rename with retry logic (to avoid sharing violations) - auto wsrc = tempPath.wstring(); - auto wdst = targetPath.wstring(); - - const int maxRetries = 50; - const int retryDelayMs = 10; - bool success = false; - for (int i = 0; i < maxRetries; ++i) { - if (MoveFileExW(wsrc.c_str(), wdst.c_str(), - MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH) != 0) { - success = true; break; - } - DWORD error = GetLastError(); - if (error == ERROR_SHARING_VIOLATION || error == ERROR_ACCESS_DENIED || error == ERROR_LOCK_VIOLATION) { - Sleep(retryDelayMs); continue; - } + +// Atomic replace/rename +#if defined(_WIN32) + // Use Windows-specific atomic rename with retry logic (to avoid sharing violations) + auto wsrc = tempPath.wstring(); + auto wdst = targetPath.wstring(); + + const int maxRetries = 50; + const int retryDelayMs = 10; + bool success = false; + for (int i = 0; i < maxRetries; ++i) { + if (MoveFileExW(wsrc.c_str(), wdst.c_str(), MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH) != 0) { + success = true; break; } - if (!success) { - std::filesystem::remove(tempPath, ec); - 
s.setError(FileError::IOError, "Failed to replace destination file", p); - s.complete(FileOpStatus::Failed); - return; + DWORD error = GetLastError(); + if (error == ERROR_SHARING_VIOLATION || error == ERROR_ACCESS_DENIED || error == ERROR_LOCK_VIOLATION) { + Sleep(retryDelayMs); + continue; } - #else + break; + } + if (!success) { + std::filesystem::remove(tempPath, ec); + s.setError(FileError::IOError, "Failed to replace destination file", p); + s.complete(FileOpStatus::Failed); + return; + } +#else std::filesystem::rename(tempPath, targetPath, ec); if (ec) { std::filesystem::remove(tempPath, ec); @@ -1798,14 +1915,15 @@ FileOperationHandle LocalFileSystemBackend::writeLine(const std::string& path, s s.complete(FileOpStatus::Failed); return; } - #endif - +#endif + s.wrote = line.size(); s.complete(FileOpStatus::Complete); }); } -FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, const std::string& dst, const CopyOptions& options) { +FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, const std::string& dst, + const CopyOptions& options) { return submitWork(src, [this, src, dst, options](FileOperationHandle::OpState& s, const std::string&) { std::error_code ec; @@ -1860,11 +1978,11 @@ FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, con // Regular copy with progress callback if (options.progressCallback && fileSize > 0) { // Chunked copy with progress - #if defined(_WIN32) +#if defined(_WIN32) auto osPathSrc = toWinLongPath(src); - #else +#else const auto& osPathSrc = src; - #endif +#endif std::ifstream in(osPathSrc, std::ios::binary); std::ofstream out(dst, std::ios::binary | std::ios::trunc); @@ -1889,7 +2007,9 @@ FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, con // Call progress callback - if it returns false, cancel if (!options.progressCallback(totalCopied, fileSize)) { // Ensure streams are closed before cleanup on Windows - out.flush(); out.close(); 
in.close(); + out.flush(); + out.close(); + in.close(); std::filesystem::remove(dst, ec); // Clean up partial copy before completing s.setError(FileError::Unknown, "Copy cancelled by user", src); s.complete(FileOpStatus::Failed); @@ -1900,7 +2020,9 @@ FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, con if (!out.good()) { // Close streams before attempting cleanup - out.flush(); out.close(); in.close(); + out.flush(); + out.close(); + in.close(); std::filesystem::remove(dst, ec); s.setError(FileError::IOError, "Write error during copy", dst); s.complete(FileOpStatus::Failed); @@ -1910,9 +2032,9 @@ FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, con s.wrote = totalCopied; } else { // Fast copy without progress using std::filesystem - std::filesystem::copy_options copyOpts = options.overwriteExisting ? - std::filesystem::copy_options::overwrite_existing : - std::filesystem::copy_options::none; + std::filesystem::copy_options copyOpts = options.overwriteExisting + ? 
std::filesystem::copy_options::overwrite_existing + : std::filesystem::copy_options::none; if (!std::filesystem::copy_file(src, dst, copyOpts, ec)) { s.setError(FileError::IOError, "Copy failed", src, ec); @@ -1933,7 +2055,8 @@ FileOperationHandle LocalFileSystemBackend::copyFile(const std::string& src, con }); } -FileOperationHandle LocalFileSystemBackend::moveFile(const std::string& src, const std::string& dst, bool overwriteExisting) { +FileOperationHandle LocalFileSystemBackend::moveFile(const std::string& src, const std::string& dst, + bool overwriteExisting) { return submitWork(src, [this, src, dst, overwriteExisting](FileOperationHandle::OpState& s, const std::string&) { std::error_code ec; @@ -1971,7 +2094,7 @@ FileOperationHandle LocalFileSystemBackend::moveFile(const std::string& src, con // Remove destination if overwriting if (overwriteExisting && std::filesystem::exists(dst, ec)) { std::filesystem::remove(dst, ec); - ec.clear(); // Clear error if removal fails - rename/copy will handle it + ec.clear(); // Clear error if removal fails - rename/copy will handle it } // Try rename first (atomic if on same filesystem) @@ -1987,8 +2110,9 @@ FileOperationHandle LocalFileSystemBackend::moveFile(const std::string& src, con // Rename failed (likely cross-filesystem) - do copy + delete ec.clear(); if (!std::filesystem::copy_file(src, dst, - overwriteExisting ? std::filesystem::copy_options::overwrite_existing : - std::filesystem::copy_options::none, ec)) { + overwriteExisting ? 
std::filesystem::copy_options::overwrite_existing + : std::filesystem::copy_options::none, + ec)) { s.setError(FileError::IOError, "Copy failed during move", src, ec); s.complete(FileOpStatus::Failed); return; @@ -2021,7 +2145,7 @@ BackendCapabilities LocalFileSystemBackend::getCapabilities() const { return caps; } -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO // Backend-aware path normalization for LocalFileSystemBackend std::string EntropyEngine::Core::IO::LocalFileSystemBackend::normalizeKey(const std::string& path) const { @@ -2035,7 +2159,7 @@ std::string EntropyEngine::Core::IO::LocalFileSystemBackend::normalizeKey(const auto canon = std::filesystem::weakly_canonical(p, ec); std::string s = (ec ? p.lexically_normal().string() : canon.string()); #if defined(_WIN32) - std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c){ return static_cast(std::tolower(c)); }); + std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return static_cast(std::tolower(c)); }); #endif return s; } @@ -2053,7 +2177,9 @@ std::string EntropyEngine::Core::IO::LocalFileSystemBackend::normalizeKey(const // 3. Use a lock server/database // // For now, VFS provides in-process serialization which is sufficient for most use cases. 
-EntropyEngine::Core::IO::IFileSystemBackend::AcquireWriteScopeResult EntropyEngine::Core::IO::LocalFileSystemBackend::acquireWriteScope(const std::string& path, EntropyEngine::Core::IO::IFileSystemBackend::AcquireScopeOptions options) { +EntropyEngine::Core::IO::IFileSystemBackend::AcquireWriteScopeResult +EntropyEngine::Core::IO::LocalFileSystemBackend::acquireWriteScope( + const std::string& path, EntropyEngine::Core::IO::IFileSystemBackend::AcquireScopeOptions options) { (void)path; (void)options; @@ -2068,6 +2194,7 @@ EntropyEngine::Core::IO::IFileSystemBackend::AcquireWriteScopeResult EntropyEngi } }); result.status = AcquireWriteScopeResult::Status::NotSupported; - result.message = "flock() disabled - incompatible with atomic file replacement (temp+rename). Using VFS advisory locks."; + result.message = + "flock() disabled - incompatible with atomic file replacement (temp+rename). Using VFS advisory locks."; return result; } diff --git a/src/VirtualFileSystem/LocalFileSystemBackend.h b/src/VirtualFileSystem/LocalFileSystemBackend.h index 475fd5e..04d36b8 100644 --- a/src/VirtualFileSystem/LocalFileSystemBackend.h +++ b/src/VirtualFileSystem/LocalFileSystemBackend.h @@ -1,45 +1,53 @@ #pragma once -#include "IFileSystemBackend.h" #include #include -namespace EntropyEngine::Core::IO { +#include "IFileSystemBackend.h" + +namespace EntropyEngine::Core::IO +{ -class LocalFileSystemBackend : public IFileSystemBackend { +class LocalFileSystemBackend : public IFileSystemBackend +{ public: LocalFileSystemBackend(); ~LocalFileSystemBackend() override = default; - + // Core file operations FileOperationHandle readFile(const std::string& path, ReadOptions options = {}) override; - FileOperationHandle writeFile(const std::string& path, std::span data, WriteOptions options = {}) override; + FileOperationHandle writeFile(const std::string& path, std::span data, + WriteOptions options = {}) override; FileOperationHandle deleteFile(const std::string& path) override; 
FileOperationHandle createFile(const std::string& path) override; - + // Metadata operations FileOperationHandle getMetadata(const std::string& path) override; bool exists(const std::string& path) override; FileOperationHandle getMetadataBatch(const BatchMetadataOptions& options) override; - + // Directory operations FileOperationHandle createDirectory(const std::string& path) override; FileOperationHandle removeDirectory(const std::string& path) override; FileOperationHandle listDirectory(const std::string& path, ListDirectoryOptions options = {}) override; - + // Stream support std::unique_ptr openStream(const std::string& path, StreamOptions options = {}) override; - + // Line operations FileOperationHandle readLine(const std::string& path, size_t lineNumber) override; FileOperationHandle writeLine(const std::string& path, size_t lineNumber, std::string_view line) override; // Copy/Move operations (Phase 2) - FileOperationHandle copyFile(const std::string& src, const std::string& dst, const CopyOptions& options = {}) override; - FileOperationHandle moveFile(const std::string& src, const std::string& dst, bool overwriteExisting = false) override; - + FileOperationHandle copyFile(const std::string& src, const std::string& dst, + const CopyOptions& options = {}) override; + FileOperationHandle moveFile(const std::string& src, const std::string& dst, + bool overwriteExisting = false) override; + // Backend info BackendCapabilities getCapabilities() const override; - std::string getBackendType() const override { return "LocalFileSystem"; } + std::string getBackendType() const override { + return "LocalFileSystem"; + } // Backend-aware normalization for identity/locking std::string normalizeKey(const std::string& path) const override; @@ -48,10 +56,12 @@ class LocalFileSystemBackend : public IFileSystemBackend { AcquireWriteScopeResult acquireWriteScope(const std::string& path, AcquireScopeOptions options = {}) override; // Synchronous operations for use by VFS 
submitSerialized (these execute inline, no async work) - void doWriteFile(FileOperationHandle::OpState& s, const std::string& path, std::span data, WriteOptions options); + void doWriteFile(FileOperationHandle::OpState& s, const std::string& path, std::span data, + WriteOptions options); void doDeleteFile(FileOperationHandle::OpState& s, const std::string& path); void doCreateFile(FileOperationHandle::OpState& s, const std::string& path); - void doWriteLine(FileOperationHandle::OpState& s, const std::string& path, size_t lineNumber, std::string_view line); + void doWriteLine(FileOperationHandle::OpState& s, const std::string& path, size_t lineNumber, + std::string_view line); private: // Submit work to the VFS work group @@ -59,11 +69,12 @@ class LocalFileSystemBackend : public IFileSystemBackend { std::function work); // Context-aware submit, used when called from VFS serialized paths - FileOperationHandle submitWork(const std::string& path, - std::function work, - const ExecContext& ctx); + FileOperationHandle submitWork( + const std::string& path, + std::function work, + const ExecContext& ctx); // No internal write lock map; serialization handled by VFS policy }; -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/VirtualFileSystem.cpp b/src/VirtualFileSystem/VirtualFileSystem.cpp index a977c38..b622984 100644 --- a/src/VirtualFileSystem/VirtualFileSystem.cpp +++ b/src/VirtualFileSystem/VirtualFileSystem.cpp @@ -1,26 +1,25 @@ #include "VirtualFileSystem.h" -#include "FileOperationHandle.h" -#include "FileHandle.h" + +#include +#include +#include +#include + #include "DirectoryHandle.h" +#include "FileHandle.h" +#include "FileOperationHandle.h" +#include "FileWatchManager.h" #include "LocalFileSystemBackend.h" #include "WriteBatch.h" -#include "FileWatchManager.h" -#include -#include -#include -#include using EntropyEngine::Core::Concurrency::ExecutionType; -namespace 
EntropyEngine::Core::IO { - +namespace EntropyEngine::Core::IO +{ // Constructor / Destructor VirtualFileSystem::VirtualFileSystem(EntropyEngine::Core::Concurrency::WorkContractGroup* group, Config cfg) - : _group(group) - , _cfg(cfg) - , _watchManager(std::make_unique(this)) { -} + : _group(group), _cfg(cfg), _watchManager(std::make_unique(this)) {} VirtualFileSystem::~VirtualFileSystem() { // Ensure FileWatchManager is destroyed before WorkContractGroup is potentially destroyed @@ -42,15 +41,19 @@ std::shared_ptr VirtualFileSystem::getDefaultBackend() const } // VFS submit helper -FileOperationHandle VirtualFileSystem::submit(std::string path, std::function body) const { +FileOperationHandle VirtualFileSystem::submit( + std::string path, + std::function body) const { auto st = makeState(); // Set cooperative progress hook so wait() can pump ready work - st->progress = [grp=_group]() { if (grp) grp->executeAllBackgroundWork(); }; + st->progress = [grp = _group]() { + if (grp) grp->executeAllBackgroundWork(); + }; - auto work = [this, st, p=std::move(path), body=std::move(body)]() mutable { + auto work = [this, st, p = std::move(path), body = std::move(body)]() mutable { st->st.store(FileOpStatus::Running, std::memory_order_release); try { - ExecContext ctx{ _group }; + ExecContext ctx{_group}; body(*st, p, ctx); // Ensure complete() was called - if not, call it with success // This prevents hanging if body forgets to call complete() @@ -96,13 +99,18 @@ FileOperationHandle VirtualFileSystem::submit(std::string path, std::function, const std::string&, const ExecContext&)> op) const { +FileOperationHandle VirtualFileSystem::submitSerialized( + std::string path, std::function, + const std::string&, const ExecContext&)> + op) const { auto backend = findBackend(path); auto vfsLock = lockForPath(path); auto advTimeout = _cfg.advisoryAcquireTimeout; auto policy = _cfg.advisoryFallback; - return submit(std::move(path), [backend, vfsLock, advTimeout, policy, 
op=std::move(op)](FileOperationHandle::OpState& s, const std::string& p, const ExecContext& ctx) mutable { + return submit(std::move(path), [backend, vfsLock, advTimeout, policy, op = std::move(op)]( + FileOperationHandle::OpState& s, const std::string& p, + const ExecContext& ctx) mutable { // Fail if no backend available - don't silently skip the operation if (!backend) { s.setError(FileError::IOError, "No file system backend available for path", p); @@ -113,7 +121,8 @@ FileOperationHandle VirtualFileSystem::submitSerialized(std::string path, std::f // Try backend-specific write scope first IFileSystemBackend::AcquireScopeOptions opts; opts.nonBlocking = false; - if (policy == Config::AdvisoryFallbackPolicy::FallbackWithTimeout || policy == Config::AdvisoryFallbackPolicy::None) { + if (policy == Config::AdvisoryFallbackPolicy::FallbackWithTimeout || + policy == Config::AdvisoryFallbackPolicy::None) { opts.timeout = advTimeout; } auto scopeRes = backend->acquireWriteScope(p, opts); @@ -133,10 +142,15 @@ FileOperationHandle VirtualFileSystem::submitSerialized(std::string path, std::f scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut || scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Error) { FileError code; - if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) code = FileError::Timeout; - else if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) code = FileError::Conflict; - else code = FileError::IOError; - s.setError(code, scopeRes.message.empty() ? 
std::string("Backend write scope unavailable") : scopeRes.message, p, scopeRes.errorCode); + if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) + code = FileError::Timeout; + else if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) + code = FileError::Conflict; + else + code = FileError::IOError; + s.setError(code, + scopeRes.message.empty() ? std::string("Backend write scope unavailable") : scopeRes.message, + p, scopeRes.errorCode); s.complete(FileOpStatus::Failed); return; } @@ -148,7 +162,10 @@ FileOperationHandle VirtualFileSystem::submitSerialized(std::string path, std::f if (!vfsLock->try_lock_for(advTimeout)) { auto key = backend->normalizeKey(p); auto ms = advTimeout.count(); - s.setError(FileError::Timeout, std::string("Advisory lock acquisition timed out after ") + std::to_string(ms) + " ms (key=" + key + ")", p); + s.setError(FileError::Timeout, + std::string("Advisory lock acquisition timed out after ") + std::to_string(ms) + + " ms (key=" + key + ")", + p); s.complete(FileOpStatus::Failed); return; } @@ -234,8 +251,9 @@ std::shared_ptr VirtualFileSystem::findBackend(const std::st #if defined(_WIN32) // Case-insensitive prefix matching on Windows - auto toLower = [](std::string s){ - std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c){ return static_cast(std::tolower(c)); }); + auto toLower = [](std::string s) { + std::transform(s.begin(), s.end(), s.begin(), + [](unsigned char c) { return static_cast(std::tolower(c)); }); return s; }; const std::string pathLower = toLower(path); @@ -265,7 +283,7 @@ std::string VirtualFileSystem::normalizePath(const std::string& path) const { auto canon = std::filesystem::weakly_canonical(p, ec); std::string s = (ec ? 
p.lexically_normal().string() : canon.string()); #if defined(_WIN32) - std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c){ return static_cast(std::tolower(c)); }); + std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { return static_cast(std::tolower(c)); }); #endif return s; } @@ -273,7 +291,7 @@ std::string VirtualFileSystem::normalizePath(const std::string& path) const { // Lock management for write serialization std::shared_ptr VirtualFileSystem::lockForPath(const std::string& path) const { if (!_cfg.serializeWritesPerPath) return {}; - + std::lock_guard lk(_mapMutex); auto now = std::chrono::steady_clock::now(); std::string key; @@ -282,7 +300,7 @@ std::shared_ptr VirtualFileSystem::lockForPath(const std::stri } else { key = normalizePath(path); } - + // Check if path exists in cache auto it = _writeLocks.find(key); if (it != _writeLocks.end()) { @@ -293,12 +311,12 @@ std::shared_ptr VirtualFileSystem::lockForPath(const std::stri it->second.lruIt = _lruList.begin(); return it->second.mutex; } - + // Evict old entries if cache is full if (_writeLocks.size() >= _cfg.maxWriteLocksCached) { evictOldLocks(now); } - + // Create new lock entry auto m = std::make_shared(); _lruList.push_front(key); @@ -311,7 +329,7 @@ std::shared_ptr VirtualFileSystem::lockForPath(const std::stri void VirtualFileSystem::evictOldLocks(std::chrono::steady_clock::time_point now) const { // Remove entries that haven't been used recently auto cutoff = now - _cfg.writeLockTimeout; - + // Start from the end of LRU list (least recently used) while (!_lruList.empty() && _writeLocks.size() >= _cfg.maxWriteLocksCached) { const auto& path = _lruList.back(); @@ -344,7 +362,8 @@ std::unique_ptr VirtualFileSystem::openStream(const std::string& pat return backend->openStream(path, options); } -std::unique_ptr VirtualFileSystem::openBufferedStream(const std::string& path, size_t bufferSize, StreamOptions options) { +std::unique_ptr 
VirtualFileSystem::openBufferedStream(const std::string& path, size_t bufferSize, + StreamOptions options) { // Force unbuffered inner; buffering is handled by wrapper options.buffered = false; auto inner = openStream(path, options); @@ -357,7 +376,8 @@ std::unique_ptr VirtualFileSystem::createWriteBatch(const std::strin } // File watching -FileWatch* VirtualFileSystem::watchDirectory(const std::string& path, FileWatchCallback callback, const WatchOptions& options) { +FileWatch* VirtualFileSystem::watchDirectory(const std::string& path, FileWatchCallback callback, + const WatchOptions& options) { if (!_watchManager) { return nullptr; } @@ -370,4 +390,4 @@ void VirtualFileSystem::unwatchDirectory(FileWatch* watch) { } } -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/VirtualFileSystem.h b/src/VirtualFileSystem/VirtualFileSystem.h index 8d59f72..5f177df 100644 --- a/src/VirtualFileSystem/VirtualFileSystem.h +++ b/src/VirtualFileSystem/VirtualFileSystem.h @@ -1,67 +1,75 @@ /** * @file VirtualFileSystem.h * @brief High-level facade for file operations over pluggable backends - * + * * VirtualFileSystem (VFS) routes file operations to a selected backend (local filesystem by default) * and provides ergonomic helpers: value-semantic FileHandle creation, advisory per-path write * serialization, batching, and file watching. Use with a WorkContractGroup; operations are executed * asynchronously and can be waited on. See Examples/VirtualFileSystemExample.cpp for end-to-end usage. 
*/ #pragma once -#include -#include +#include +#include +#include +#include +#include +#include #include #include +#include #include -#include -#include -#include -#include -#include -#include +#include + #include "../Concurrency/WorkContractGroup.h" -#include "FileOperationHandle.h" #include "FileHandle.h" -#include "IFileSystemBackend.h" +#include "FileOperationHandle.h" #include "FileWatch.h" +#include "IFileSystemBackend.h" -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -class FileStream; // fwd -class BufferedFileStream; // fwd -class WriteBatch; // fwd -class FileWatchManager; // fwd -class FileWatch; // fwd -class DirectoryHandle; // fwd +class FileStream; // fwd +class BufferedFileStream; // fwd +class WriteBatch; // fwd +class FileWatchManager; // fwd +class FileWatch; // fwd +class DirectoryHandle; // fwd -class VirtualFileSystem { +class VirtualFileSystem +{ public: - struct Config { + struct Config + { bool serializeWritesPerPath; - size_t maxWriteLocksCached; // Maximum number of write locks to cache + size_t maxWriteLocksCached; // Maximum number of write locks to cache std::chrono::minutes writeLockTimeout; // Timeout for unused write locks - bool defaultCreateParentDirs; // Default behavior for creating parent directories + bool defaultCreateParentDirs; // Default behavior for creating parent directories // Advisory locking policy (in-process fallback) - std::chrono::milliseconds advisoryAcquireTimeout; // 5s default - enum class AdvisoryFallbackPolicy { None, FallbackWithTimeout }; + std::chrono::milliseconds advisoryAcquireTimeout; // 5s default + enum class AdvisoryFallbackPolicy + { + None, + FallbackWithTimeout + }; AdvisoryFallbackPolicy advisoryFallback; // Cross-process lock-file serialization (optional) - bool defaultUseLockFile; // default: false - std::chrono::milliseconds lockAcquireTimeout; // default: 5s - std::string lockSuffix; // default: ".lock" + bool defaultUseLockFile; // default: false + 
std::chrono::milliseconds lockAcquireTimeout; // default: 5s + std::string lockSuffix; // default: ".lock" Config() - : serializeWritesPerPath(true) - , maxWriteLocksCached(1024) - , writeLockTimeout(std::chrono::minutes(5)) - , defaultCreateParentDirs(false) - , advisoryAcquireTimeout(std::chrono::milliseconds(5000)) - , advisoryFallback(AdvisoryFallbackPolicy::FallbackWithTimeout) - , defaultUseLockFile(false) - , lockAcquireTimeout(std::chrono::milliseconds(5000)) - , lockSuffix(".lock") {} + : serializeWritesPerPath(true), + maxWriteLocksCached(1024), + writeLockTimeout(std::chrono::minutes(5)), + defaultCreateParentDirs(false), + advisoryAcquireTimeout(std::chrono::milliseconds(5000)), + advisoryFallback(AdvisoryFallbackPolicy::FallbackWithTimeout), + defaultUseLockFile(false), + lockAcquireTimeout(std::chrono::milliseconds(5000)), + lockSuffix(".lock") {} }; explicit VirtualFileSystem(EntropyEngine::Core::Concurrency::WorkContractGroup* group, Config cfg = {}); @@ -70,7 +78,7 @@ class VirtualFileSystem { // Factory is defined in .cpp to avoid circular includes issues /** * @brief Creates a value-semantic handle for the given path - * + * * Routes the path to the appropriate backend (mounted or default). The handle is copyable and * caches a backend-normalized identity key for equality/locking purposes. 
* @param path Target path @@ -81,11 +89,15 @@ class VirtualFileSystem { /** * @brief Shorthand for createFileHandle(path) */ - FileHandle handle(std::string path) { return createFileHandle(std::move(path)); } + FileHandle handle(std::string path) { + return createFileHandle(std::move(path)); + } /** * @brief Functor shorthand for createFileHandle(path) */ - FileHandle operator()(std::string path) { return createFileHandle(std::move(path)); } + FileHandle operator()(std::string path) { + return createFileHandle(std::move(path)); + } /** * @brief Creates a value-semantic handle for a directory path @@ -96,7 +108,7 @@ class VirtualFileSystem { * @return DirectoryHandle bound to this VFS */ DirectoryHandle createDirectoryHandle(std::string path); - + // Streaming convenience /** * @brief Opens a stream via the routed backend @@ -112,8 +124,9 @@ class VirtualFileSystem { * @param options Base stream options (buffered is ignored; wrapper handles buffering) * @return BufferedFileStream unique_ptr, or null on failure */ - std::unique_ptr openBufferedStream(const std::string& path, size_t bufferSize = 65536, StreamOptions options = {}); - + std::unique_ptr openBufferedStream(const std::string& path, size_t bufferSize = 65536, + StreamOptions options = {}); + // Batch operations /** * @brief Creates a WriteBatch builder for atomic multi-line edits @@ -183,7 +196,9 @@ class VirtualFileSystem { * @param body Lambda that populates OpState and calls complete() * @return FileOperationHandle that will be completed when body finishes */ - FileOperationHandle submit(std::string path, std::function body) const; + FileOperationHandle submit( + std::string path, + std::function body) const; /** * @brief Get VFS configuration (for backends) @@ -191,7 +206,9 @@ class VirtualFileSystem { * * Backends can check settings like defaultCreateParentDirs to respect VFS policy. 
*/ - const Config& getConfig() const { return _cfg; } + const Config& getConfig() const { + return _cfg; + } /** * @brief Get WorkContractGroup for advanced scheduling (for backends) @@ -199,7 +216,9 @@ class VirtualFileSystem { * * Backends can use this to detect same-group execution and avoid nested scheduling. */ - EntropyEngine::Core::Concurrency::WorkContractGroup* getWorkGroup() const { return _group; } + EntropyEngine::Core::Concurrency::WorkContractGroup* getWorkGroup() const { + return _group; + } /** * @brief Normalize path for consistent comparison (for backends) @@ -216,17 +235,20 @@ class VirtualFileSystem { Config _cfg{}; // LRU cache for write serialization per path - struct LockEntry { + struct LockEntry + { std::shared_ptr mutex; std::chrono::steady_clock::time_point lastAccess; std::list::iterator lruIt; }; - + mutable std::mutex _mapMutex; mutable std::unordered_map _writeLocks; mutable std::list _lruList; // Most recently used at front - std::shared_ptr makeState() const { return std::make_shared(); } + std::shared_ptr makeState() const { + return std::make_shared(); + } // Lock management std::shared_ptr lockForPath(const std::string& path) const; @@ -245,8 +267,11 @@ class VirtualFileSystem { * Errors include backend message/systemError when provided. * The op must complete inline and call s.complete(). It must NOT schedule nested async work. 
*/ - FileOperationHandle submitSerialized(std::string path, std::function, const std::string&, const ExecContext&)> op) const; - + FileOperationHandle submitSerialized( + std::string path, std::function, + const std::string&, const ExecContext&)> + op) const; + // Backend storage (reference-counted for thread-safe lifetime management) std::shared_ptr _defaultBackend; std::unordered_map> _mountedBackends; @@ -261,4 +286,4 @@ class VirtualFileSystem { friend class FileWatchManager; }; -} // namespace EntropyEngine::Core::IO +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/WriteBatch.cpp b/src/VirtualFileSystem/WriteBatch.cpp index d00279b..1e80683 100644 --- a/src/VirtualFileSystem/WriteBatch.cpp +++ b/src/VirtualFileSystem/WriteBatch.cpp @@ -1,12 +1,14 @@ #include "WriteBatch.h" -#include "VirtualFileSystem.h" -#include "FileHandle.h" -#include -#include -#include + #include -#include +#include +#include #include +#include +#include + +#include "FileHandle.h" +#include "VirtualFileSystem.h" #if defined(_WIN32) static std::string vfs_toWinLongPath(const std::string& path) { @@ -24,17 +26,16 @@ static std::string vfs_toWinLongPath(const std::string& path) { #include #endif #if defined(__unix__) || defined(__APPLE__) -#include // mkstemp(), close(), fsync() -#include // stat(), chmod() -#include // flock() -#include // open(), O_* +#include // open(), O_* +#include // flock() +#include // stat(), chmod() +#include // mkstemp(), close(), fsync() #endif -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ -WriteBatch::WriteBatch(VirtualFileSystem* vfs, std::string path) - : _vfs(vfs), _path(std::move(path)) { -} +WriteBatch::WriteBatch(VirtualFileSystem* vfs, std::string path) : _vfs(vfs), _path(std::move(path)) {} WriteBatch& WriteBatch::writeLine(size_t lineNumber, std::string_view content) { _operations.push_back({OpType::Write, lineNumber, std::string(content)}); @@ -146,7 +147,7 @@ std::vector 
WriteBatch::applyOperations(const std::vector b.lineNumber; }); + [](const Operation& a, const Operation& b) { return a.lineNumber > b.lineNumber; }); for (const auto& op : deleteOps) { if (op.lineNumber < result.size()) { result.erase(result.begin() + op.lineNumber); @@ -155,7 +156,7 @@ std::vector WriteBatch::applyOperations(const std::vector b.lineNumber; }); + [](const Operation& a, const Operation& b) { return a.lineNumber > b.lineNumber; }); for (const auto& op : insertOps) { if (op.lineNumber > result.size()) { result.resize(op.lineNumber); @@ -191,20 +192,24 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { // No operations to commit - return immediate success return FileOperationHandle::immediate(FileOpStatus::Complete); } - + auto backend = _vfs->findBackend(_path); auto vfsLock = _vfs->lockForPath(_path); - return _vfs->submit(_path, [this, backend, vfsLock, ops = _operations, opts](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&) mutable { + return _vfs->submit(_path, [this, backend, vfsLock, ops = _operations, opts]( + FileOperationHandle::OpState& s, const std::string& p, const ExecContext&) mutable { // Prefer backend-provided write scope; fall back to VFS advisory lock with policy/timeout - std::unique_ptr scopeToken(nullptr, [](void*){}); + std::unique_ptr scopeToken(nullptr, [](void*) {}); IFileSystemBackend::AcquireWriteScopeResult scopeRes; if (backend) { IFileSystemBackend::AcquireScopeOptions scopeOpts; scopeOpts.nonBlocking = false; - auto pol = _vfs ? _vfs->_cfg.advisoryFallback : VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout; - if (pol == VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout || pol == VirtualFileSystem::Config::AdvisoryFallbackPolicy::None) { - scopeOpts.timeout = _vfs ? std::optional(_vfs->_cfg.advisoryAcquireTimeout) : std::nullopt; + auto pol = _vfs ? 
_vfs->_cfg.advisoryFallback + : VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout; + if (pol == VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout || + pol == VirtualFileSystem::Config::AdvisoryFallbackPolicy::None) { + scopeOpts.timeout = + _vfs ? std::optional(_vfs->_cfg.advisoryAcquireTimeout) : std::nullopt; } scopeRes = backend->acquireWriteScope(p, scopeOpts); if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Acquired) { @@ -212,23 +217,31 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { } } std::unique_lock pathLock; - auto fallbackPolicy = _vfs ? _vfs->_cfg.advisoryFallback : VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout; + auto fallbackPolicy = + _vfs ? _vfs->_cfg.advisoryFallback : VirtualFileSystem::Config::AdvisoryFallbackPolicy::FallbackWithTimeout; if (!scopeToken && vfsLock) { - bool needFallback = (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::NotSupported) || - (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Acquired && !scopeToken) || - (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) || - (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) || - (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Error); + bool needFallback = + (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::NotSupported) || + (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Acquired && !scopeToken) || + (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) || + (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) || + (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Error); if (needFallback) { if ((scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) || (scopeRes.status == 
IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) || (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Error)) { if (fallbackPolicy == VirtualFileSystem::Config::AdvisoryFallbackPolicy::None) { FileError code; - if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) code = FileError::Timeout; - else if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) code = FileError::Conflict; - else code = FileError::IOError; - s.setError(code, scopeRes.message.empty() ? std::string("Backend write scope unavailable") : scopeRes.message, p, scopeRes.errorCode); + if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::TimedOut) + code = FileError::Timeout; + else if (scopeRes.status == IFileSystemBackend::AcquireWriteScopeResult::Status::Busy) + code = FileError::Conflict; + else + code = FileError::IOError; + s.setError(code, + scopeRes.message.empty() ? std::string("Backend write scope unavailable") + : scopeRes.message, + p, scopeRes.errorCode); s.complete(FileOpStatus::Failed); return; } @@ -237,7 +250,10 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { if (!vfsLock->try_lock_for(_vfs->_cfg.advisoryAcquireTimeout)) { auto key = backend ? 
backend->normalizeKey(p) : _vfs->normalizePath(p); auto ms = _vfs->_cfg.advisoryAcquireTimeout.count(); - s.setError(FileError::Timeout, std::string("Advisory lock acquisition timed out after ") + std::to_string(ms) + " ms (key=" + key + ")", p); + s.setError(FileError::Timeout, + std::string("Advisory lock acquisition timed out after ") + std::to_string(ms) + + " ms (key=" + key + ")", + p); s.complete(FileOpStatus::Failed); return; } @@ -248,9 +264,9 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { } else { // scopeToken acquired; no advisory lock } - + std::error_code ec; - + // Determine line-ending style and original final-newline presence std::string data; bool originalFinalNewline = false; @@ -270,7 +286,8 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { std::ifstream inBin(osPathR, std::ios::in | std::ios::binary); if (inBin) { originalExists = true; - std::ostringstream ss; ss << inBin.rdbuf(); + std::ostringstream ss; + ss << inBin.rdbuf(); data = ss.str(); if (!data.empty() && data.back() == '\n') { originalFinalNewline = true; @@ -278,16 +295,24 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { size_t crlf = 0, lf = 0; for (size_t i = 0; i < data.size(); ++i) { if (data[i] == '\n') { - if (i > 0 && data[i-1] == '\r') ++crlf; else ++lf; + if (i > 0 && data[i - 1] == '\r') + ++crlf; + else + ++lf; } } - if (crlf > lf) eol = "\r\n"; else if (lf > crlf) eol = "\n"; else eol = platformDefaultEol; + if (crlf > lf) + eol = "\r\n"; + else if (lf > crlf) + eol = "\n"; + else + eol = platformDefaultEol; } else { eol = platformDefaultEol; } } if (eol.empty()) eol = platformDefaultEol; - + // Parse original lines without EOLs std::vector originalLines; if (!data.empty()) { @@ -306,10 +331,10 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { originalLines.push_back(std::move(cur)); } } - + // Apply all operations to get final content std::vector finalLines = 
applyOperations(originalLines); - + // Generate secure temporary filename in the same directory as destination auto targetPath = std::filesystem::path(p); auto dir = targetPath.parent_path(); @@ -333,7 +358,7 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { // Windows: use random device auto tempPath = dir / (base + ".tmp" + std::to_string(std::random_device{}())); #endif - + // Ensure parent directory exists if configured (effective option) const bool createParents = opts.createParentDirs.value_or(_vfs ? _vfs->_cfg.defaultCreateParentDirs : false); if (createParents) { @@ -347,19 +372,29 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { } } } - + // Optional cross-process serialization via sibling lock file (POSIX) #if defined(__unix__) || defined(__APPLE__) - struct LockFileToken { int fd = -1; ~LockFileToken(){ if (fd >= 0) { ::flock(fd, LOCK_UN); ::close(fd); } } }; + struct LockFileToken + { + int fd = -1; + ~LockFileToken() { + if (fd >= 0) { + ::flock(fd, LOCK_UN); + ::close(fd); + } + } + }; std::unique_ptr lockToken; const bool useLockFile = opts.useLockFile.value_or(_vfs ? _vfs->_cfg.defaultUseLockFile : false); if (useLockFile) { - auto timeout = opts.lockTimeout.value_or(_vfs ? _vfs->_cfg.lockAcquireTimeout : std::chrono::milliseconds(5000)); + auto timeout = + opts.lockTimeout.value_or(_vfs ? _vfs->_cfg.lockAcquireTimeout : std::chrono::milliseconds(5000)); auto suffix = opts.lockSuffix.value_or(_vfs ? 
_vfs->_cfg.lockSuffix : std::string(".lock")); std::error_code lockEc; bool timedOut = false; // Ensure parent directory exists for the lock file - (void)createParents; // already handled above + (void)createParents; // already handled above auto lockPath = (dir / (base + suffix)).string(); int fd = ::open(lockPath.c_str(), O_CREAT | O_CLOEXEC | O_RDWR, 0600); if (fd < 0) { @@ -383,7 +418,9 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { } std::this_thread::sleep_for(std::chrono::milliseconds(10)); } - if (!lockToken && fd >= 0) { ::close(fd); } + if (!lockToken && fd >= 0) { + ::close(fd); + } } if (!lockToken) { if (timedOut) { @@ -400,10 +437,9 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { #endif // Decide final newline presence - const bool finalNewline = opts.ensureFinalNewline.has_value() - ? opts.ensureFinalNewline.value() - : (originalExists ? originalFinalNewline : true); - + const bool finalNewline = opts.ensureFinalNewline.has_value() ? opts.ensureFinalNewline.value() + : (originalExists ? 
originalFinalNewline : true); + // Write to temp file in binary with consistent EOL { std::ofstream out(tempPath, std::ios::out | std::ios::trunc | std::ios::binary); @@ -431,41 +467,38 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { return; } } - - // Atomic rename - #if defined(_WIN32) - // Use Windows-specific atomic rename with retry logic - auto wsrc = tempPath.wstring(); - auto wdst = targetPath.wstring(); - - const int maxRetries = 50; - const int retryDelayMs = 10; - - bool success = false; - for (int i = 0; i < maxRetries; ++i) { - if (MoveFileExW(wsrc.c_str(), wdst.c_str(), - MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH) != 0) { - success = true; - break; - } - - DWORD error = GetLastError(); - if (error == ERROR_SHARING_VIOLATION || - error == ERROR_ACCESS_DENIED || - error == ERROR_LOCK_VIOLATION) { - Sleep(retryDelayMs); - continue; - } + +// Atomic rename +#if defined(_WIN32) + // Use Windows-specific atomic rename with retry logic + auto wsrc = tempPath.wstring(); + auto wdst = targetPath.wstring(); + + const int maxRetries = 50; + const int retryDelayMs = 10; + + bool success = false; + for (int i = 0; i < maxRetries; ++i) { + if (MoveFileExW(wsrc.c_str(), wdst.c_str(), MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH) != 0) { + success = true; break; } - - if (!success) { - std::filesystem::remove(tempPath, ec); - s.setError(FileError::IOError, "Failed to replace destination file", p); - s.complete(FileOpStatus::Failed); - return; + + DWORD error = GetLastError(); + if (error == ERROR_SHARING_VIOLATION || error == ERROR_ACCESS_DENIED || error == ERROR_LOCK_VIOLATION) { + Sleep(retryDelayMs); + continue; } - #else + break; + } + + if (!success) { + std::filesystem::remove(tempPath, ec); + s.setError(FileError::IOError, "Failed to replace destination file", p); + s.complete(FileOpStatus::Failed); + return; + } +#else // Preserve destination permissions if it exists { struct stat st; @@ -502,8 +535,8 @@ 
FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { return; } } - #endif - +#endif + // Calculate total bytes written size_t totalBytes = 0; if (finalLines.empty()) { @@ -516,41 +549,42 @@ FileOperationHandle WriteBatch::commit(const WriteOptions& opts) { } } } - + s.wrote = totalBytes; s.complete(FileOpStatus::Complete); }); } FileOperationHandle WriteBatch::preview() const { - return _vfs->submit(_path, [this, ops = _operations](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&) { - // Read the original file - std::vector originalLines; - { - std::ifstream in(p, std::ios::in); - if (in) { - std::string line; - while (std::getline(in, line)) { - originalLines.push_back(line); + return _vfs->submit( + _path, [this, ops = _operations](FileOperationHandle::OpState& s, const std::string& p, const ExecContext&) { + // Read the original file + std::vector originalLines; + { + std::ifstream in(p, std::ios::in); + if (in) { + std::string line; + while (std::getline(in, line)) { + originalLines.push_back(line); + } } } - } - - // Apply all operations to get final content - std::vector finalLines = applyOperations(originalLines); - - // Build result string - std::ostringstream oss; - for (size_t i = 0; i < finalLines.size(); ++i) { - oss << finalLines[i]; - if (i < finalLines.size() - 1) { - oss << "\n"; + + // Apply all operations to get final content + std::vector finalLines = applyOperations(originalLines); + + // Build result string + std::ostringstream oss; + for (size_t i = 0; i < finalLines.size(); ++i) { + oss << finalLines[i]; + if (i < finalLines.size() - 1) { + oss << "\n"; + } } - } - - s.text = oss.str(); - s.complete(FileOpStatus::Complete); - }); + + s.text = oss.str(); + s.complete(FileOpStatus::Complete); + }); } -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/VirtualFileSystem/WriteBatch.h b/src/VirtualFileSystem/WriteBatch.h index 
f20b6fb..9f7d33c 100644 --- a/src/VirtualFileSystem/WriteBatch.h +++ b/src/VirtualFileSystem/WriteBatch.h @@ -1,38 +1,41 @@ #pragma once -#include #include -#include #include +#include #include +#include + #include "FileOperationHandle.h" -#include "IFileSystemBackend.h" // for WriteOptions +#include "IFileSystemBackend.h" // for WriteOptions -namespace EntropyEngine::Core::IO { +namespace EntropyEngine::Core::IO +{ class VirtualFileSystem; class FileHandle; /** * WriteBatch - Collects multiple write operations and applies them atomically - * + * * This allows for efficient batch processing of file modifications without * repeatedly reading and writing the entire file. All operations are collected * in memory and then applied in a single atomic operation. - * + * * Usage: * auto batch = vfs->createWriteBatch("myfile.txt"); * batch->writeLine(0, "First line"); - * batch->writeLine(5, "Sixth line"); + * batch->writeLine(5, "Sixth line"); * batch->insertLine(2, "New third line"); * batch->deleteLine(10); * auto handle = batch->commit(); // Applies all changes atomically * handle.wait(); */ -class WriteBatch { +class WriteBatch +{ public: WriteBatch(VirtualFileSystem* vfs, std::string path); ~WriteBatch() = default; - + // Line operations /** * @brief Overwrite a line at index (0-based) @@ -60,7 +63,7 @@ class WriteBatch { * @return Reference to this batch for chaining */ WriteBatch& appendLine(std::string_view content); - + // Bulk operations /** * @brief Overwrite multiple specific lines in one call @@ -79,7 +82,7 @@ class WriteBatch { * @return Reference to this batch for chaining */ WriteBatch& clear(); - + // Range operations /** * @brief Delete a range of lines [startLine, endLine) @@ -95,11 +98,11 @@ class WriteBatch { * @return Reference to this batch for chaining */ WriteBatch& insertLines(size_t startLine, const std::vector& lines); - + // Commit changes /** * @brief Apply all pending operations atomically - * + * * Uses VFS defaults for parent directory 
creation and preserves the original * file's line-ending style and trailing-newline policy when possible. * @return Handle for the asynchronous commit @@ -110,10 +113,10 @@ class WriteBatch { * @endcode */ FileOperationHandle commit(); - + /** * @brief Apply all pending operations atomically with per-commit options - * + * * @param opts WriteOptions controlling behavior: * - createParentDirs: per-commit override for creating parent directories * - ensureFinalNewline: force presence/absence of a final newline on whole-file rewrites @@ -122,54 +125,62 @@ class WriteBatch { * @return Handle for the asynchronous commit */ FileOperationHandle commit(const WriteOptions& opts); - + /** * @brief Build the resulting content without writing it (debugging aid) * @return Handle whose contentsText() contains the preview after wait() */ - FileOperationHandle preview() const; // Get what the file would look like (for debugging) - + FileOperationHandle preview() const; // Get what the file would look like (for debugging) + // Query /** * @brief Number of pending operations in the batch */ - size_t pendingOperations() const { return _operations.size(); } + size_t pendingOperations() const { + return _operations.size(); + } /** * @brief Returns true if no operations are pending */ - bool empty() const { return _operations.empty(); } + bool empty() const { + return _operations.empty(); + } /** * @brief Clears all pending operations without writing */ - void reset(); // Clear all pending operations - + void reset(); // Clear all pending operations + /** * @brief Target file path for this batch */ - const std::string& getPath() const { return _path; } - + const std::string& getPath() const { + return _path; + } + private: - enum class OpType { - Write, // Replace line at index - Insert, // Insert line, shifting others down - Delete, // Delete line, shifting others up - Append, // Add to end of file - Clear, // Clear entire file - Replace // Replace entire file content + enum class 
OpType + { + Write, // Replace line at index + Insert, // Insert line, shifting others down + Delete, // Delete line, shifting others up + Append, // Add to end of file + Clear, // Clear entire file + Replace // Replace entire file content }; - - struct Operation { + + struct Operation + { OpType type; size_t lineNumber; std::string content; }; - + VirtualFileSystem* _vfs; std::string _path; std::vector _operations; - + // Apply operations to build final file content std::vector applyOperations(const std::vector& originalLines) const; }; -} // namespace EntropyEngine::Core::IO \ No newline at end of file +} // namespace EntropyEngine::Core::IO diff --git a/src/entropy/entropy_concurrency_types_c.cpp b/src/entropy/entropy_concurrency_types_c.cpp index f6b7829..672da11 100644 --- a/src/entropy/entropy_concurrency_types_c.cpp +++ b/src/entropy/entropy_concurrency_types_c.cpp @@ -7,9 +7,10 @@ * This file is part of the Entropy Core project. */ -#include "../../include/entropy/entropy_concurrency_types.h" #include +#include "../../include/entropy/entropy_concurrency_types.h" + // ============================================================================ // Helper String Conversions // ============================================================================ @@ -18,40 +19,55 @@ extern "C" { const char* entropy_contract_state_to_string(EntropyContractState state) { switch (state) { - case ENTROPY_CONTRACT_FREE: return "Free"; - case ENTROPY_CONTRACT_ALLOCATED: return "Allocated"; - case ENTROPY_CONTRACT_SCHEDULED: return "Scheduled"; - case ENTROPY_CONTRACT_EXECUTING: return "Executing"; - case ENTROPY_CONTRACT_COMPLETED: return "Completed"; - default: return "Unknown"; + case ENTROPY_CONTRACT_FREE: + return "Free"; + case ENTROPY_CONTRACT_ALLOCATED: + return "Allocated"; + case ENTROPY_CONTRACT_SCHEDULED: + return "Scheduled"; + case ENTROPY_CONTRACT_EXECUTING: + return "Executing"; + case ENTROPY_CONTRACT_COMPLETED: + return "Completed"; + default: + return 
"Unknown"; } } const char* entropy_schedule_result_to_string(EntropyScheduleResult result) { switch (result) { - case ENTROPY_SCHEDULE_SCHEDULED: return "Scheduled"; - case ENTROPY_SCHEDULE_ALREADY_SCHEDULED: return "AlreadyScheduled"; - case ENTROPY_SCHEDULE_NOT_SCHEDULED: return "NotScheduled"; - case ENTROPY_SCHEDULE_EXECUTING: return "Executing"; - case ENTROPY_SCHEDULE_INVALID: return "Invalid"; - default: return "Unknown"; + case ENTROPY_SCHEDULE_SCHEDULED: + return "Scheduled"; + case ENTROPY_SCHEDULE_ALREADY_SCHEDULED: + return "AlreadyScheduled"; + case ENTROPY_SCHEDULE_NOT_SCHEDULED: + return "NotScheduled"; + case ENTROPY_SCHEDULE_EXECUTING: + return "Executing"; + case ENTROPY_SCHEDULE_INVALID: + return "Invalid"; + default: + return "Unknown"; } } const char* entropy_execution_type_to_string(EntropyExecutionType type) { switch (type) { - case ENTROPY_EXEC_ANY_THREAD: return "AnyThread"; - case ENTROPY_EXEC_MAIN_THREAD: return "MainThread"; - default: return "Unknown"; + case ENTROPY_EXEC_ANY_THREAD: + return "AnyThread"; + case ENTROPY_EXEC_MAIN_THREAD: + return "MainThread"; + default: + return "Unknown"; } } void entropy_work_service_config_init(EntropyWorkServiceConfig* config) { if (!config) return; - config->thread_count = 0; // 0 = auto-detect + config->thread_count = 0; // 0 = auto-detect config->max_soft_failure_count = 5; config->failure_sleep_time_ns = 1; } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_directory_handle_c.cpp b/src/entropy/entropy_directory_handle_c.cpp index c4ae84a..3809bd4 100644 --- a/src/entropy/entropy_directory_handle_c.cpp +++ b/src/entropy/entropy_directory_handle_c.cpp @@ -3,12 +3,13 @@ * @brief Implementation of DirectoryHandle C API */ -#include "entropy/entropy_directory_handle.h" +#include +#include + #include "VirtualFileSystem/DirectoryHandle.h" #include "VirtualFileSystem/FileOperationHandle.h" #include "VirtualFileSystem/IFileSystemBackend.h" -#include -#include +#include 
"entropy/entropy_directory_handle.h" using namespace EntropyEngine::Core::IO; @@ -75,10 +76,7 @@ static ListDirectoryOptions to_cpp_list_options(const EntropyListDirectoryOption extern "C" { -entropy_DirectoryHandle entropy_directory_handle_clone( - entropy_DirectoryHandle handle, - EntropyStatus* status -) { +entropy_DirectoryHandle entropy_directory_handle_clone(entropy_DirectoryHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -87,7 +85,7 @@ entropy_DirectoryHandle entropy_directory_handle_clone( try { auto* cpp_handle = reinterpret_cast(handle); - auto* clone = new(std::nothrow) DirectoryHandle(*cpp_handle); + auto* clone = new (std::nothrow) DirectoryHandle(*cpp_handle); if (!clone) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -106,11 +104,8 @@ void entropy_directory_handle_destroy(entropy_DirectoryHandle handle) { delete cpp_handle; } -entropy_FileOperationHandle entropy_directory_handle_create( - entropy_DirectoryHandle handle, - EntropyBool create_parents, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_directory_handle_create(entropy_DirectoryHandle handle, EntropyBool create_parents, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -128,11 +123,8 @@ entropy_FileOperationHandle entropy_directory_handle_create( } } -entropy_FileOperationHandle entropy_directory_handle_remove( - entropy_DirectoryHandle handle, - EntropyBool recursive, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_directory_handle_remove(entropy_DirectoryHandle handle, EntropyBool recursive, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -150,11 +142,9 @@ entropy_FileOperationHandle entropy_directory_handle_remove( } } -entropy_FileOperationHandle entropy_directory_handle_list( - entropy_DirectoryHandle handle, - const EntropyListDirectoryOptions* options, 
- EntropyStatus* status -) { +entropy_FileOperationHandle entropy_directory_handle_list(entropy_DirectoryHandle handle, + const EntropyListDirectoryOptions* options, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !options) { *status = ENTROPY_ERR_INVALID_ARG; @@ -173,10 +163,8 @@ entropy_FileOperationHandle entropy_directory_handle_list( } } -entropy_FileOperationHandle entropy_directory_handle_get_metadata( - entropy_DirectoryHandle handle, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_directory_handle_get_metadata(entropy_DirectoryHandle handle, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -194,10 +182,7 @@ entropy_FileOperationHandle entropy_directory_handle_get_metadata( } } -const char* entropy_directory_handle_normalized_key( - entropy_DirectoryHandle handle, - EntropyStatus* status -) { +const char* entropy_directory_handle_normalized_key(entropy_DirectoryHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -214,4 +199,4 @@ const char* entropy_directory_handle_normalized_key( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_file_handle_c.cpp b/src/entropy/entropy_file_handle_c.cpp index ccd4741..4cb3928 100644 --- a/src/entropy/entropy_file_handle_c.cpp +++ b/src/entropy/entropy_file_handle_c.cpp @@ -3,14 +3,15 @@ * @brief Implementation of FileHandle C API */ -#include "entropy/entropy_file_handle.h" +#include +#include +#include +#include + #include "VirtualFileSystem/FileHandle.h" #include "VirtualFileSystem/FileOperationHandle.h" #include "VirtualFileSystem/IFileSystemBackend.h" -#include -#include -#include -#include +#include "entropy/entropy_file_handle.h" using namespace EntropyEngine::Core::IO; @@ -78,10 +79,7 @@ static WriteOptions to_cpp_write_options(const EntropyWriteOptions* opts) { extern "C" { -entropy_FileHandle entropy_file_handle_clone( - 
entropy_FileHandle handle, - EntropyStatus* status -) { +entropy_FileHandle entropy_file_handle_clone(entropy_FileHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -90,7 +88,7 @@ entropy_FileHandle entropy_file_handle_clone( try { auto* cpp_handle = reinterpret_cast(handle); - auto* clone = new(std::nothrow) FileHandle(*cpp_handle); + auto* clone = new (std::nothrow) FileHandle(*cpp_handle); if (!clone) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -109,10 +107,7 @@ void entropy_file_handle_destroy(entropy_FileHandle handle) { delete cpp_handle; } -entropy_FileOperationHandle entropy_file_handle_read_all( - entropy_FileHandle handle, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_read_all(entropy_FileHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -130,12 +125,8 @@ entropy_FileOperationHandle entropy_file_handle_read_all( } } -entropy_FileOperationHandle entropy_file_handle_read_range( - entropy_FileHandle handle, - uint64_t offset, - size_t length, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_read_range(entropy_FileHandle handle, uint64_t offset, size_t length, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -153,11 +144,8 @@ entropy_FileOperationHandle entropy_file_handle_read_range( } } -entropy_FileOperationHandle entropy_file_handle_read_line( - entropy_FileHandle handle, - size_t line_number, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_read_line(entropy_FileHandle handle, size_t line_number, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -175,11 +163,8 @@ entropy_FileOperationHandle entropy_file_handle_read_line( } } -entropy_FileOperationHandle entropy_file_handle_write_all_text( - 
entropy_FileHandle handle, - const char* text, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_all_text(entropy_FileHandle handle, const char* text, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !text) { *status = ENTROPY_ERR_INVALID_ARG; @@ -197,12 +182,9 @@ entropy_FileOperationHandle entropy_file_handle_write_all_text( } } -entropy_FileOperationHandle entropy_file_handle_write_all_text_with_options( - entropy_FileHandle handle, - const char* text, - const EntropyWriteOptions* options, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_all_text_with_options(entropy_FileHandle handle, const char* text, + const EntropyWriteOptions* options, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !text || !options) { *status = ENTROPY_ERR_INVALID_ARG; @@ -221,12 +203,8 @@ entropy_FileOperationHandle entropy_file_handle_write_all_text_with_options( } } -entropy_FileOperationHandle entropy_file_handle_write_all_bytes( - entropy_FileHandle handle, - const uint8_t* bytes, - size_t length, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_all_bytes(entropy_FileHandle handle, const uint8_t* bytes, + size_t length, EntropyStatus* status) { if (!status) return nullptr; if (!handle || !bytes) { *status = ENTROPY_ERR_INVALID_ARG; @@ -245,13 +223,10 @@ entropy_FileOperationHandle entropy_file_handle_write_all_bytes( } } -entropy_FileOperationHandle entropy_file_handle_write_all_bytes_with_options( - entropy_FileHandle handle, - const uint8_t* bytes, - size_t length, - const EntropyWriteOptions* options, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_all_bytes_with_options(entropy_FileHandle handle, + const uint8_t* bytes, size_t length, + const EntropyWriteOptions* options, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !bytes || !options) { *status = ENTROPY_ERR_INVALID_ARG; 
@@ -271,13 +246,9 @@ entropy_FileOperationHandle entropy_file_handle_write_all_bytes_with_options( } } -entropy_FileOperationHandle entropy_file_handle_write_range( - entropy_FileHandle handle, - uint64_t offset, - const uint8_t* bytes, - size_t length, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_range(entropy_FileHandle handle, uint64_t offset, + const uint8_t* bytes, size_t length, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !bytes) { *status = ENTROPY_ERR_INVALID_ARG; @@ -296,12 +267,8 @@ entropy_FileOperationHandle entropy_file_handle_write_range( } } -entropy_FileOperationHandle entropy_file_handle_write_line( - entropy_FileHandle handle, - size_t line_number, - const char* line, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_write_line(entropy_FileHandle handle, size_t line_number, + const char* line, EntropyStatus* status) { if (!status) return nullptr; if (!handle || !line) { *status = ENTROPY_ERR_INVALID_ARG; @@ -319,10 +286,7 @@ entropy_FileOperationHandle entropy_file_handle_write_line( } } -entropy_FileOperationHandle entropy_file_handle_create_empty( - entropy_FileHandle handle, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_create_empty(entropy_FileHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -340,10 +304,7 @@ entropy_FileOperationHandle entropy_file_handle_create_empty( } } -entropy_FileOperationHandle entropy_file_handle_remove( - entropy_FileHandle handle, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_handle_remove(entropy_FileHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -361,10 +322,7 @@ entropy_FileOperationHandle entropy_file_handle_remove( } } -const char* entropy_file_handle_normalized_key( - entropy_FileHandle handle, - EntropyStatus* 
status -) { +const char* entropy_file_handle_normalized_key(entropy_FileHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -381,4 +339,4 @@ const char* entropy_file_handle_normalized_key( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_file_operation_handle_c.cpp b/src/entropy/entropy_file_operation_handle_c.cpp index 020aa05..8de8e87 100644 --- a/src/entropy/entropy_file_operation_handle_c.cpp +++ b/src/entropy/entropy_file_operation_handle_c.cpp @@ -3,13 +3,14 @@ * @brief Implementation of FileOperationHandle C API */ -#include "entropy/entropy_file_operation_handle.h" -#include "VirtualFileSystem/FileOperationHandle.h" +#include #include -#include #include -#include #include +#include + +#include "VirtualFileSystem/FileOperationHandle.h" +#include "entropy/entropy_file_operation_handle.h" using namespace EntropyEngine::Core::IO; @@ -18,8 +19,9 @@ using namespace EntropyEngine::Core::IO; * ============================================================================ */ // Cache structure to hold C-compatible versions of results -struct FileOpResultCache { - std::mutex mutex; // Protect concurrent access to cache +struct FileOpResultCache +{ + std::mutex mutex; // Protect concurrent access to cache // Cached C structures EntropyFileMetadata c_metadata{}; @@ -44,12 +46,12 @@ struct FileOpResultCache { }; // Wrapper to hold C++ FileOperationHandle + C result cache -struct FileOpHandleWrapper { +struct FileOpHandleWrapper +{ FileOperationHandle cpp_handle; FileOpResultCache cache; - explicit FileOpHandleWrapper(FileOperationHandle&& h) - : cpp_handle(std::move(h)) {} + explicit FileOpHandleWrapper(FileOperationHandle&& h) : cpp_handle(std::move(h)) {} }; /* ============================================================================ @@ -74,28 +76,45 @@ static void translate_exception(EntropyStatus* status) { static EntropyFileOpStatus to_c_status(FileOpStatus s) { switch (s) 
{ - case FileOpStatus::Pending: return ENTROPY_FILE_OP_PENDING; - case FileOpStatus::Running: return ENTROPY_FILE_OP_RUNNING; - case FileOpStatus::Partial: return ENTROPY_FILE_OP_PARTIAL; - case FileOpStatus::Complete: return ENTROPY_FILE_OP_COMPLETE; - case FileOpStatus::Failed: return ENTROPY_FILE_OP_FAILED; - default: return ENTROPY_FILE_OP_FAILED; + case FileOpStatus::Pending: + return ENTROPY_FILE_OP_PENDING; + case FileOpStatus::Running: + return ENTROPY_FILE_OP_RUNNING; + case FileOpStatus::Partial: + return ENTROPY_FILE_OP_PARTIAL; + case FileOpStatus::Complete: + return ENTROPY_FILE_OP_COMPLETE; + case FileOpStatus::Failed: + return ENTROPY_FILE_OP_FAILED; + default: + return ENTROPY_FILE_OP_FAILED; } } static EntropyFileError to_c_error(FileError e) { switch (e) { - case FileError::None: return ENTROPY_FILE_ERROR_NONE; - case FileError::FileNotFound: return ENTROPY_FILE_ERROR_FILE_NOT_FOUND; - case FileError::AccessDenied: return ENTROPY_FILE_ERROR_ACCESS_DENIED; - case FileError::DiskFull: return ENTROPY_FILE_ERROR_DISK_FULL; - case FileError::InvalidPath: return ENTROPY_FILE_ERROR_INVALID_PATH; - case FileError::IOError: return ENTROPY_FILE_ERROR_IO_ERROR; - case FileError::NetworkError: return ENTROPY_FILE_ERROR_NETWORK_ERROR; - case FileError::Timeout: return ENTROPY_FILE_ERROR_TIMEOUT; - case FileError::Conflict: return ENTROPY_FILE_ERROR_CONFLICT; - case FileError::Unknown: return ENTROPY_FILE_ERROR_UNKNOWN; - default: return ENTROPY_FILE_ERROR_UNKNOWN; + case FileError::None: + return ENTROPY_FILE_ERROR_NONE; + case FileError::FileNotFound: + return ENTROPY_FILE_ERROR_FILE_NOT_FOUND; + case FileError::AccessDenied: + return ENTROPY_FILE_ERROR_ACCESS_DENIED; + case FileError::DiskFull: + return ENTROPY_FILE_ERROR_DISK_FULL; + case FileError::InvalidPath: + return ENTROPY_FILE_ERROR_INVALID_PATH; + case FileError::IOError: + return ENTROPY_FILE_ERROR_IO_ERROR; + case FileError::NetworkError: + return ENTROPY_FILE_ERROR_NETWORK_ERROR; + case 
FileError::Timeout: + return ENTROPY_FILE_ERROR_TIMEOUT; + case FileError::Conflict: + return ENTROPY_FILE_ERROR_CONFLICT; + case FileError::Unknown: + return ENTROPY_FILE_ERROR_UNKNOWN; + default: + return ENTROPY_FILE_ERROR_UNKNOWN; } } @@ -114,8 +133,7 @@ static void cache_metadata(FileOpResultCache& cache, const FileMetadata& meta) { cache.c_metadata.executable = meta.executable ? ENTROPY_TRUE : ENTROPY_FALSE; if (meta.lastModified.has_value()) { - auto ms = std::chrono::duration_cast( - meta.lastModified->time_since_epoch()).count(); + auto ms = std::chrono::duration_cast(meta.lastModified->time_since_epoch()).count(); cache.c_metadata.last_modified_ms = ms; } else { cache.c_metadata.last_modified_ms = -1; @@ -163,8 +181,9 @@ static void cache_entries(FileOpResultCache& cache, const std::vector( - src.metadata.lastModified->time_since_epoch()).count(); + auto ms = + std::chrono::duration_cast(src.metadata.lastModified->time_since_epoch()) + .count(); dst.metadata.last_modified_ms = ms; } else { dst.metadata.last_modified_ms = -1; @@ -218,10 +237,8 @@ static void cache_error(FileOpResultCache& cache, const FileErrorInfo& err) { extern "C" { -entropy_FileOperationHandle entropy_file_operation_handle_clone( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_file_operation_handle_clone(entropy_FileOperationHandle handle, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -231,7 +248,7 @@ entropy_FileOperationHandle entropy_file_operation_handle_clone( try { auto* wrapper = reinterpret_cast(handle); // Copy the C++ handle (it's value-semantic with shared state) - auto* clone = new(std::nothrow) FileOpHandleWrapper(FileOperationHandle(wrapper->cpp_handle)); + auto* clone = new (std::nothrow) FileOpHandleWrapper(FileOperationHandle(wrapper->cpp_handle)); if (!clone) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -250,10 +267,7 @@ void 
entropy_file_operation_handle_destroy(entropy_FileOperationHandle handle) { delete wrapper; } -void entropy_file_operation_handle_wait( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +void entropy_file_operation_handle_wait(entropy_FileOperationHandle handle, EntropyStatus* status) { if (!status) return; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -269,10 +283,7 @@ void entropy_file_operation_handle_wait( } } -EntropyFileOpStatus entropy_file_operation_handle_status( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +EntropyFileOpStatus entropy_file_operation_handle_status(entropy_FileOperationHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_FILE_OP_FAILED; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -289,11 +300,8 @@ EntropyFileOpStatus entropy_file_operation_handle_status( } } -const uint8_t* entropy_file_operation_handle_contents_bytes( - entropy_FileOperationHandle handle, - size_t* out_size, - EntropyStatus* status -) { +const uint8_t* entropy_file_operation_handle_contents_bytes(entropy_FileOperationHandle handle, size_t* out_size, + EntropyStatus* status) { if (!status) return nullptr; if (!handle || !out_size) { *status = ENTROPY_ERR_INVALID_ARG; @@ -318,10 +326,7 @@ const uint8_t* entropy_file_operation_handle_contents_bytes( } } -const char* entropy_file_operation_handle_contents_text( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +const char* entropy_file_operation_handle_contents_text(entropy_FileOperationHandle handle, EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -353,10 +358,7 @@ const char* entropy_file_operation_handle_contents_text( } } -uint64_t entropy_file_operation_handle_bytes_written( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +uint64_t entropy_file_operation_handle_bytes_written(entropy_FileOperationHandle handle, EntropyStatus* status) { if (!status) return 0; if 
(!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -373,10 +375,8 @@ uint64_t entropy_file_operation_handle_bytes_written( } } -const EntropyFileMetadata* entropy_file_operation_handle_metadata( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +const EntropyFileMetadata* entropy_file_operation_handle_metadata(entropy_FileOperationHandle handle, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -404,11 +404,8 @@ const EntropyFileMetadata* entropy_file_operation_handle_metadata( } } -const EntropyDirectoryEntry* entropy_file_operation_handle_directory_entries( - entropy_FileOperationHandle handle, - size_t* out_count, - EntropyStatus* status -) { +const EntropyDirectoryEntry* entropy_file_operation_handle_directory_entries(entropy_FileOperationHandle handle, + size_t* out_count, EntropyStatus* status) { if (!status) return nullptr; if (!handle || !out_count) { *status = ENTROPY_ERR_INVALID_ARG; @@ -439,10 +436,8 @@ const EntropyDirectoryEntry* entropy_file_operation_handle_directory_entries( } } -const EntropyFileErrorInfo* entropy_file_operation_handle_error_info( - entropy_FileOperationHandle handle, - EntropyStatus* status -) { +const EntropyFileErrorInfo* entropy_file_operation_handle_error_info(entropy_FileOperationHandle handle, + EntropyStatus* status) { if (!status) return nullptr; if (!handle) { *status = ENTROPY_ERR_INVALID_ARG; @@ -466,12 +461,13 @@ const EntropyFileErrorInfo* entropy_file_operation_handle_error_info( } } -} // extern "C" +} // extern "C" // Internal helper for other C API files to create FileOpHandleWrapper -namespace EntropyEngine::Core::IO { - extern "C" entropy_FileOperationHandle wrap_file_operation_handle(FileOperationHandle&& handle) { - auto* wrapper = new(std::nothrow) FileOpHandleWrapper(std::move(handle)); - return reinterpret_cast(wrapper); - } +namespace EntropyEngine::Core::IO +{ +extern "C" entropy_FileOperationHandle 
wrap_file_operation_handle(FileOperationHandle&& handle) { + auto* wrapper = new (std::nothrow) FileOpHandleWrapper(std::move(handle)); + return reinterpret_cast(wrapper); } +} // namespace EntropyEngine::Core::IO diff --git a/src/entropy/entropy_vfs_types_c.cpp b/src/entropy/entropy_vfs_types_c.cpp index 6078bd9..975980e 100644 --- a/src/entropy/entropy_vfs_types_c.cpp +++ b/src/entropy/entropy_vfs_types_c.cpp @@ -3,9 +3,10 @@ * @brief Implementation of VFS type helpers and conversions */ -#include "entropy/entropy_vfs_types.h" -#include #include +#include + +#include "entropy/entropy_vfs_types.h" extern "C" { @@ -30,7 +31,7 @@ void entropy_read_options_init(EntropyReadOptions* options) { if (!options) return; options->offset = 0; - options->length = 0; // 0 = read to EOF + options->length = 0; // 0 = read to EOF options->binary = ENTROPY_TRUE; } @@ -41,12 +42,12 @@ void entropy_write_options_init(EntropyWriteOptions* options) { options->append = ENTROPY_FALSE; options->create_if_missing = ENTROPY_TRUE; options->truncate = ENTROPY_FALSE; - options->create_parent_dirs = -1; // Use VFS default - options->ensure_final_newline = -1; // Preserve original + options->create_parent_dirs = -1; // Use VFS default + options->ensure_final_newline = -1; // Preserve original options->fsync = ENTROPY_FALSE; - options->use_lock_file = -1; // Use VFS default - options->lock_timeout_ms = 0; // Use VFS default - options->lock_suffix = NULL; // Use VFS default + options->use_lock_file = -1; // Use VFS default + options->lock_timeout_ms = 0; // Use VFS default + options->lock_suffix = NULL; // Use VFS default } void entropy_stream_options_init(EntropyStreamOptions* options) { @@ -54,7 +55,7 @@ void entropy_stream_options_init(EntropyStreamOptions* options) { options->mode = ENTROPY_STREAM_MODE_READ; options->buffered = ENTROPY_TRUE; - options->buffer_size = 65536; // 64KB + options->buffer_size = 65536; // 64KB } void entropy_list_directory_options_init(EntropyListDirectoryOptions* 
options) { @@ -66,7 +67,7 @@ void entropy_list_directory_options_init(EntropyListDirectoryOptions* options) { options->glob_pattern = NULL; options->include_hidden = ENTROPY_FALSE; options->sort_by = ENTROPY_SORT_NONE; - options->max_results = 0; // Unlimited + options->max_results = 0; // Unlimited } /* ============================================================================ @@ -75,29 +76,46 @@ void entropy_list_directory_options_init(EntropyListDirectoryOptions* options) { const char* entropy_file_op_status_to_string(EntropyFileOpStatus status) { switch (status) { - case ENTROPY_FILE_OP_PENDING: return "Pending"; - case ENTROPY_FILE_OP_RUNNING: return "Running"; - case ENTROPY_FILE_OP_PARTIAL: return "Partial"; - case ENTROPY_FILE_OP_COMPLETE: return "Complete"; - case ENTROPY_FILE_OP_FAILED: return "Failed"; - default: return "Unknown"; + case ENTROPY_FILE_OP_PENDING: + return "Pending"; + case ENTROPY_FILE_OP_RUNNING: + return "Running"; + case ENTROPY_FILE_OP_PARTIAL: + return "Partial"; + case ENTROPY_FILE_OP_COMPLETE: + return "Complete"; + case ENTROPY_FILE_OP_FAILED: + return "Failed"; + default: + return "Unknown"; } } const char* entropy_file_error_to_string(EntropyFileError error) { switch (error) { - case ENTROPY_FILE_ERROR_NONE: return "None"; - case ENTROPY_FILE_ERROR_FILE_NOT_FOUND: return "FileNotFound"; - case ENTROPY_FILE_ERROR_ACCESS_DENIED: return "AccessDenied"; - case ENTROPY_FILE_ERROR_DISK_FULL: return "DiskFull"; - case ENTROPY_FILE_ERROR_INVALID_PATH: return "InvalidPath"; - case ENTROPY_FILE_ERROR_IO_ERROR: return "IOError"; - case ENTROPY_FILE_ERROR_NETWORK_ERROR: return "NetworkError"; - case ENTROPY_FILE_ERROR_TIMEOUT: return "Timeout"; - case ENTROPY_FILE_ERROR_CONFLICT: return "Conflict"; - case ENTROPY_FILE_ERROR_UNKNOWN: return "Unknown"; - default: return "Unknown"; + case ENTROPY_FILE_ERROR_NONE: + return "None"; + case ENTROPY_FILE_ERROR_FILE_NOT_FOUND: + return "FileNotFound"; + case ENTROPY_FILE_ERROR_ACCESS_DENIED: + 
return "AccessDenied"; + case ENTROPY_FILE_ERROR_DISK_FULL: + return "DiskFull"; + case ENTROPY_FILE_ERROR_INVALID_PATH: + return "InvalidPath"; + case ENTROPY_FILE_ERROR_IO_ERROR: + return "IOError"; + case ENTROPY_FILE_ERROR_NETWORK_ERROR: + return "NetworkError"; + case ENTROPY_FILE_ERROR_TIMEOUT: + return "Timeout"; + case ENTROPY_FILE_ERROR_CONFLICT: + return "Conflict"; + case ENTROPY_FILE_ERROR_UNKNOWN: + return "Unknown"; + default: + return "Unknown"; } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_virtual_file_system_c.cpp b/src/entropy/entropy_virtual_file_system_c.cpp index 90674ef..d43a1d5 100644 --- a/src/entropy/entropy_virtual_file_system_c.cpp +++ b/src/entropy/entropy_virtual_file_system_c.cpp @@ -3,15 +3,16 @@ * @brief Implementation of VirtualFileSystem C API */ -#include "entropy/entropy_virtual_file_system.h" -#include "VirtualFileSystem/VirtualFileSystem.h" -#include "VirtualFileSystem/FileHandle.h" -#include "VirtualFileSystem/DirectoryHandle.h" -#include "VirtualFileSystem/WriteBatch.h" -#include "Concurrency/WorkContractGroup.h" +#include #include #include -#include + +#include "Concurrency/WorkContractGroup.h" +#include "VirtualFileSystem/DirectoryHandle.h" +#include "VirtualFileSystem/FileHandle.h" +#include "VirtualFileSystem/VirtualFileSystem.h" +#include "VirtualFileSystem/WriteBatch.h" +#include "entropy/entropy_virtual_file_system.h" using namespace EntropyEngine::Core; using namespace EntropyEngine::Core::IO; @@ -58,10 +59,7 @@ static VirtualFileSystem::Config to_cpp_config(const EntropyVFSConfig* c) { extern "C" { -entropy_VirtualFileSystem entropy_vfs_create( - entropy_WorkContractGroup group, - EntropyStatus* status -) { +entropy_VirtualFileSystem entropy_vfs_create(entropy_WorkContractGroup group, EntropyStatus* status) { if (!status) return nullptr; if (!group) { *status = ENTROPY_ERR_INVALID_ARG; @@ -70,7 +68,7 @@ entropy_VirtualFileSystem entropy_vfs_create( try { auto* cpp_group = 
reinterpret_cast(group); - auto* vfs = new(std::nothrow) VirtualFileSystem(cpp_group); + auto* vfs = new (std::nothrow) VirtualFileSystem(cpp_group); if (!vfs) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -83,11 +81,8 @@ entropy_VirtualFileSystem entropy_vfs_create( } } -entropy_VirtualFileSystem entropy_vfs_create_with_config( - entropy_WorkContractGroup group, - const EntropyVFSConfig* config, - EntropyStatus* status -) { +entropy_VirtualFileSystem entropy_vfs_create_with_config(entropy_WorkContractGroup group, + const EntropyVFSConfig* config, EntropyStatus* status) { if (!status) return nullptr; if (!group || !config) { *status = ENTROPY_ERR_INVALID_ARG; @@ -97,7 +92,7 @@ entropy_VirtualFileSystem entropy_vfs_create_with_config( try { auto* cpp_group = reinterpret_cast(group); auto cpp_config = to_cpp_config(config); - auto* vfs = new(std::nothrow) VirtualFileSystem(cpp_group, cpp_config); + auto* vfs = new (std::nothrow) VirtualFileSystem(cpp_group, cpp_config); if (!vfs) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -116,11 +111,8 @@ void entropy_vfs_destroy(entropy_VirtualFileSystem vfs) { delete cpp_vfs; } -entropy_FileHandle entropy_vfs_create_file_handle( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -) { +entropy_FileHandle entropy_vfs_create_file_handle(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status) { if (!status) return nullptr; if (!vfs || !path) { *status = ENTROPY_ERR_INVALID_ARG; @@ -132,7 +124,7 @@ entropy_FileHandle entropy_vfs_create_file_handle( auto cpp_handle = cpp_vfs->createFileHandle(path); // FileHandle is value-semantic, so we need to allocate on heap - auto* handle = new(std::nothrow) FileHandle(std::move(cpp_handle)); + auto* handle = new (std::nothrow) FileHandle(std::move(cpp_handle)); if (!handle) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -145,11 +137,8 @@ entropy_FileHandle entropy_vfs_create_file_handle( } } -entropy_DirectoryHandle 
entropy_vfs_create_directory_handle( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -) { +entropy_DirectoryHandle entropy_vfs_create_directory_handle(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status) { if (!status) return nullptr; if (!vfs || !path) { *status = ENTROPY_ERR_INVALID_ARG; @@ -161,7 +150,7 @@ entropy_DirectoryHandle entropy_vfs_create_directory_handle( auto cpp_handle = cpp_vfs->createDirectoryHandle(path); // DirectoryHandle is value-semantic, so allocate on heap - auto* handle = new(std::nothrow) DirectoryHandle(std::move(cpp_handle)); + auto* handle = new (std::nothrow) DirectoryHandle(std::move(cpp_handle)); if (!handle) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -174,11 +163,8 @@ entropy_DirectoryHandle entropy_vfs_create_directory_handle( } } -entropy_WriteBatch entropy_vfs_create_write_batch( - entropy_VirtualFileSystem vfs, - const char* path, - EntropyStatus* status -) { +entropy_WriteBatch entropy_vfs_create_write_batch(entropy_VirtualFileSystem vfs, const char* path, + EntropyStatus* status) { if (!status) return nullptr; if (!vfs || !path) { *status = ENTROPY_ERR_INVALID_ARG; @@ -203,4 +189,4 @@ entropy_WriteBatch entropy_vfs_create_write_batch( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_work_contract_group_c.cpp b/src/entropy/entropy_work_contract_group_c.cpp index 2e8ecdc..38666b5 100644 --- a/src/entropy/entropy_work_contract_group_c.cpp +++ b/src/entropy/entropy_work_contract_group_c.cpp @@ -7,12 +7,13 @@ * This file is part of the Entropy Core project. 
*/ +#include +#include +#include + #include "../../include/entropy/entropy_work_contract_group.h" #include "../Concurrency/WorkContractGroup.h" #include "../Concurrency/WorkGraphTypes.h" -#include -#include -#include using namespace EntropyEngine::Core::Concurrency; @@ -20,14 +21,15 @@ using namespace EntropyEngine::Core::Concurrency; // Internal Helpers // ============================================================================ -namespace { +namespace +{ // Centralized exception translation void translate_exception(EntropyStatus* status) { if (!status) return; try { - throw; // Re-throw current exception + throw; // Re-throw current exception } catch (const std::bad_alloc&) { *status = ENTROPY_ERR_NO_MEMORY; } catch (const std::invalid_argument&) { @@ -35,7 +37,7 @@ void translate_exception(EntropyStatus* status) { } catch (const std::exception&) { *status = ENTROPY_ERR_UNKNOWN; } catch (...) { - std::terminate(); // Unknown exception = programming bug + std::terminate(); // Unknown exception = programming bug } } @@ -62,13 +64,16 @@ inline WorkContractHandle* to_cpp_handle(entropy_WorkContractHandle handle) { // Convert C ExecutionType to C++ enum ExecutionType to_cpp_execution_type(EntropyExecutionType type) { switch (type) { - case ENTROPY_EXEC_ANY_THREAD: return ExecutionType::AnyThread; - case ENTROPY_EXEC_MAIN_THREAD: return ExecutionType::MainThread; - default: return ExecutionType::AnyThread; + case ENTROPY_EXEC_ANY_THREAD: + return ExecutionType::AnyThread; + case ENTROPY_EXEC_MAIN_THREAD: + return ExecutionType::MainThread; + default: + return ExecutionType::AnyThread; } } -} // anonymous namespace +} // anonymous namespace // ============================================================================ // WorkContractGroup C API Implementation @@ -76,11 +81,7 @@ ExecutionType to_cpp_execution_type(EntropyExecutionType type) { extern "C" { -entropy_WorkContractGroup entropy_work_contract_group_create( - size_t capacity, - const char* name, - 
EntropyStatus* status -) { +entropy_WorkContractGroup entropy_work_contract_group_create(size_t capacity, const char* name, EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -91,7 +92,7 @@ entropy_WorkContractGroup entropy_work_contract_group_create( try { std::string group_name = name ? name : "WorkContractGroup"; - auto* group = new(std::nothrow) WorkContractGroup(capacity, group_name); + auto* group = new (std::nothrow) WorkContractGroup(capacity, group_name); if (!group) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -103,9 +104,7 @@ entropy_WorkContractGroup entropy_work_contract_group_create( } } -void entropy_work_contract_group_destroy( - entropy_WorkContractGroup group -) { +void entropy_work_contract_group_destroy(entropy_WorkContractGroup group) { if (!group) return; try { @@ -116,13 +115,10 @@ void entropy_work_contract_group_destroy( } } -entropy_WorkContractHandle entropy_work_contract_group_create_contract( - entropy_WorkContractGroup group, - EntropyWorkCallback callback, - void* user_data, - EntropyExecutionType execution_type, - EntropyStatus* status -) { +entropy_WorkContractHandle entropy_work_contract_group_create_contract(entropy_WorkContractGroup group, + EntropyWorkCallback callback, void* user_data, + EntropyExecutionType execution_type, + EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -153,7 +149,7 @@ entropy_WorkContractHandle entropy_work_contract_group_create_contract( } // Allocate a C++ handle on the heap to return to C code - auto* handle_ptr = new(std::nothrow) WorkContractHandle(cpp_handle); + auto* handle_ptr = new (std::nothrow) WorkContractHandle(cpp_handle); if (!handle_ptr) { // Failed to allocate handle wrapper - need to release the contract cpp_handle.release(); @@ -168,10 +164,7 @@ entropy_WorkContractHandle entropy_work_contract_group_create_contract( } } -void entropy_work_contract_group_wait( - entropy_WorkContractGroup group, - EntropyStatus* status -) 
{ +void entropy_work_contract_group_wait(entropy_WorkContractGroup group, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -188,10 +181,7 @@ void entropy_work_contract_group_wait( } } -void entropy_work_contract_group_stop( - entropy_WorkContractGroup group, - EntropyStatus* status -) { +void entropy_work_contract_group_stop(entropy_WorkContractGroup group, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -208,10 +198,7 @@ void entropy_work_contract_group_stop( } } -void entropy_work_contract_group_resume( - entropy_WorkContractGroup group, - EntropyStatus* status -) { +void entropy_work_contract_group_resume(entropy_WorkContractGroup group, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -228,9 +215,7 @@ void entropy_work_contract_group_resume( } } -EntropyBool entropy_work_contract_group_is_stopping( - entropy_WorkContractGroup group -) { +EntropyBool entropy_work_contract_group_is_stopping(entropy_WorkContractGroup group) { if (!group) return ENTROPY_FALSE; try { @@ -241,9 +226,7 @@ EntropyBool entropy_work_contract_group_is_stopping( } } -size_t entropy_work_contract_group_capacity( - entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_capacity(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -254,9 +237,7 @@ size_t entropy_work_contract_group_capacity( } } -size_t entropy_work_contract_group_active_count( - entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_active_count(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -267,9 +248,7 @@ size_t entropy_work_contract_group_active_count( } } -size_t entropy_work_contract_group_scheduled_count( - entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_scheduled_count(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -280,9 +259,7 @@ size_t entropy_work_contract_group_scheduled_count( } } -size_t entropy_work_contract_group_executing_count( - 
entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_executing_count(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -293,9 +270,7 @@ size_t entropy_work_contract_group_executing_count( } } -size_t entropy_work_contract_group_main_thread_scheduled_count( - entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_main_thread_scheduled_count(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -306,9 +281,7 @@ size_t entropy_work_contract_group_main_thread_scheduled_count( } } -size_t entropy_work_contract_group_main_thread_executing_count( - entropy_WorkContractGroup group -) { +size_t entropy_work_contract_group_main_thread_executing_count(entropy_WorkContractGroup group) { if (!group) return 0; try { @@ -319,9 +292,7 @@ size_t entropy_work_contract_group_main_thread_executing_count( } } -EntropyBool entropy_work_contract_group_has_main_thread_work( - entropy_WorkContractGroup group -) { +EntropyBool entropy_work_contract_group_has_main_thread_work(entropy_WorkContractGroup group) { if (!group) return ENTROPY_FALSE; try { @@ -332,10 +303,8 @@ EntropyBool entropy_work_contract_group_has_main_thread_work( } } -size_t entropy_work_contract_group_execute_all_main_thread_work( - entropy_WorkContractGroup group, - EntropyStatus* status -) { +size_t entropy_work_contract_group_execute_all_main_thread_work(entropy_WorkContractGroup group, + EntropyStatus* status) { if (!status) return 0; *status = ENTROPY_OK; @@ -353,11 +322,8 @@ size_t entropy_work_contract_group_execute_all_main_thread_work( } } -size_t entropy_work_contract_group_execute_main_thread_work( - entropy_WorkContractGroup group, - size_t max_contracts, - EntropyStatus* status -) { +size_t entropy_work_contract_group_execute_main_thread_work(entropy_WorkContractGroup group, size_t max_contracts, + EntropyStatus* status) { if (!status) return 0; *status = ENTROPY_OK; @@ -375,11 +341,8 @@ size_t 
entropy_work_contract_group_execute_main_thread_work( } } -entropy_WorkContractHandle entropy_work_contract_group_select_for_execution( - entropy_WorkContractGroup group, - uint64_t* bias, - EntropyStatus* status -) { +entropy_WorkContractHandle entropy_work_contract_group_select_for_execution(entropy_WorkContractGroup group, + uint64_t* bias, EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -403,7 +366,7 @@ entropy_WorkContractHandle entropy_work_contract_group_select_for_execution( } // Allocate a C++ handle on the heap - auto* handle_ptr = new(std::nothrow) WorkContractHandle(cpp_handle); + auto* handle_ptr = new (std::nothrow) WorkContractHandle(cpp_handle); if (!handle_ptr) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -416,11 +379,8 @@ entropy_WorkContractHandle entropy_work_contract_group_select_for_execution( } } -void entropy_work_contract_group_execute_contract( - entropy_WorkContractGroup group, - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +void entropy_work_contract_group_execute_contract(entropy_WorkContractGroup group, entropy_WorkContractHandle handle, + EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -438,11 +398,8 @@ void entropy_work_contract_group_execute_contract( } } -void entropy_work_contract_group_complete_execution( - entropy_WorkContractGroup group, - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +void entropy_work_contract_group_complete_execution(entropy_WorkContractGroup group, entropy_WorkContractHandle handle, + EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -464,4 +421,4 @@ void entropy_work_contract_group_complete_execution( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_work_contract_handle_c.cpp b/src/entropy/entropy_work_contract_handle_c.cpp index d2790a6..fd96fb6 100644 --- a/src/entropy/entropy_work_contract_handle_c.cpp +++ b/src/entropy/entropy_work_contract_handle_c.cpp @@ -7,25 
+7,27 @@ * This file is part of the Entropy Core project. */ -#include "../../include/entropy/entropy_work_contract_handle.h" -#include "../Concurrency/WorkContractHandle.h" #include #include +#include "../../include/entropy/entropy_work_contract_handle.h" +#include "../Concurrency/WorkContractHandle.h" + using namespace EntropyEngine::Core::Concurrency; // ============================================================================ // Internal Helpers // ============================================================================ -namespace { +namespace +{ // Centralized exception translation for WorkContractHandle operations void translate_exception(EntropyStatus* status) { if (!status) return; try { - throw; // Re-throw current exception + throw; // Re-throw current exception } catch (const std::bad_alloc&) { *status = ENTROPY_ERR_NO_MEMORY; } catch (const std::invalid_argument&) { @@ -33,7 +35,7 @@ void translate_exception(EntropyStatus* status) { } catch (const std::exception&) { *status = ENTROPY_ERR_UNKNOWN; } catch (...) 
{ - std::terminate(); // Unknown exception = programming bug + std::terminate(); // Unknown exception = programming bug } } @@ -50,28 +52,40 @@ inline entropy_WorkContractHandle to_c(WorkContractHandle* handle) { // Convert C++ ScheduleResult to C enum EntropyScheduleResult to_c_schedule_result(ScheduleResult result) { switch (result) { - case ScheduleResult::Scheduled: return ENTROPY_SCHEDULE_SCHEDULED; - case ScheduleResult::AlreadyScheduled: return ENTROPY_SCHEDULE_ALREADY_SCHEDULED; - case ScheduleResult::NotScheduled: return ENTROPY_SCHEDULE_NOT_SCHEDULED; - case ScheduleResult::Executing: return ENTROPY_SCHEDULE_EXECUTING; - case ScheduleResult::Invalid: return ENTROPY_SCHEDULE_INVALID; - default: return ENTROPY_SCHEDULE_INVALID; + case ScheduleResult::Scheduled: + return ENTROPY_SCHEDULE_SCHEDULED; + case ScheduleResult::AlreadyScheduled: + return ENTROPY_SCHEDULE_ALREADY_SCHEDULED; + case ScheduleResult::NotScheduled: + return ENTROPY_SCHEDULE_NOT_SCHEDULED; + case ScheduleResult::Executing: + return ENTROPY_SCHEDULE_EXECUTING; + case ScheduleResult::Invalid: + return ENTROPY_SCHEDULE_INVALID; + default: + return ENTROPY_SCHEDULE_INVALID; } } // Convert C++ ContractState to C enum EntropyContractState to_c_contract_state(ContractState state) { switch (state) { - case ContractState::Free: return ENTROPY_CONTRACT_FREE; - case ContractState::Allocated: return ENTROPY_CONTRACT_ALLOCATED; - case ContractState::Scheduled: return ENTROPY_CONTRACT_SCHEDULED; - case ContractState::Executing: return ENTROPY_CONTRACT_EXECUTING; - case ContractState::Completed: return ENTROPY_CONTRACT_COMPLETED; - default: return ENTROPY_CONTRACT_FREE; + case ContractState::Free: + return ENTROPY_CONTRACT_FREE; + case ContractState::Allocated: + return ENTROPY_CONTRACT_ALLOCATED; + case ContractState::Scheduled: + return ENTROPY_CONTRACT_SCHEDULED; + case ContractState::Executing: + return ENTROPY_CONTRACT_EXECUTING; + case ContractState::Completed: + return ENTROPY_CONTRACT_COMPLETED; 
+ default: + return ENTROPY_CONTRACT_FREE; } } -} // anonymous namespace +} // anonymous namespace // ============================================================================ // WorkContractHandle C API Implementation @@ -79,10 +93,7 @@ EntropyContractState to_c_contract_state(ContractState state) { extern "C" { -EntropyScheduleResult entropy_work_contract_schedule( - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +EntropyScheduleResult entropy_work_contract_schedule(entropy_WorkContractHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_SCHEDULE_INVALID; *status = ENTROPY_OK; @@ -101,10 +112,7 @@ EntropyScheduleResult entropy_work_contract_schedule( } } -EntropyScheduleResult entropy_work_contract_unschedule( - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +EntropyScheduleResult entropy_work_contract_unschedule(entropy_WorkContractHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_SCHEDULE_INVALID; *status = ENTROPY_OK; @@ -123,9 +131,7 @@ EntropyScheduleResult entropy_work_contract_unschedule( } } -EntropyBool entropy_work_contract_is_valid( - entropy_WorkContractHandle handle -) { +EntropyBool entropy_work_contract_is_valid(entropy_WorkContractHandle handle) { if (!handle) return ENTROPY_FALSE; try { @@ -136,9 +142,7 @@ EntropyBool entropy_work_contract_is_valid( } } -void entropy_work_contract_release( - entropy_WorkContractHandle handle -) { +void entropy_work_contract_release(entropy_WorkContractHandle handle) { if (!handle) return; try { @@ -152,9 +156,7 @@ void entropy_work_contract_release( } } -void entropy_work_contract_handle_destroy( - entropy_WorkContractHandle handle -) { +void entropy_work_contract_handle_destroy(entropy_WorkContractHandle handle) { if (!handle) return; // Delete the heap-allocated wrapper created by entropy_work_contract_group_create_contract @@ -162,10 +164,7 @@ void entropy_work_contract_handle_destroy( delete cpp_handle; } -EntropyBool 
entropy_work_contract_is_scheduled( - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +EntropyBool entropy_work_contract_is_scheduled(entropy_WorkContractHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_FALSE; *status = ENTROPY_OK; @@ -183,10 +182,7 @@ EntropyBool entropy_work_contract_is_scheduled( } } -EntropyBool entropy_work_contract_is_executing( - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +EntropyBool entropy_work_contract_is_executing(entropy_WorkContractHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_FALSE; *status = ENTROPY_OK; @@ -204,10 +200,7 @@ EntropyBool entropy_work_contract_is_executing( } } -EntropyContractState entropy_work_contract_get_state( - entropy_WorkContractHandle handle, - EntropyStatus* status -) { +EntropyContractState entropy_work_contract_get_state(entropy_WorkContractHandle handle, EntropyStatus* status) { if (!status) return ENTROPY_CONTRACT_FREE; *status = ENTROPY_OK; @@ -239,4 +232,4 @@ EntropyContractState entropy_work_contract_get_state( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_work_graph_c.cpp b/src/entropy/entropy_work_graph_c.cpp index ab4ce2e..abb4c30 100644 --- a/src/entropy/entropy_work_graph_c.cpp +++ b/src/entropy/entropy_work_graph_c.cpp @@ -7,13 +7,14 @@ * This file is part of the Entropy Core project. 
*/ +#include +#include +#include + #include "../../include/entropy/entropy_work_graph.h" +#include "../Concurrency/WorkContractGroup.h" #include "../Concurrency/WorkGraph.h" #include "../Concurrency/WorkGraphTypes.h" -#include "../Concurrency/WorkContractGroup.h" -#include -#include -#include using namespace EntropyEngine::Core::Concurrency; @@ -21,14 +22,15 @@ using namespace EntropyEngine::Core::Concurrency; // Internal Helpers // ============================================================================ -namespace { +namespace +{ // Centralized exception translation void translate_exception(EntropyStatus* status) { if (!status) return; try { - throw; // Re-throw current exception + throw; // Re-throw current exception } catch (const std::bad_alloc&) { *status = ENTROPY_ERR_NO_MEMORY; } catch (const std::invalid_argument&) { @@ -36,7 +38,7 @@ void translate_exception(EntropyStatus* status) { } catch (const std::exception&) { *status = ENTROPY_ERR_UNKNOWN; } catch (...) { - std::terminate(); // Unknown exception = programming bug + std::terminate(); // Unknown exception = programming bug } } @@ -63,43 +65,62 @@ inline entropy_NodeHandle to_c_node(WorkGraph::NodeHandle* handle) { // Convert C ExecutionType to C++ enum ExecutionType to_cpp_execution_type(EntropyExecutionType type) { switch (type) { - case ENTROPY_EXEC_ANY_THREAD: return ExecutionType::AnyThread; - case ENTROPY_EXEC_MAIN_THREAD: return ExecutionType::MainThread; - default: return ExecutionType::AnyThread; + case ENTROPY_EXEC_ANY_THREAD: + return ExecutionType::AnyThread; + case ENTROPY_EXEC_MAIN_THREAD: + return ExecutionType::MainThread; + default: + return ExecutionType::AnyThread; } } // Convert C++ NodeState to C enum EntropyNodeState to_c_node_state(NodeState state) { switch (state) { - case NodeState::Pending: return ENTROPY_NODE_PENDING; - case NodeState::Ready: return ENTROPY_NODE_READY; - case NodeState::Scheduled: return ENTROPY_NODE_SCHEDULED; - case NodeState::Executing: return 
ENTROPY_NODE_EXECUTING; - case NodeState::Completed: return ENTROPY_NODE_COMPLETED; - case NodeState::Failed: return ENTROPY_NODE_FAILED; - case NodeState::Cancelled: return ENTROPY_NODE_CANCELLED; - case NodeState::Yielded: return ENTROPY_NODE_YIELDED; - default: return ENTROPY_NODE_PENDING; + case NodeState::Pending: + return ENTROPY_NODE_PENDING; + case NodeState::Ready: + return ENTROPY_NODE_READY; + case NodeState::Scheduled: + return ENTROPY_NODE_SCHEDULED; + case NodeState::Executing: + return ENTROPY_NODE_EXECUTING; + case NodeState::Completed: + return ENTROPY_NODE_COMPLETED; + case NodeState::Failed: + return ENTROPY_NODE_FAILED; + case NodeState::Cancelled: + return ENTROPY_NODE_CANCELLED; + case NodeState::Yielded: + return ENTROPY_NODE_YIELDED; + default: + return ENTROPY_NODE_PENDING; } } // Convert C++ WorkResult to C enum EntropyWorkResult to_c_work_result(WorkResult result) { switch (result) { - case WorkResult::Complete: return ENTROPY_WORK_COMPLETE; - case WorkResult::Yield: return ENTROPY_WORK_YIELD; - case WorkResult::YieldUntil: return ENTROPY_WORK_YIELD; // C API doesn't support timed yields yet - default: return ENTROPY_WORK_COMPLETE; + case WorkResult::Complete: + return ENTROPY_WORK_COMPLETE; + case WorkResult::Yield: + return ENTROPY_WORK_YIELD; + case WorkResult::YieldUntil: + return ENTROPY_WORK_YIELD; // C API doesn't support timed yields yet + default: + return ENTROPY_WORK_COMPLETE; } } // Convert C WorkResult to C++ WorkResultContext WorkResultContext to_cpp_work_result_context(EntropyWorkResult result) { switch (result) { - case ENTROPY_WORK_COMPLETE: return WorkResultContext::complete(); - case ENTROPY_WORK_YIELD: return WorkResultContext::yield(); - default: return WorkResultContext::complete(); + case ENTROPY_WORK_COMPLETE: + return WorkResultContext::complete(); + case ENTROPY_WORK_YIELD: + return WorkResultContext::yield(); + default: + return WorkResultContext::complete(); } } @@ -120,7 +141,7 @@ EntropyWorkGraphConfig 
to_c_config(const WorkGraphConfig& config) { // Convert C config to C++ WorkGraphConfig WorkGraphConfig to_cpp_config(const EntropyWorkGraphConfig* config) { WorkGraphConfig cpp_config; - if (!config) return cpp_config; // Return defaults + if (!config) return cpp_config; // Return defaults cpp_config.enableEvents = config->enable_events != ENTROPY_FALSE; cpp_config.enableStateManager = config->enable_state_manager != ENTROPY_FALSE; @@ -133,7 +154,7 @@ WorkGraphConfig to_cpp_config(const EntropyWorkGraphConfig* config) { return cpp_config; } -} // anonymous namespace +} // anonymous namespace // ============================================================================ // Helper Functions Implementation @@ -156,23 +177,35 @@ void entropy_work_graph_config_init(EntropyWorkGraphConfig* config) { const char* entropy_node_state_to_string(EntropyNodeState state) { switch (state) { - case ENTROPY_NODE_PENDING: return "Pending"; - case ENTROPY_NODE_READY: return "Ready"; - case ENTROPY_NODE_SCHEDULED: return "Scheduled"; - case ENTROPY_NODE_EXECUTING: return "Executing"; - case ENTROPY_NODE_COMPLETED: return "Completed"; - case ENTROPY_NODE_FAILED: return "Failed"; - case ENTROPY_NODE_CANCELLED: return "Cancelled"; - case ENTROPY_NODE_YIELDED: return "Yielded"; - default: return "Unknown"; + case ENTROPY_NODE_PENDING: + return "Pending"; + case ENTROPY_NODE_READY: + return "Ready"; + case ENTROPY_NODE_SCHEDULED: + return "Scheduled"; + case ENTROPY_NODE_EXECUTING: + return "Executing"; + case ENTROPY_NODE_COMPLETED: + return "Completed"; + case ENTROPY_NODE_FAILED: + return "Failed"; + case ENTROPY_NODE_CANCELLED: + return "Cancelled"; + case ENTROPY_NODE_YIELDED: + return "Yielded"; + default: + return "Unknown"; } } const char* entropy_work_result_to_string(EntropyWorkResult result) { switch (result) { - case ENTROPY_WORK_COMPLETE: return "Complete"; - case ENTROPY_WORK_YIELD: return "Yield"; - default: return "Unknown"; + case ENTROPY_WORK_COMPLETE: + return 
"Complete"; + case ENTROPY_WORK_YIELD: + return "Yield"; + default: + return "Unknown"; } } @@ -180,10 +213,7 @@ const char* entropy_work_result_to_string(EntropyWorkResult result) { // WorkGraph Lifecycle Implementation // ============================================================================ -entropy_WorkGraph entropy_work_graph_create( - entropy_WorkContractGroup work_group, - EntropyStatus* status -) { +entropy_WorkGraph entropy_work_graph_create(entropy_WorkContractGroup work_group, EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -194,7 +224,7 @@ entropy_WorkGraph entropy_work_graph_create( try { WorkContractGroup* cpp_group = reinterpret_cast(work_group); - auto* graph = new(std::nothrow) WorkGraph(cpp_group); + auto* graph = new (std::nothrow) WorkGraph(cpp_group); if (!graph) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -206,11 +236,8 @@ entropy_WorkGraph entropy_work_graph_create( } } -entropy_WorkGraph entropy_work_graph_create_with_config( - entropy_WorkContractGroup work_group, - const EntropyWorkGraphConfig* config, - EntropyStatus* status -) { +entropy_WorkGraph entropy_work_graph_create_with_config(entropy_WorkContractGroup work_group, + const EntropyWorkGraphConfig* config, EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -223,7 +250,7 @@ entropy_WorkGraph entropy_work_graph_create_with_config( WorkContractGroup* cpp_group = reinterpret_cast(work_group); WorkGraphConfig cpp_config = to_cpp_config(config); - auto* graph = new(std::nothrow) WorkGraph(cpp_group, cpp_config); + auto* graph = new (std::nothrow) WorkGraph(cpp_group, cpp_config); if (!graph) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -235,9 +262,7 @@ entropy_WorkGraph entropy_work_graph_create_with_config( } } -void entropy_work_graph_destroy( - entropy_WorkGraph graph -) { +void entropy_work_graph_destroy(entropy_WorkGraph graph) { if (!graph) return; try { @@ -252,14 +277,9 @@ void 
entropy_work_graph_destroy( // Node Creation Implementation // ============================================================================ -entropy_NodeHandle entropy_work_graph_add_node( - entropy_WorkGraph graph, - EntropyWorkCallback callback, - void* user_data, - const char* name, - EntropyExecutionType execution_type, - EntropyStatus* status -) { +entropy_NodeHandle entropy_work_graph_add_node(entropy_WorkGraph graph, EntropyWorkCallback callback, void* user_data, + const char* name, EntropyExecutionType execution_type, + EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -272,20 +292,16 @@ entropy_NodeHandle entropy_work_graph_add_node( WorkGraph* cpp_graph = to_cpp(graph); // Wrap the C callback in a std::function - std::function work = [callback, user_data]() noexcept { - callback(user_data); - }; + std::function work = [callback, user_data]() noexcept { callback(user_data); }; std::string node_name = name ? name : ""; ExecutionType cpp_exec_type = to_cpp_execution_type(execution_type); // Add the node - WorkGraph::NodeHandle cpp_handle = cpp_graph->addNode( - work, node_name, user_data, cpp_exec_type - ); + WorkGraph::NodeHandle cpp_handle = cpp_graph->addNode(work, node_name, user_data, cpp_exec_type); // Allocate a C++ handle on the heap to return to C code - auto* handle_ptr = new(std::nothrow) WorkGraph::NodeHandle(cpp_handle); + auto* handle_ptr = new (std::nothrow) WorkGraph::NodeHandle(cpp_handle); if (!handle_ptr) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -298,15 +314,10 @@ entropy_NodeHandle entropy_work_graph_add_node( } } -entropy_NodeHandle entropy_work_graph_add_yieldable_node( - entropy_WorkGraph graph, - EntropyYieldableWorkCallback callback, - void* user_data, - const char* name, - EntropyExecutionType execution_type, - uint32_t max_reschedules, - EntropyStatus* status -) { +entropy_NodeHandle entropy_work_graph_add_yieldable_node(entropy_WorkGraph graph, EntropyYieldableWorkCallback callback, + 
void* user_data, const char* name, + EntropyExecutionType execution_type, uint32_t max_reschedules, + EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -331,12 +342,11 @@ entropy_NodeHandle entropy_work_graph_add_yieldable_node( (max_reschedules > 0) ? std::optional(max_reschedules) : std::nullopt; // Add the yieldable node - WorkGraph::NodeHandle cpp_handle = cpp_graph->addYieldableNode( - work, node_name, user_data, cpp_exec_type, max_reschedule_opt - ); + WorkGraph::NodeHandle cpp_handle = + cpp_graph->addYieldableNode(work, node_name, user_data, cpp_exec_type, max_reschedule_opt); // Allocate a C++ handle on the heap - auto* handle_ptr = new(std::nothrow) WorkGraph::NodeHandle(cpp_handle); + auto* handle_ptr = new (std::nothrow) WorkGraph::NodeHandle(cpp_handle); if (!handle_ptr) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -353,12 +363,8 @@ entropy_NodeHandle entropy_work_graph_add_yieldable_node( // Dependency Management Implementation // ============================================================================ -void entropy_work_graph_add_dependency( - entropy_WorkGraph graph, - entropy_NodeHandle from, - entropy_NodeHandle to, - EntropyStatus* status -) { +void entropy_work_graph_add_dependency(entropy_WorkGraph graph, entropy_NodeHandle from, entropy_NodeHandle to, + EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -382,10 +388,7 @@ void entropy_work_graph_add_dependency( // Execution Control Implementation // ============================================================================ -void entropy_work_graph_execute( - entropy_WorkGraph graph, - EntropyStatus* status -) { +void entropy_work_graph_execute(entropy_WorkGraph graph, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -402,10 +405,7 @@ void entropy_work_graph_execute( } } -void entropy_work_graph_suspend( - entropy_WorkGraph graph, - EntropyStatus* status -) { +void entropy_work_graph_suspend(entropy_WorkGraph 
graph, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -422,10 +422,7 @@ void entropy_work_graph_suspend( } } -void entropy_work_graph_resume( - entropy_WorkGraph graph, - EntropyStatus* status -) { +void entropy_work_graph_resume(entropy_WorkGraph graph, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -442,9 +439,7 @@ void entropy_work_graph_resume( } } -EntropyBool entropy_work_graph_is_suspended( - entropy_WorkGraph graph -) { +EntropyBool entropy_work_graph_is_suspended(entropy_WorkGraph graph) { if (!graph) return ENTROPY_FALSE; try { @@ -455,11 +450,7 @@ EntropyBool entropy_work_graph_is_suspended( } } -void entropy_work_graph_wait( - entropy_WorkGraph graph, - EntropyWaitResult* result, - EntropyStatus* status -) { +void entropy_work_graph_wait(entropy_WorkGraph graph, EntropyWaitResult* result, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -482,9 +473,7 @@ void entropy_work_graph_wait( } } -EntropyBool entropy_work_graph_is_complete( - entropy_WorkGraph graph -) { +EntropyBool entropy_work_graph_is_complete(entropy_WorkGraph graph) { if (!graph) return ENTROPY_FALSE; try { @@ -499,11 +488,7 @@ EntropyBool entropy_work_graph_is_complete( // Statistics and Monitoring Implementation // ============================================================================ -void entropy_work_graph_get_stats( - entropy_WorkGraph graph, - EntropyWorkGraphStats* stats, - EntropyStatus* status -) { +void entropy_work_graph_get_stats(entropy_WorkGraph graph, EntropyWorkGraphStats* stats, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -531,9 +516,7 @@ void entropy_work_graph_get_stats( } } -uint32_t entropy_work_graph_get_pending_count( - entropy_WorkGraph graph -) { +uint32_t entropy_work_graph_get_pending_count(entropy_WorkGraph graph) { if (!graph) return 0; try { @@ -548,10 +531,7 @@ uint32_t entropy_work_graph_get_pending_count( // Node Handle Operations Implementation // 
============================================================================ -EntropyBool entropy_node_handle_is_valid( - entropy_WorkGraph graph, - entropy_NodeHandle handle -) { +EntropyBool entropy_node_handle_is_valid(entropy_WorkGraph graph, entropy_NodeHandle handle) { if (!graph || !handle) return ENTROPY_FALSE; try { @@ -563,11 +543,8 @@ EntropyBool entropy_node_handle_is_valid( } } -EntropyNodeState entropy_node_handle_get_state( - entropy_WorkGraph graph, - entropy_NodeHandle handle, - EntropyStatus* status -) { +EntropyNodeState entropy_node_handle_get_state(entropy_WorkGraph graph, entropy_NodeHandle handle, + EntropyStatus* status) { if (!status) return ENTROPY_NODE_PENDING; *status = ENTROPY_OK; @@ -593,10 +570,7 @@ EntropyNodeState entropy_node_handle_get_state( } } -const char* entropy_node_handle_get_name( - entropy_WorkGraph graph, - entropy_NodeHandle handle -) { +const char* entropy_node_handle_get_name(entropy_WorkGraph graph, entropy_NodeHandle handle) { if (!graph || !handle) return nullptr; try { @@ -612,9 +586,7 @@ const char* entropy_node_handle_get_name( } } -void entropy_node_handle_destroy( - entropy_NodeHandle handle -) { +void entropy_node_handle_destroy(entropy_NodeHandle handle) { if (!handle) return; try { @@ -625,4 +597,4 @@ void entropy_node_handle_destroy( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_work_service_c.cpp b/src/entropy/entropy_work_service_c.cpp index 917ae2c..5881054 100644 --- a/src/entropy/entropy_work_service_c.cpp +++ b/src/entropy/entropy_work_service_c.cpp @@ -7,11 +7,12 @@ * This file is part of the Entropy Core project. 
*/ +#include +#include + #include "../../include/entropy/entropy_work_service.h" -#include "../Concurrency/WorkService.h" #include "../Concurrency/WorkContractGroup.h" -#include -#include +#include "../Concurrency/WorkService.h" using namespace EntropyEngine::Core::Concurrency; @@ -19,14 +20,15 @@ using namespace EntropyEngine::Core::Concurrency; // Internal Helpers // ============================================================================ -namespace { +namespace +{ // Centralized exception translation void translate_exception(EntropyStatus* status) { if (!status) return; try { - throw; // Re-throw current exception + throw; // Re-throw current exception } catch (const std::bad_alloc&) { *status = ENTROPY_ERR_NO_MEMORY; } catch (const std::invalid_argument&) { @@ -34,7 +36,7 @@ void translate_exception(EntropyStatus* status) { } catch (const std::exception&) { *status = ENTROPY_ERR_UNKNOWN; } catch (...) { - std::terminate(); // Unknown exception = programming bug + std::terminate(); // Unknown exception = programming bug } } @@ -64,15 +66,14 @@ inline WorkContractGroup* to_cpp_group(entropy_WorkContractGroup group) { } // Convert C++ MainThreadWorkResult to C struct -void to_c_result(const WorkService::MainThreadWorkResult& cpp_result, - EntropyMainThreadWorkResult* c_result) { +void to_c_result(const WorkService::MainThreadWorkResult& cpp_result, EntropyMainThreadWorkResult* c_result) { if (!c_result) return; c_result->contracts_executed = cpp_result.contractsExecuted; c_result->groups_with_work = cpp_result.groupsWithWork; c_result->more_work_available = cpp_result.moreWorkAvailable ? 
ENTROPY_TRUE : ENTROPY_FALSE; } -} // anonymous namespace +} // anonymous namespace // ============================================================================ // WorkService C API Implementation @@ -80,10 +81,7 @@ void to_c_result(const WorkService::MainThreadWorkResult& cpp_result, extern "C" { -entropy_WorkService entropy_work_service_create( - const EntropyWorkServiceConfig* config, - EntropyStatus* status -) { +entropy_WorkService entropy_work_service_create(const EntropyWorkServiceConfig* config, EntropyStatus* status) { if (!status) return nullptr; *status = ENTROPY_OK; @@ -94,7 +92,7 @@ entropy_WorkService entropy_work_service_create( try { WorkService::Config cpp_config = to_cpp_config(config); - auto* service = new(std::nothrow) WorkService(cpp_config); + auto* service = new (std::nothrow) WorkService(cpp_config); if (!service) { *status = ENTROPY_ERR_NO_MEMORY; return nullptr; @@ -106,9 +104,7 @@ entropy_WorkService entropy_work_service_create( } } -void entropy_work_service_destroy( - entropy_WorkService service -) { +void entropy_work_service_destroy(entropy_WorkService service) { if (!service) return; try { @@ -119,10 +115,7 @@ void entropy_work_service_destroy( } } -void entropy_work_service_start( - entropy_WorkService service, - EntropyStatus* status -) { +void entropy_work_service_start(entropy_WorkService service, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -143,10 +136,7 @@ void entropy_work_service_start( } } -void entropy_work_service_request_stop( - entropy_WorkService service, - EntropyStatus* status -) { +void entropy_work_service_request_stop(entropy_WorkService service, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -163,10 +153,7 @@ void entropy_work_service_request_stop( } } -void entropy_work_service_wait_for_stop( - entropy_WorkService service, - EntropyStatus* status -) { +void entropy_work_service_wait_for_stop(entropy_WorkService service, EntropyStatus* status) { if (!status) 
return; *status = ENTROPY_OK; @@ -183,10 +170,7 @@ void entropy_work_service_wait_for_stop( } } -void entropy_work_service_stop( - entropy_WorkService service, - EntropyStatus* status -) { +void entropy_work_service_stop(entropy_WorkService service, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -203,9 +187,7 @@ void entropy_work_service_stop( } } -EntropyBool entropy_work_service_is_running( - entropy_WorkService service -) { +EntropyBool entropy_work_service_is_running(entropy_WorkService service) { if (!service) return ENTROPY_FALSE; try { @@ -216,11 +198,8 @@ EntropyBool entropy_work_service_is_running( } } -void entropy_work_service_add_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - EntropyStatus* status -) { +void entropy_work_service_add_group(entropy_WorkService service, entropy_WorkContractGroup group, + EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -253,11 +232,8 @@ void entropy_work_service_add_group( } } -void entropy_work_service_remove_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - EntropyStatus* status -) { +void entropy_work_service_remove_group(entropy_WorkService service, entropy_WorkContractGroup group, + EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -287,10 +263,7 @@ void entropy_work_service_remove_group( } } -void entropy_work_service_clear( - entropy_WorkService service, - EntropyStatus* status -) { +void entropy_work_service_clear(entropy_WorkService service, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -311,9 +284,7 @@ void entropy_work_service_clear( } } -size_t entropy_work_service_get_group_count( - entropy_WorkService service -) { +size_t entropy_work_service_get_group_count(entropy_WorkService service) { if (!service) return 0; try { @@ -324,9 +295,7 @@ size_t entropy_work_service_get_group_count( } } -size_t entropy_work_service_get_thread_count( - entropy_WorkService service -) { 
+size_t entropy_work_service_get_thread_count(entropy_WorkService service) { if (!service) return 0; try { @@ -337,12 +306,8 @@ size_t entropy_work_service_get_thread_count( } } -void entropy_work_service_execute_main_thread_work( - entropy_WorkService service, - size_t max_contracts, - EntropyMainThreadWorkResult* result, - EntropyStatus* status -) { +void entropy_work_service_execute_main_thread_work(entropy_WorkService service, size_t max_contracts, + EntropyMainThreadWorkResult* result, EntropyStatus* status) { if (!status) return; *status = ENTROPY_OK; @@ -364,12 +329,9 @@ void entropy_work_service_execute_main_thread_work( } } -size_t entropy_work_service_execute_main_thread_work_from_group( - entropy_WorkService service, - entropy_WorkContractGroup group, - size_t max_contracts, - EntropyStatus* status -) { +size_t entropy_work_service_execute_main_thread_work_from_group(entropy_WorkService service, + entropy_WorkContractGroup group, size_t max_contracts, + EntropyStatus* status) { if (!status) return 0; *status = ENTROPY_OK; @@ -389,9 +351,7 @@ size_t entropy_work_service_execute_main_thread_work_from_group( } } -EntropyBool entropy_work_service_has_main_thread_work( - entropy_WorkService service -) { +EntropyBool entropy_work_service_has_main_thread_work(entropy_WorkService service) { if (!service) return ENTROPY_FALSE; try { @@ -402,4 +362,4 @@ EntropyBool entropy_work_service_has_main_thread_work( } } -} // extern "C" +} // extern "C" diff --git a/src/entropy/entropy_write_batch_c.cpp b/src/entropy/entropy_write_batch_c.cpp index 05b3d22..c3caf98 100644 --- a/src/entropy/entropy_write_batch_c.cpp +++ b/src/entropy/entropy_write_batch_c.cpp @@ -3,11 +3,12 @@ * @brief Implementation of WriteBatch C API */ -#include "entropy/entropy_write_batch.h" -#include "VirtualFileSystem/WriteBatch.h" +#include + #include "VirtualFileSystem/FileOperationHandle.h" #include "VirtualFileSystem/IFileSystemBackend.h" -#include +#include "VirtualFileSystem/WriteBatch.h" 
+#include "entropy/entropy_write_batch.h" using namespace EntropyEngine::Core::IO; @@ -81,12 +82,8 @@ void entropy_write_batch_destroy(entropy_WriteBatch batch) { delete cpp_batch; } -void entropy_write_batch_write_line( - entropy_WriteBatch batch, - size_t line_number, - const char* content, - EntropyStatus* status -) { +void entropy_write_batch_write_line(entropy_WriteBatch batch, size_t line_number, const char* content, + EntropyStatus* status) { if (!status) return; if (!batch || !content) { *status = ENTROPY_ERR_INVALID_ARG; @@ -102,12 +99,8 @@ void entropy_write_batch_write_line( } } -void entropy_write_batch_insert_line( - entropy_WriteBatch batch, - size_t line_number, - const char* content, - EntropyStatus* status -) { +void entropy_write_batch_insert_line(entropy_WriteBatch batch, size_t line_number, const char* content, + EntropyStatus* status) { if (!status) return; if (!batch || !content) { *status = ENTROPY_ERR_INVALID_ARG; @@ -123,11 +116,7 @@ void entropy_write_batch_insert_line( } } -void entropy_write_batch_delete_line( - entropy_WriteBatch batch, - size_t line_number, - EntropyStatus* status -) { +void entropy_write_batch_delete_line(entropy_WriteBatch batch, size_t line_number, EntropyStatus* status) { if (!status) return; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -143,11 +132,7 @@ void entropy_write_batch_delete_line( } } -void entropy_write_batch_append_line( - entropy_WriteBatch batch, - const char* content, - EntropyStatus* status -) { +void entropy_write_batch_append_line(entropy_WriteBatch batch, const char* content, EntropyStatus* status) { if (!status) return; if (!batch || !content) { *status = ENTROPY_ERR_INVALID_ARG; @@ -163,11 +148,7 @@ void entropy_write_batch_append_line( } } -void entropy_write_batch_replace_all( - entropy_WriteBatch batch, - const char* content, - EntropyStatus* status -) { +void entropy_write_batch_replace_all(entropy_WriteBatch batch, const char* content, EntropyStatus* status) { if (!status) return; 
if (!batch || !content) { *status = ENTROPY_ERR_INVALID_ARG; @@ -183,10 +164,7 @@ void entropy_write_batch_replace_all( } } -void entropy_write_batch_clear( - entropy_WriteBatch batch, - EntropyStatus* status -) { +void entropy_write_batch_clear(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -202,10 +180,7 @@ void entropy_write_batch_clear( } } -entropy_FileOperationHandle entropy_write_batch_commit( - entropy_WriteBatch batch, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_write_batch_commit(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return nullptr; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -223,11 +198,9 @@ entropy_FileOperationHandle entropy_write_batch_commit( } } -entropy_FileOperationHandle entropy_write_batch_commit_with_options( - entropy_WriteBatch batch, - const EntropyWriteOptions* options, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_write_batch_commit_with_options(entropy_WriteBatch batch, + const EntropyWriteOptions* options, + EntropyStatus* status) { if (!status) return nullptr; if (!batch || !options) { *status = ENTROPY_ERR_INVALID_ARG; @@ -246,10 +219,7 @@ entropy_FileOperationHandle entropy_write_batch_commit_with_options( } } -entropy_FileOperationHandle entropy_write_batch_preview( - entropy_WriteBatch batch, - EntropyStatus* status -) { +entropy_FileOperationHandle entropy_write_batch_preview(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return nullptr; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -267,10 +237,7 @@ entropy_FileOperationHandle entropy_write_batch_preview( } } -size_t entropy_write_batch_pending_operations( - entropy_WriteBatch batch, - EntropyStatus* status -) { +size_t entropy_write_batch_pending_operations(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return 0; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -287,10 +254,7 @@ size_t 
entropy_write_batch_pending_operations( } } -EntropyBool entropy_write_batch_is_empty( - entropy_WriteBatch batch, - EntropyStatus* status -) { +EntropyBool entropy_write_batch_is_empty(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return ENTROPY_FALSE; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -307,10 +271,7 @@ EntropyBool entropy_write_batch_is_empty( } } -void entropy_write_batch_reset( - entropy_WriteBatch batch, - EntropyStatus* status -) { +void entropy_write_batch_reset(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -326,10 +287,7 @@ void entropy_write_batch_reset( } } -const char* entropy_write_batch_get_path( - entropy_WriteBatch batch, - EntropyStatus* status -) { +const char* entropy_write_batch_get_path(entropy_WriteBatch batch, EntropyStatus* status) { if (!status) return nullptr; if (!batch) { *status = ENTROPY_ERR_INVALID_ARG; @@ -346,4 +304,4 @@ const char* entropy_write_batch_get_path( } } -} // extern "C" +} // extern "C" diff --git a/tools/format_code.sh b/tools/format_code.sh new file mode 100755 index 0000000..db005a3 --- /dev/null +++ b/tools/format_code.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Find and format all C/C++/ObjC files +# Excludes build directories and submodules/vcpkg artifacts via -prune if commonly named, +# but relying on hidden dirs mainly. +# Better leverage git ls-files if inside a repo, or find for general usage. +# Using find to be robust if git is not initialized or specific subdirs are desired. + +DIRS="." +if [ "$#" -gt 0 ]; then + DIRS="$@" +fi + +echo "Formatting code in: $DIRS" + +# Check for clang-format +if ! command -v clang-format &> /dev/null; then + echo "clang-format not found. Please install it (e.g., brew install llvm)." 
+ exit 1 +fi + +find $DIRS \ + \( -name "*.cpp" -o -name "*.h" -o -name "*.c" -o -name "*.hpp" -o -name "*.cc" -o -name "*.mm" -o -name "*.m" \) \ + -not -path "*/build/*" \ + -not -path "*/cmake-build-*" \ + -not -path "*/submodules/*" \ + -not -path "*/vcpkg_installed/*" \ + -not -path "*/.git/*" \ + -print0 | xargs -0 clang-format -i -style=file + +echo "Formatting complete." diff --git a/tools/lint_code.sh b/tools/lint_code.sh new file mode 100755 index 0000000..a71def7 --- /dev/null +++ b/tools/lint_code.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# Run clang-tidy +# Usage: ./tools/lint_code.sh [build_dir] + +BUILD_DIR="build" +if [ "$#" -gt 0 ]; then + BUILD_DIR="$1" +fi + +if [ ! -f "$BUILD_DIR/compile_commands.json" ]; then + echo "Error: compile_commands.json not found in $BUILD_DIR" + echo "Please configure the project with CMAKE_EXPORT_COMPILE_COMMANDS=ON first." + echo "Example: cmake -S . -B $BUILD_DIR -DCMAKE_EXPORT_COMPILE_COMMANDS=ON" + exit 1 +fi + +echo "Running clang-tidy using build directory: $BUILD_DIR" + +# Check for clang-tidy +if ! command -v clang-tidy &> /dev/null; then + # Try finding it in brew paths if not in PATH + if [ -f "/opt/homebrew/opt/llvm/bin/clang-tidy" ]; then + CLANG_TIDY="/opt/homebrew/opt/llvm/bin/clang-tidy" + elif [ -f "/usr/local/opt/llvm/bin/clang-tidy" ]; then + CLANG_TIDY="/usr/local/opt/llvm/bin/clang-tidy" + else + echo "clang-tidy not found. Please install llvm." + exit 1 + fi +else + CLANG_TIDY="clang-tidy" +fi + +echo "Using clang-tidy: $CLANG_TIDY" + +# Find source files (excluding external/build based on path) that are in the compile commands. +# run-clang-tidy is a common helper script, but we can call clang-tidy directly via find if unavailable. +# Or better, iterate over files directly. +# Let's use `find` similar to format_code.sh but pass -p to clang-tidy. + +find . 
\ + \( -name "*.cpp" -o -name "*.c" -o -name "*.cc" -o -name "*.mm" -o -name "*.m" \) \ + -not -path "*/build/*" \ + -not -path "*/cmake-build-*" \ + -not -path "*/submodules/*" \ + -not -path "*/vcpkg_installed/*" \ + -not -path "*/.git/*" \ + -print0 | xargs -0 "$CLANG_TIDY" -p "$BUILD_DIR" --quiet + +echo "Linting complete." diff --git a/tools/setup_hooks.sh b/tools/setup_hooks.sh new file mode 100755 index 0000000..91d128f --- /dev/null +++ b/tools/setup_hooks.sh @@ -0,0 +1,50 @@ +#!/bin/bash +set -e + +# Function to check if a command exists +command_exists() { + command -v "$1" >/dev/null 2>&1 +} + +echo "Setting up pre-commit hooks..." + +# Check for pre-commit +if ! command_exists pre-commit; then + echo "pre-commit not found. Attempting to install..." + if [[ "$OSTYPE" == "darwin"* ]]; then + if command_exists brew; then + brew install pre-commit + else + echo "Error: brew not found. Please install pre-commit manually." + exit 1 + fi + else + if command_exists pip; then + pip install pre-commit + elif command_exists pip3; then + pip3 install pre-commit + else + echo "Error: pip/pip3 not found. Please install pre-commit manually." + exit 1 + fi + fi +fi + +# Check for clang-tidy +if ! command_exists clang-tidy; then + echo "clang-tidy not found in PATH." + if [[ -d "/opt/homebrew/opt/llvm/bin" ]]; then + echo "Found llvm at /opt/homebrew/opt/llvm/bin. You may want to add this to your PATH:" + echo 'export PATH="/opt/homebrew/opt/llvm/bin:$PATH"' + elif [[ -d "/usr/local/opt/llvm/bin" ]]; then + echo "Found llvm at /usr/local/opt/llvm/bin. You may want to add this to your PATH:" + echo 'export PATH="/usr/local/opt/llvm/bin:$PATH"' + else + echo "Consider installing llvm via brew to get clang-tidy: brew install llvm" + fi +fi + +# Install the hooks +pre-commit install + +echo "pre-commit hooks installed successfully!" 
From b3c06f3d88446e49c17a0a048dae93864c0bbf65 Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: Tue, 30 Dec 2025 19:12:33 -0500 Subject: [PATCH 2/6] Update tools/setup_hooks.sh Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- tools/setup_hooks.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/setup_hooks.sh b/tools/setup_hooks.sh index 91d128f..4d16ddf 100755 --- a/tools/setup_hooks.sh +++ b/tools/setup_hooks.sh @@ -24,8 +24,8 @@ if ! command_exists pre-commit; then elif command_exists pip3; then pip3 install pre-commit else - echo "Error: pip/pip3 not found. Please install pre-commit manually." - exit 1 + echo "Error: pip/pip3 not found. Please install pre-commit manually." + exit 1 fi fi fi From 310fdcf7335d69465eb9974f15eb30111bcc6e90 Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: Tue, 30 Dec 2025 19:19:05 -0500 Subject: [PATCH 3/6] Update CMakeLists.txt --- CMakeLists.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index acb61d1..738864b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -29,11 +29,15 @@ endif() # Back-compat shim: mirror to existing variable used throughout the file set(ENTROPY_BUILD_TESTS ${ENTROPY_ENABLE_TESTS} CACHE BOOL "Build tests for EntropyCore (deprecated; use ENTROPY_ENABLE_TESTS)" FORCE) -# Set C++20 standard with modules support +# Set C++20 standard (modules not used) set(CMAKE_CXX_STANDARD 20) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) +# Disable automatic C++20 module scanning - this project doesn't use modules +# and the auto-scanning generates Clang-specific flags that GCC doesn't support +set(CMAKE_CXX_SCAN_FOR_MODULES OFF) + # Configure sanitizers # Configure sanitizers # Always enable ASAN+UBSAN for Debug builds on minimal supported compilers From 157e9f3599dff5a9928ce9ed67624be3b2a06017 Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: 
Tue, 30 Dec 2025 19:31:37 -0500 Subject: [PATCH 4/6] Take care of linter warnings. --- src/Concurrency/SignalTree.h | 2 +- src/Concurrency/WorkContractGroup.cpp | 7 +-- src/Concurrency/WorkGraph.cpp | 88 ++++++++++++++------------- src/Concurrency/WorkGraph.h | 18 +++--- src/Logging/CLogger.cpp | 24 ++++---- src/Logging/ConsoleSink.cpp | 16 ++--- 6 files changed, 79 insertions(+), 76 deletions(-) diff --git a/src/Concurrency/SignalTree.h b/src/Concurrency/SignalTree.h index 4f6f463..e7c0d63 100644 --- a/src/Concurrency/SignalTree.h +++ b/src/Concurrency/SignalTree.h @@ -268,7 +268,7 @@ class SignalTree : public SignalTreeBase // 2. Calculate Leaf Node Array Index // Leaf nodes start after all internal nodes. // The number of internal nodes is LeafCapacity - 1 (for a complete binary tree) - // So, the first leaf node is at index (LeafCapacity - 1).\ + // So, the first leaf node is at index (LeafCapacity - 1). // Each leaf node (uint64_t) can hold 64 signals. size_t leafNodeArrayStartIndex = _totalNodes - _leafCapacity; size_t leafNodeOffsetInArray = leafIndex / S_BITS_PER_LEAF_NODE; // Which uint64_t leaf node diff --git a/src/Concurrency/WorkContractGroup.cpp b/src/Concurrency/WorkContractGroup.cpp index 3631097..6a1c3f7 100644 --- a/src/Concurrency/WorkContractGroup.cpp +++ b/src/Concurrency/WorkContractGroup.cpp @@ -40,7 +40,7 @@ std::unique_ptr WorkContractGroup::createSignalTree(size_t capac } WorkContractGroup::WorkContractGroup(size_t capacity, std::string name) - : _capacity(capacity), _contracts(capacity), _name(name) { + : _contracts(capacity), _name(std::move(name)), _capacity(capacity) { // Create SignalTree for ready contracts _readyContracts = createSignalTree(capacity); @@ -60,8 +60,7 @@ WorkContractGroup::WorkContractGroup(size_t capacity, std::string name) } WorkContractGroup::WorkContractGroup(WorkContractGroup&& other) noexcept - : _capacity(other._capacity), - _contracts(std::move(other._contracts)), + : _contracts(std::move(other._contracts)), 
_readyContracts(std::move(other._readyContracts)), _mainThreadContracts(std::move(other._mainThreadContracts)), _freeListHead(other._freeListHead.load(std::memory_order_acquire)), @@ -73,6 +72,7 @@ WorkContractGroup::WorkContractGroup(WorkContractGroup&& other) noexcept _mainThreadExecutingCount(other._mainThreadExecutingCount.load(std::memory_order_acquire)), _mainThreadSelectingCount(other._mainThreadSelectingCount.load(std::memory_order_acquire)), _name(std::move(other._name)), + _capacity(other._capacity), _concurrencyProvider(other._concurrencyProvider), _stopping(other._stopping.load(std::memory_order_acquire)) { // Clear the other object to prevent double cleanup @@ -241,7 +241,6 @@ WorkContractHandle WorkContractGroup::createContract(std::function work, uint32_t next = _contracts[idx].nextFree.load(std::memory_order_acquire); uint64_t newHead = packHead(next, headTag(head) + 1); if (_freeListHead.compare_exchange_weak(head, newHead, std::memory_order_acq_rel, std::memory_order_acquire)) { - head = newHead; // Not necessary, but keeps head updated // We successfully popped idx uint32_t index = idx; diff --git a/src/Concurrency/WorkGraph.cpp b/src/Concurrency/WorkGraph.cpp index 139fcd7..bb4098c 100644 --- a/src/Concurrency/WorkGraph.cpp +++ b/src/Concurrency/WorkGraph.cpp @@ -66,7 +66,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig // Set up safe scheduler callbacks with proper lifetime tracking NodeScheduler::Callbacks callbacks; - callbacks.onNodeExecuting = [this](NodeHandle node) { + callbacks.onNodeExecuting = [this](const NodeHandle& node) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -97,7 +97,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig onNodeComplete(node); } }; - callbacks.onNodeFailed = [this](NodeHandle node, std::exception_ptr /*ex*/) { + callbacks.onNodeFailed = [this](const NodeHandle& node, 
const std::exception_ptr& /*ex*/) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -106,7 +106,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig onNodeFailed(node); } }; - callbacks.onNodeDropped = [this](NodeHandle node) { + callbacks.onNodeDropped = [this](const NodeHandle& node) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { // Mark the node as failed (dropped is effectively a failure) @@ -136,7 +136,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig } } }; - callbacks.onNodeYielded = [this](NodeHandle node) { + callbacks.onNodeYielded = [this](const NodeHandle& node) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -145,7 +145,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig onNodeYielded(node); } }; - callbacks.onNodeYieldedUntil = [this](NodeHandle node, std::chrono::steady_clock::time_point wakeTime) { + callbacks.onNodeYieldedUntil = [this](const NodeHandle& node, std::chrono::steady_clock::time_point wakeTime) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -198,37 +198,41 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig } WorkGraph::~WorkGraph() { - if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", - "WorkGraph destructor starting, pending nodes: " + std::to_string(_pendingNodes.load())); - } + try { + if (_config.enableDebugLogging) { + ENTROPY_LOG_DEBUG_CAT( + "Concurrency", "WorkGraph destructor starting, pending nodes: " + std::to_string(_pendingNodes.load())); + } - // Set destroyed flag to prevent new callbacks - _destroyed.store(true, std::memory_order_release); + // Set destroyed flag to prevent new callbacks + _destroyed.store(true, 
std::memory_order_release); - // Unregister callbacks from WorkContractGroup first - // This prevents new callbacks from being scheduled - if (_workContractGroup) { - _workContractGroup->removeOnCapacityAvailable(_capacityCallbackIt); - _workContractGroup->setTimedDeferralCallback(nullptr); // Clear timed deferral callback - } + // Unregister callbacks from WorkContractGroup first + // This prevents new callbacks from being scheduled + if (_workContractGroup) { + _workContractGroup->removeOnCapacityAvailable(_capacityCallbackIt); + _workContractGroup->setTimedDeferralCallback(nullptr); // Clear timed deferral callback + } - // Wait for all active callbacks to complete - if (_activeCallbacks.load(std::memory_order_acquire) > 0) { - if (_config.enableDebugLogging) { - ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph destructor waiting for callbacks: " + - std::to_string(_activeCallbacks.load())); + // Wait for all active callbacks to complete + if (_activeCallbacks.load(std::memory_order_acquire) > 0) { + if (_config.enableDebugLogging) { + ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph destructor waiting for callbacks: " + + std::to_string(_activeCallbacks.load())); + } + std::unique_lock lock(_waitMutex); + _shutdownCondition.wait(lock, [this]() { return _activeCallbacks.load(std::memory_order_acquire) == 0; }); } - std::unique_lock lock(_waitMutex); - _shutdownCondition.wait(lock, [this]() { return _activeCallbacks.load(std::memory_order_acquire) == 0; }); - } - // Now safe to proceed with cleanup - // Unregister from debug system (if registered) - if (_config.enableDebugRegistration) { - Debug::DebugRegistry::getInstance().unregisterObject(this); - auto msg = std::format("Destroyed WorkGraph '{}'", getName()); - ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); + // Now safe to proceed with cleanup + // Unregister from debug system (if registered) + if (_config.enableDebugRegistration) { + Debug::DebugRegistry::getInstance().unregisterObject(this); + auto msg = 
std::format("Destroyed WorkGraph '{}'", getName()); + ENTROPY_LOG_DEBUG_CAT("WorkGraph", msg); + } + } catch (...) { + // Suppress exceptions in destructor - cannot propagate } } @@ -321,11 +325,11 @@ WorkGraph::NodeHandle WorkGraph::addYieldableNode(YieldableWorkFunction work, co return handle; } -void WorkGraph::addDependency(NodeHandle from, NodeHandle to) { +void WorkGraph::addDependency(NodeHandle from, const NodeHandle& to) { std::unique_lock lock(_graphMutex); // Add edge in the DAG (this checks for cycles) - _graph.addEdge(from, to); + _graph.addEdge(std::move(from), to); // Increment dependency count for the target node incrementDependencies(to); @@ -421,7 +425,7 @@ void WorkGraph::clear() { } } -void WorkGraph::incrementDependencies(NodeHandle node) { +void WorkGraph::incrementDependencies(const NodeHandle& node) { if (auto* nodeData = _graph.getNodeData(node)) { nodeData->pendingDependencies.fetch_add(1, std::memory_order_acq_rel); if (_config.enableDebugLogging) { @@ -608,14 +612,14 @@ bool WorkGraph::scheduleNode(NodeHandle node) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::scheduleNode() called"); } // Always delegate to the scheduler component - bool result = _scheduler->scheduleNode(node); + bool result = _scheduler->scheduleNode(std::move(node)); if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::scheduleNode() completed"); } return result; } -void WorkGraph::onNodeComplete(NodeHandle node) { +void WorkGraph::onNodeComplete(const NodeHandle& node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -793,7 +797,7 @@ WorkGraph::NodeHandle WorkGraph::addContinuation(const std::vector& return continuation; } -void WorkGraph::onNodeFailed(NodeHandle node) { +void WorkGraph::onNodeFailed(const NodeHandle& node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -820,7 +824,7 @@ void WorkGraph::onNodeFailed(NodeHandle node) { cancelDependents(node); } -void 
WorkGraph::onNodeYielded(NodeHandle node) { +void WorkGraph::onNodeYielded(const NodeHandle& node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -854,7 +858,7 @@ void WorkGraph::onNodeYielded(NodeHandle node) { rescheduleYieldedNode(node); } -void WorkGraph::onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime) { +void WorkGraph::onNodeYieldedUntil(const NodeHandle& node, std::chrono::steady_clock::time_point wakeTime) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -893,7 +897,7 @@ void WorkGraph::onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::t } } -void WorkGraph::rescheduleYieldedNode(NodeHandle node) { +void WorkGraph::rescheduleYieldedNode(const NodeHandle& node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -930,7 +934,7 @@ void WorkGraph::rescheduleYieldedNode(NodeHandle node) { } } -void WorkGraph::onNodeCancelled(NodeHandle node) { +void WorkGraph::onNodeCancelled(const NodeHandle& node) { auto* nodeData = _graph.getNodeData(node); if (!nodeData) return; @@ -957,7 +961,7 @@ void WorkGraph::onNodeCancelled(NodeHandle node) { cancelDependents(node); } -void WorkGraph::cancelDependents(NodeHandle failedNode) { +void WorkGraph::cancelDependents(const NodeHandle& failedNode) { std::vector nodesToCancel; { diff --git a/src/Concurrency/WorkGraph.h b/src/Concurrency/WorkGraph.h index 1bdc655..bebaebe 100644 --- a/src/Concurrency/WorkGraph.h +++ b/src/Concurrency/WorkGraph.h @@ -509,7 +509,7 @@ class WorkGraph : public Debug::Named * graph.addDependency(C, D); // to complete * @endcode */ - void addDependency(NodeHandle from, NodeHandle to); + void addDependency(NodeHandle from, const NodeHandle& to); /** * @brief Resets execution state so the graph can be re-executed @@ -933,7 +933,7 @@ class WorkGraph : public Debug::Named * * @param node The node that just finished executing successfully */ - void onNodeComplete(NodeHandle node); + void 
onNodeComplete(const NodeHandle& node); /** * @brief Takes a ready node and gets it running in the thread pool @@ -954,7 +954,7 @@ class WorkGraph : public Debug::Named * * @param node The node that just got another parent to wait for */ - void incrementDependencies(NodeHandle node); + void incrementDependencies(const NodeHandle& node); /** * @brief Internal root scheduling - assumes you already hold the graph lock @@ -974,7 +974,7 @@ class WorkGraph : public Debug::Named * * @param failedNode The node whose failure triggers the cascade */ - void cancelDependents(NodeHandle failedNode); + void cancelDependents(const NodeHandle& failedNode); /** * @brief Handles the bookkeeping when a node gets cancelled @@ -984,7 +984,7 @@ class WorkGraph : public Debug::Named * * @param node The node that's being cancelled */ - void onNodeCancelled(NodeHandle node); + void onNodeCancelled(const NodeHandle& node); /** * @brief Deals with the aftermath when a node's work function throws @@ -994,7 +994,7 @@ class WorkGraph : public Debug::Named * * @param node The node whose work function threw an exception */ - void onNodeFailed(NodeHandle node); + void onNodeFailed(const NodeHandle& node); /** * @brief Handles a node that has yielded execution @@ -1004,7 +1004,7 @@ class WorkGraph : public Debug::Named * * @param node The node that yielded */ - void onNodeYielded(NodeHandle node); + void onNodeYielded(const NodeHandle& node); /** * @brief Handles timed node yield - node suspended until specific time @@ -1016,7 +1016,7 @@ class WorkGraph : public Debug::Named * @param node The node that yielded * @param wakeTime When the node should be reconsidered for scheduling */ - void onNodeYieldedUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime); + void onNodeYieldedUntil(const NodeHandle& node, std::chrono::steady_clock::time_point wakeTime); /** * @brief Reschedules a yielded node for execution @@ -1026,7 +1026,7 @@ class WorkGraph : public Debug::Named * * @param node 
The yielded node to reschedule */ - void rescheduleYieldedNode(NodeHandle node); + void rescheduleYieldedNode(const NodeHandle& node); }; } // namespace Concurrency diff --git a/src/Logging/CLogger.cpp b/src/Logging/CLogger.cpp index cb6eda6..6c99ea5 100644 --- a/src/Logging/CLogger.cpp +++ b/src/Logging/CLogger.cpp @@ -12,7 +12,7 @@ using ::EntropyEngine::Core::Logging::Logger; using ::EntropyEngine::Core::Logging::LogLevel; -static LogLevel map_level(EntropyLogLevelC lvl) noexcept { +static LogLevel mapLevel(EntropyLogLevelC lvl) noexcept { switch (lvl) { case ENTROPY_LOG_TRACE_C: return LogLevel::Trace; @@ -31,14 +31,14 @@ static LogLevel map_level(EntropyLogLevelC lvl) noexcept { } } -static void vwrite_internal(EntropyLogLevelC level, const char* category, const char* fmt, va_list args) { +static void vwriteInternal(EntropyLogLevelC level, const char* category, const char* fmt, va_list args) { if (!fmt) return; // Format into a dynamically-sized buffer - va_list args_copy; - va_copy(args_copy, args); - int needed = std::vsnprintf(nullptr, 0, fmt, args_copy); - va_end(args_copy); + va_list argsCopy; + va_copy(argsCopy, args); + int needed = std::vsnprintf(nullptr, 0, fmt, argsCopy); + va_end(argsCopy); if (needed < 0) return; std::string message; @@ -47,33 +47,33 @@ static void vwrite_internal(EntropyLogLevelC level, const char* category, const // Forward to Logger backend if (category && *category) { - Logger::global().log(map_level(level), category, message); + Logger::global().log(mapLevel(level), category, message); } else { - Logger::global().log(map_level(level), "C", message); + Logger::global().log(mapLevel(level), "C", message); } } extern "C" { void entropy_log_vwrite(EntropyLogLevelC level, const char* fmt, va_list args) { - vwrite_internal(level, "C", fmt, args); + vwriteInternal(level, "C", fmt, args); } void entropy_log_vwrite_cat(EntropyLogLevelC level, const char* category, const char* fmt, va_list args) { - vwrite_internal(level, category, fmt, 
args); + vwriteInternal(level, category, fmt, args); } void entropy_log_write(EntropyLogLevelC level, const char* fmt, ...) { va_list args; va_start(args, fmt); - vwrite_internal(level, "C", fmt, args); + vwriteInternal(level, "C", fmt, args); va_end(args); } void entropy_log_write_cat(EntropyLogLevelC level, const char* category, const char* fmt, ...) { va_list args; va_start(args, fmt); - vwrite_internal(level, category, fmt, args); + vwriteInternal(level, category, fmt, args); va_end(args); } diff --git a/src/Logging/ConsoleSink.cpp b/src/Logging/ConsoleSink.cpp index 5f2e92b..f8a8fbc 100644 --- a/src/Logging/ConsoleSink.cpp +++ b/src/Logging/ConsoleSink.cpp @@ -68,17 +68,17 @@ void ConsoleSink::formatAndWrite(std::ostream& stream, const LogEntry& entry) { // Format: [TIMESTAMP] [LEVEL] [THREAD?] [CATEGORY] MESSAGE [LOCATION?] // Timestamp - auto time_t = std::chrono::system_clock::to_time_t(entry.timestamp); + auto timeVal = std::chrono::system_clock::to_time_t(entry.timestamp); auto ms = std::chrono::duration_cast(entry.timestamp.time_since_epoch()) % 1000; #ifdef _WIN32 - std::tm tm_buf; - localtime_s(&tm_buf, &time_t); - stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); + std::tm tmBuf; + localtime_s(&tmBuf, &timeVal); + stream << "[" << std::put_time(&tmBuf, "%H:%M:%S"); #else - std::tm tm_buf; - localtime_r(&time_t, &tm_buf); - stream << "[" << std::put_time(&tm_buf, "%H:%M:%S"); + std::tm tmBuf; + localtime_r(&timeVal, &tmBuf); + stream << "[" << std::put_time(&tmBuf, "%H:%M:%S"); #endif stream << "." 
<< std::setfill('0') << std::setw(3) << ms.count() << "] "; @@ -114,7 +114,7 @@ void ConsoleSink::formatAndWrite(std::ostream& stream, const LogEntry& entry) { stream << " (" << entry.location.file_name() << ":" << entry.location.line() << ")"; } - stream << std::endl; + stream << '\n'; } } // namespace Logging From c5e37023900b3a13ccd1c17ade8068115d6a06f1 Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: Tue, 30 Dec 2025 20:14:06 -0500 Subject: [PATCH 5/6] Loads of linter and warning fixes. --- src/Concurrency/AdaptiveRankingScheduler.cpp | 4 +- src/Concurrency/AdaptiveRankingScheduler.h | 3 +- src/Concurrency/DirectScheduler.h | 4 +- src/Concurrency/IWorkScheduler.h | 27 +------ src/Concurrency/NodeScheduler.cpp | 12 +-- src/Concurrency/NodeScheduler.h | 12 +-- src/Concurrency/NodeStateManager.cpp | 19 +++-- src/Concurrency/NodeStateManager.h | 12 +-- src/Concurrency/RandomScheduler.cpp | 5 +- src/Concurrency/RandomScheduler.h | 5 +- src/Concurrency/RoundRobinScheduler.cpp | 5 +- src/Concurrency/RoundRobinScheduler.h | 5 +- src/Concurrency/SpinningDirectScheduler.h | 3 +- src/Concurrency/WorkGraph.cpp | 6 +- src/Concurrency/WorkGraph.h | 2 +- src/Concurrency/WorkService.cpp | 7 +- src/Core/EntropyApplication.cpp | 26 +++--- src/Core/EntropyCAPI.cpp | 77 +++++++++--------- src/Core/EntropyMain.cpp | 2 +- src/Core/EntropyServiceRegistry.cpp | 8 +- src/Logging/Logger.h | 29 ++++--- src/VirtualFileSystem/DirectoryHandle.cpp | 4 +- src/VirtualFileSystem/FileHandle.cpp | 60 +++++++------- src/VirtualFileSystem/FileStream.cpp | 2 +- src/VirtualFileSystem/FileWatchManager.cpp | 8 +- src/VirtualFileSystem/VirtualFileSystem.cpp | 6 +- .../entropy_work_contract_handle_c.cpp | 80 +++++++------------ 27 files changed, 198 insertions(+), 235 deletions(-) diff --git a/src/Concurrency/AdaptiveRankingScheduler.cpp b/src/Concurrency/AdaptiveRankingScheduler.cpp index 666b57a..c70612b 100644 --- a/src/Concurrency/AdaptiveRankingScheduler.cpp +++ 
b/src/Concurrency/AdaptiveRankingScheduler.cpp @@ -26,8 +26,8 @@ thread_local AdaptiveRankingScheduler::ThreadState AdaptiveRankingScheduler::stT AdaptiveRankingScheduler::AdaptiveRankingScheduler(const Config& config) : _config(config) {} -IWorkScheduler::ScheduleResult AdaptiveRankingScheduler::selectNextGroup(const std::vector& groups, - const SchedulingContext& context) { +IWorkScheduler::ScheduleResult AdaptiveRankingScheduler::selectNextGroup( + const std::vector& groups) { // Phase 1: Try to execute from the current sticky group for cache locality if (stThreadState.consecutiveExecutionCount < _config.maxConsecutiveExecutionCount) { WorkContractGroup* stickyGroup = getCurrentGroupIfValid(); diff --git a/src/Concurrency/AdaptiveRankingScheduler.h b/src/Concurrency/AdaptiveRankingScheduler.h index 1a3310f..9bfea7b 100644 --- a/src/Concurrency/AdaptiveRankingScheduler.h +++ b/src/Concurrency/AdaptiveRankingScheduler.h @@ -149,8 +149,7 @@ class AdaptiveRankingScheduler : public IWorkScheduler * @param context Current thread context * @return Selected group or nullptr if no work available */ - ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) override; + ScheduleResult selectNextGroup(const std::vector& groups) override; /** * @brief Updates execution counters for affinity tracking diff --git a/src/Concurrency/DirectScheduler.h b/src/Concurrency/DirectScheduler.h index 52a8e34..3d1a871 100644 --- a/src/Concurrency/DirectScheduler.h +++ b/src/Concurrency/DirectScheduler.h @@ -88,11 +88,9 @@ class DirectScheduler : public IWorkScheduler * bad for performance, good for measuring overhead. 
* * @param groups Groups to scan (in order) - * @param context Completely ignored * @return First group with work, or nullptr */ - ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) override { + ScheduleResult selectNextGroup(const std::vector& groups) override { // Just scan and return first group with work for (auto* group : groups) { if (group && group->scheduledCount() > 0) { diff --git a/src/Concurrency/IWorkScheduler.h b/src/Concurrency/IWorkScheduler.h index f8cb4bd..55de55d 100644 --- a/src/Concurrency/IWorkScheduler.h +++ b/src/Concurrency/IWorkScheduler.h @@ -103,23 +103,6 @@ class IWorkScheduler size_t threadCount = 0; ///< Number of worker threads (0 = hardware_concurrency) }; - /** - * @brief Context passed to scheduler for each scheduling decision. - * - * Provides thread-local information to enable informed scheduling decisions. - * This context supports strategies such as maintaining thread-group affinity - * for cache locality, distributing work evenly across threads, or detecting - * and addressing thread starvation conditions. - * - * All fields are maintained by the WorkService and should be treated as read-only. - */ - struct SchedulingContext - { - size_t threadId; ///< Unique ID for this worker thread (0 to threadCount-1) - size_t consecutiveFailures; ///< How many times in a row we've found no work - WorkContractGroup* lastExecutedGroup; ///< Last group this thread executed from (nullptr on first call) - }; - /** * @brief Result of a scheduling decision. * @@ -142,12 +125,11 @@ class IWorkScheduler * avoid allocations and complexity. 
* * @param groups Current snapshot of registered work groups (groups might have no work) - * @param context Thread-specific info to help with scheduling decisions * @return ScheduleResult with chosen group (or nullptr if no work found) * * @code * // Simplest possible implementation - just find first group with work - * ScheduleResult selectNextGroup(groups, context) override { + * ScheduleResult selectNextGroup(groups) override { * for (auto* group : groups) { * if (group->scheduledCount() > 0) { * return {group, false}; @@ -157,8 +139,7 @@ class IWorkScheduler * } * @endcode */ - virtual ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) = 0; + virtual ScheduleResult selectNextGroup(const std::vector& groups) = 0; /** * @brief Notifies scheduler that work was successfully executed @@ -169,7 +150,7 @@ class IWorkScheduler * @param group The group that work was executed from * @param threadId The thread that executed the work */ - virtual void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) {} + virtual void notifyWorkExecuted([[maybe_unused]] WorkContractGroup* group, [[maybe_unused]] size_t threadId) {} /** * @brief Notifies scheduler that the group list has changed @@ -179,7 +160,7 @@ class IWorkScheduler * * @param newGroups Updated list of work groups (complete replacement) */ - virtual void notifyGroupsChanged(const std::vector& newGroups) {} + virtual void notifyGroupsChanged([[maybe_unused]] const std::vector& newGroups) {} /** * @brief Resets scheduler to initial state diff --git a/src/Concurrency/NodeScheduler.cpp b/src/Concurrency/NodeScheduler.cpp index dd157c7..e429192 100644 --- a/src/Concurrency/NodeScheduler.cpp +++ b/src/Concurrency/NodeScheduler.cpp @@ -28,7 +28,7 @@ namespace Core namespace Concurrency { -bool NodeScheduler::scheduleNode(NodeHandle node) { +bool NodeScheduler::scheduleNode(const NodeHandle& node) { if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("NodeScheduler", 
"scheduleNode() called"); } @@ -89,7 +89,7 @@ bool NodeScheduler::scheduleNode(NodeHandle node) { return true; } -bool NodeScheduler::deferNode(NodeHandle node) { +bool NodeScheduler::deferNode(const NodeHandle& node) { std::lock_guard lock(_deferredMutex); // Exclusive lock for modifying queue if (_config.enableDebugLogging) { @@ -217,7 +217,7 @@ size_t NodeScheduler::scheduleReadyNodes(const std::vector& nodes) { return scheduled; } -std::function NodeScheduler::createWorkWrapper(NodeHandle node) { +std::function NodeScheduler::createWorkWrapper(const NodeHandle& node) { return [this, node]() { // Check if scheduler has been destroyed if (_destroyed.load(std::memory_order_acquire)) { @@ -327,13 +327,13 @@ void NodeScheduler::updateStats(bool scheduled, bool deferred, bool dropped) { } } -void NodeScheduler::publishScheduledEvent(NodeHandle node) { +void NodeScheduler::publishScheduledEvent(const NodeHandle& node) { if (_eventBus) { _eventBus->publish(NodeScheduledEvent(_graph, node)); } } -void NodeScheduler::publishDeferredEvent(NodeHandle node) { +void NodeScheduler::publishDeferredEvent(const NodeHandle& node) { if (_eventBus) { // Note: This is called from deferNode() which already holds an exclusive lock // We can safely read the queue size here without additional locking @@ -342,7 +342,7 @@ void NodeScheduler::publishDeferredEvent(NodeHandle node) { } } -bool NodeScheduler::deferNodeUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime) { +bool NodeScheduler::deferNodeUntil(const NodeHandle& node, std::chrono::steady_clock::time_point wakeTime) { std::lock_guard lock(_timedDeferredMutex); if (_config.enableDebugLogging) { diff --git a/src/Concurrency/NodeScheduler.h b/src/Concurrency/NodeScheduler.h index bf03219..48a0bcf 100644 --- a/src/Concurrency/NodeScheduler.h +++ b/src/Concurrency/NodeScheduler.h @@ -210,7 +210,7 @@ class NodeScheduler * } * @endcode */ - bool scheduleNode(NodeHandle node); + bool scheduleNode(const NodeHandle& node); 
/** * @brief Explicitly defers a node without trying to schedule first @@ -234,7 +234,7 @@ class NodeScheduler * } * @endcode */ - bool deferNode(NodeHandle node); + bool deferNode(const NodeHandle& node); /** * @brief Drains the deferred queue into available execution slots @@ -302,7 +302,7 @@ class NodeScheduler * // Node sits in queue consuming zero CPU until wakeTime arrives * @endcode */ - bool deferNodeUntil(NodeHandle node, std::chrono::steady_clock::time_point wakeTime); + bool deferNodeUntil(const NodeHandle& node, std::chrono::steady_clock::time_point wakeTime); /** * @brief Quick check if we can accept more work right now @@ -510,7 +510,7 @@ class NodeScheduler * @param node The node whose work we're wrapping * @return Lambda function suitable for WorkContractGroup execution */ - std::function createWorkWrapper(NodeHandle node); + std::function createWorkWrapper(const NodeHandle& node); /** * @brief Thread-safe statistics update helper @@ -530,14 +530,14 @@ class NodeScheduler * * @param node The node that was scheduled */ - void publishScheduledEvent(NodeHandle node); + void publishScheduledEvent(const NodeHandle& node); /** * @brief Publishes a "node deferred" event to the event bus * * @param node The node that was deferred */ - void publishDeferredEvent(NodeHandle node); + void publishDeferredEvent(const NodeHandle& node); }; } // namespace Concurrency diff --git a/src/Concurrency/NodeStateManager.cpp b/src/Concurrency/NodeStateManager.cpp index 908022a..fcb222c 100644 --- a/src/Concurrency/NodeStateManager.cpp +++ b/src/Concurrency/NodeStateManager.cpp @@ -22,7 +22,7 @@ namespace Core namespace Concurrency { -bool NodeStateManager::transitionState(NodeHandle node, NodeState from, NodeState to) { +bool NodeStateManager::transitionState(const NodeHandle& node, NodeState from, NodeState to) { // Validate transition if (!canTransition(from, to)) { // Log warning for invalid state transition attempts @@ -60,7 +60,7 @@ bool 
NodeStateManager::transitionState(NodeHandle node, NodeState from, NodeStat return true; } -void NodeStateManager::forceState(NodeHandle node, NodeState to) { +void NodeStateManager::forceState(const NodeHandle& node, NodeState to) { auto* dag = node.handleOwnerAs>(); auto* nodeData = dag ? dag->getNodeData(node) : nullptr; if (!nodeData) { @@ -78,7 +78,7 @@ void NodeStateManager::forceState(NodeHandle node, NodeState to) { publishStateChange(node, from, to); } -NodeState NodeStateManager::getState(NodeHandle node) const { +NodeState NodeStateManager::getState(const NodeHandle& node) const { auto* dag = node.handleOwnerAs>(); auto* nodeData = dag ? dag->getNodeData(node) : nullptr; if (!nodeData) { @@ -88,7 +88,7 @@ NodeState NodeStateManager::getState(NodeHandle node) const { return nodeData->state.load(std::memory_order_acquire); } -void NodeStateManager::registerNode(NodeHandle node, NodeState initialState) { +void NodeStateManager::registerNode(const NodeHandle& node, NodeState initialState) { auto* dag = node.handleOwnerAs>(); auto* nodeData = dag ? 
dag->getNodeData(node) : nullptr; if (!nodeData) { @@ -125,6 +125,9 @@ void NodeStateManager::registerNode(NodeHandle node, NodeState initialState) { case NodeState::Cancelled: _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); break; + case NodeState::Yielded: + // Yielded is a transitional state, no separate counter + break; } } @@ -185,6 +188,9 @@ void NodeStateManager::updateStats(NodeState oldState, NodeState newState) { if (_stats.cancelledNodes.load(std::memory_order_relaxed) > 0) _stats.cancelledNodes.fetch_sub(1, std::memory_order_relaxed); break; + case NodeState::Yielded: + // Yielded is a transitional state, no separate counter + break; } // Atomically increment new state counter @@ -210,10 +216,13 @@ void NodeStateManager::updateStats(NodeState oldState, NodeState newState) { case NodeState::Cancelled: _stats.cancelledNodes.fetch_add(1, std::memory_order_relaxed); break; + case NodeState::Yielded: + // Yielded is a transitional state, no separate counter + break; } } -void NodeStateManager::publishStateChange(NodeHandle node, NodeState from, NodeState to) { +void NodeStateManager::publishStateChange(const NodeHandle& node, NodeState from, NodeState to) { if (!_eventBus) { return; // No event bus configured } diff --git a/src/Concurrency/NodeStateManager.h b/src/Concurrency/NodeStateManager.h index ff3ae61..64bc8a7 100644 --- a/src/Concurrency/NodeStateManager.h +++ b/src/Concurrency/NodeStateManager.h @@ -58,7 +58,7 @@ class NodeStateManager * @param to Target state * @return true if transition succeeded, false if current state didn't match 'from' */ - bool transitionState(NodeHandle node, NodeState from, NodeState to); + bool transitionState(const NodeHandle& node, NodeState from, NodeState to); /** * @brief Force a state transition without validation @@ -68,7 +68,7 @@ class NodeStateManager * @param node The node to transition * @param to Target state */ - void forceState(NodeHandle node, NodeState to); + void forceState(const NodeHandle& 
node, NodeState to); /** * @brief Get current state of a node @@ -76,7 +76,7 @@ class NodeStateManager * @param node The node to query * @return Current state, or Pending if node is invalid */ - NodeState getState(NodeHandle node) const; + NodeState getState(const NodeHandle& node) const; /** * @brief Check if a state transition is valid @@ -103,7 +103,7 @@ class NodeStateManager * @param node The node to check * @return true if node is in Completed, Failed, or Cancelled state */ - bool isTerminal(NodeHandle node) const { + bool isTerminal(const NodeHandle& node) const { return isTerminalState(getState(node)); } @@ -146,7 +146,7 @@ class NodeStateManager * @param node The node to register * @param initialState Initial state (default: Pending) */ - void registerNode(NodeHandle node, NodeState initialState = NodeState::Pending); + void registerNode(const NodeHandle& node, NodeState initialState = NodeState::Pending); /** * @brief Batch update for multiple nodes @@ -198,7 +198,7 @@ class NodeStateManager /** * @brief Publish state change event if event bus is configured */ - void publishStateChange(NodeHandle node, NodeState from, NodeState to); + void publishStateChange(const NodeHandle& node, NodeState from, NodeState to); }; } // namespace Concurrency diff --git a/src/Concurrency/RandomScheduler.cpp b/src/Concurrency/RandomScheduler.cpp index 2ca6748..0f4c3d7 100644 --- a/src/Concurrency/RandomScheduler.cpp +++ b/src/Concurrency/RandomScheduler.cpp @@ -24,7 +24,7 @@ namespace Concurrency thread_local std::mt19937 RandomScheduler::stRng; thread_local bool RandomScheduler::stRngInitialized = false; -RandomScheduler::RandomScheduler(const Config& config) { +RandomScheduler::RandomScheduler([[maybe_unused]] const Config& config) { // Config unused for random scheduler } @@ -38,8 +38,7 @@ void RandomScheduler::ensureRngInitialized() { } } -IWorkScheduler::ScheduleResult RandomScheduler::selectNextGroup(const std::vector& groups, - const SchedulingContext& context) { 
+IWorkScheduler::ScheduleResult RandomScheduler::selectNextGroup(const std::vector& groups) { ensureRngInitialized(); // First, count groups with work diff --git a/src/Concurrency/RandomScheduler.h b/src/Concurrency/RandomScheduler.h index 5d49146..dea4f79 100644 --- a/src/Concurrency/RandomScheduler.h +++ b/src/Concurrency/RandomScheduler.h @@ -121,13 +121,12 @@ class RandomScheduler : public IWorkScheduler * // This gives each group exactly 1/N probability! * @endcode */ - ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) override; + ScheduleResult selectNextGroup(const std::vector& groups) override; /** * @brief No-op - random selection doesn't learn from history */ - void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) override {} + void notifyWorkExecuted([[maybe_unused]] WorkContractGroup* group, [[maybe_unused]] size_t threadId) override {} /** * @brief No-op - random scheduler has no state to reset diff --git a/src/Concurrency/RoundRobinScheduler.cpp b/src/Concurrency/RoundRobinScheduler.cpp index 3ea4a65..894d170 100644 --- a/src/Concurrency/RoundRobinScheduler.cpp +++ b/src/Concurrency/RoundRobinScheduler.cpp @@ -21,12 +21,11 @@ namespace Concurrency // Thread-local state definition thread_local size_t RoundRobinScheduler::stCurrentIndex = 0; -RoundRobinScheduler::RoundRobinScheduler(const Config& config) { +RoundRobinScheduler::RoundRobinScheduler([[maybe_unused]] const Config& config) { // Config mostly unused for round-robin } -IWorkScheduler::ScheduleResult RoundRobinScheduler::selectNextGroup(const std::vector& groups, - const SchedulingContext& context) { +IWorkScheduler::ScheduleResult RoundRobinScheduler::selectNextGroup(const std::vector& groups) { if (groups.empty()) { return {nullptr, true}; } diff --git a/src/Concurrency/RoundRobinScheduler.h b/src/Concurrency/RoundRobinScheduler.h index def6726..4c5b7ad 100644 --- a/src/Concurrency/RoundRobinScheduler.h +++ 
b/src/Concurrency/RoundRobinScheduler.h @@ -115,13 +115,12 @@ class RoundRobinScheduler : public IWorkScheduler * // 5. Return the winner (or nullptr) * @endcode */ - ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) override; + ScheduleResult selectNextGroup(const std::vector& groups) override; /** * @brief No-op - round-robin doesn't track execution history */ - void notifyWorkExecuted(WorkContractGroup* group, size_t threadId) override {} + void notifyWorkExecuted([[maybe_unused]] WorkContractGroup* group, [[maybe_unused]] size_t threadId) override {} /** * @brief Resets thread-local rotation index to 0 diff --git a/src/Concurrency/SpinningDirectScheduler.h b/src/Concurrency/SpinningDirectScheduler.h index cb472d8..092aee2 100644 --- a/src/Concurrency/SpinningDirectScheduler.h +++ b/src/Concurrency/SpinningDirectScheduler.h @@ -118,8 +118,7 @@ class SpinningDirectScheduler : public IWorkScheduler * } * @endcode */ - ScheduleResult selectNextGroup(const std::vector& groups, - const SchedulingContext& context) override { + ScheduleResult selectNextGroup(const std::vector& groups) override { for (auto* group : groups) { if (group && group->scheduledCount() > 0) { return {group, false}; // Never sleep diff --git a/src/Concurrency/WorkGraph.cpp b/src/Concurrency/WorkGraph.cpp index bb4098c..81913dc 100644 --- a/src/Concurrency/WorkGraph.cpp +++ b/src/Concurrency/WorkGraph.cpp @@ -88,7 +88,7 @@ WorkGraph::WorkGraph(WorkContractGroup* workContractGroup, const WorkGraphConfig } } }; - callbacks.onNodeCompleted = [this](NodeHandle node) { + callbacks.onNodeCompleted = [this](const NodeHandle& node) { CallbackGuard guard(this); if (!_destroyed.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -598,7 +598,7 @@ void WorkGraph::execute() { } } -bool WorkGraph::scheduleNode(NodeHandle node) { +bool WorkGraph::scheduleNode(const NodeHandle& node) { // Check if suspended if 
(_suspended.load(std::memory_order_acquire)) { if (_config.enableDebugLogging) { @@ -612,7 +612,7 @@ bool WorkGraph::scheduleNode(NodeHandle node) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::scheduleNode() called"); } // Always delegate to the scheduler component - bool result = _scheduler->scheduleNode(std::move(node)); + bool result = _scheduler->scheduleNode(node); if (_config.enableDebugLogging) { ENTROPY_LOG_DEBUG_CAT("Concurrency", "WorkGraph::scheduleNode() completed"); } diff --git a/src/Concurrency/WorkGraph.h b/src/Concurrency/WorkGraph.h index bebaebe..cb3e7e3 100644 --- a/src/Concurrency/WorkGraph.h +++ b/src/Concurrency/WorkGraph.h @@ -944,7 +944,7 @@ class WorkGraph : public Debug::Named * @param node The ready node to submit for execution * @return true if scheduled, false if already scheduled/completed */ - bool scheduleNode(NodeHandle node); + bool scheduleNode(const NodeHandle& node); /** * @brief Bumps up how many parents a node is waiting for diff --git a/src/Concurrency/WorkService.cpp b/src/Concurrency/WorkService.cpp index 8b8cb36..8bd436a 100644 --- a/src/Concurrency/WorkService.cpp +++ b/src/Concurrency/WorkService.cpp @@ -222,11 +222,8 @@ void WorkService::executeWork(const std::stop_token& token) { std::shared_lock lock(_workContractGroupsMutex); if (!_workContractGroups.empty()) { - // Create scheduling context - IWorkScheduler::SchedulingContext context{stThreadId, stSoftFailureCount, lastExecutedGroup}; - // Ask scheduler for next group - reads directly from _workContractGroups - auto scheduleResult = _scheduler->selectNextGroup(_workContractGroups, context); + auto scheduleResult = _scheduler->selectNextGroup(_workContractGroups); // Select group if valid and not stopping if (scheduleResult.group && !scheduleResult.group->isStopping()) { @@ -305,7 +302,7 @@ void WorkService::checkTimedDeferrals() { } } -void WorkService::notifyWorkAvailable(WorkContractGroup* group) { +void WorkService::notifyWorkAvailable([[maybe_unused]] 
WorkContractGroup* group) { // We don't need to track which group has work, just that work is available _workAvailable = true; _workAvailableCV.notify_one(); diff --git a/src/Core/EntropyApplication.cpp b/src/Core/EntropyApplication.cpp index 8c5a83d..dbe1200 100644 --- a/src/Core/EntropyApplication.cpp +++ b/src/Core/EntropyApplication.cpp @@ -112,7 +112,7 @@ int EntropyApplication::run() { // Spawn dedicated signal handler thread // This thread waits for OS signals and handles termination requests // while the main thread runs the application loop at full speed - std::jthread signalThread([this](std::stop_token stopToken) { + std::jthread signalThread([this](const std::stop_token& stopToken) { #if defined(_WIN32) // Windows: wait on console control events HANDLE ctrlH = static_cast(_ctrlEvent); @@ -305,7 +305,7 @@ void EntropyApplication::handleConsoleSignal(unsigned long ctrlType) { namespace { // Signal handler - must be async-signal-safe -static void EntropySigHandler(int signum) { +static void entropySigHandler(int signum) { EntropyEngine::Core::EntropyApplication::shared().notifyPosixSignalFromHandler(signum); } } // namespace @@ -315,7 +315,7 @@ void EntropyApplication::installSignalHandlers() { // Set up sigaction for graceful termination signals struct sigaction sa; - sa.sa_handler = EntropySigHandler; + sa.sa_handler = entropySigHandler; sigemptyset(&sa.sa_mask); sa.sa_flags = 0; @@ -326,16 +326,16 @@ void EntropyApplication::installSignalHandlers() { sigaction(SIGQUIT, &sa, nullptr); // quit signal // For fatal signals like SIGSEGV, SIGABRT - also install but allow default behavior after logging - struct sigaction fatal_sa; - fatal_sa.sa_handler = EntropySigHandler; - sigemptyset(&fatal_sa.sa_mask); - fatal_sa.sa_flags = SA_RESETHAND; // Reset to default after first signal - - sigaction(SIGABRT, &fatal_sa, nullptr); // abort - sigaction(SIGSEGV, &fatal_sa, nullptr); // segmentation fault - sigaction(SIGBUS, &fatal_sa, nullptr); // bus error - 
sigaction(SIGFPE, &fatal_sa, nullptr); // floating point exception - sigaction(SIGILL, &fatal_sa, nullptr); // illegal instruction + struct sigaction fatalSa; + fatalSa.sa_handler = entropySigHandler; + sigemptyset(&fatalSa.sa_mask); + fatalSa.sa_flags = SA_RESETHAND; // Reset to default after first signal + + sigaction(SIGABRT, &fatalSa, nullptr); // abort + sigaction(SIGSEGV, &fatalSa, nullptr); // segmentation fault + sigaction(SIGBUS, &fatalSa, nullptr); // bus error + sigaction(SIGFPE, &fatalSa, nullptr); // floating point exception + sigaction(SIGILL, &fatalSa, nullptr); // illegal instruction } void EntropyApplication::uninstallSignalHandlers() { diff --git a/src/Core/EntropyCAPI.cpp b/src/Core/EntropyCAPI.cpp index 6d3303e..bcbb782 100644 --- a/src/Core/EntropyCAPI.cpp +++ b/src/Core/EntropyCAPI.cpp @@ -68,38 +68,41 @@ ENTROPY_API void entropy_buffer_dispose(EntropyOwnedBuffer b) { if (b.ptr) entropy_free((void*)b.ptr); } -static inline EntropyObject* to_cpp(EntropyObjectRef* o) { - return reinterpret_cast(o); -} -static inline const EntropyObject* to_cpp_c(const EntropyObjectRef* o) { +static inline const EntropyObject* toCppConst(const EntropyObjectRef* o) { return reinterpret_cast(o); } ENTROPY_API void entropy_object_retain(const EntropyObjectRef* obj) { - if (obj) to_cpp_c(obj)->retain(); + if (obj) toCppConst(obj)->retain(); } ENTROPY_API void entropy_object_release(const EntropyObjectRef* obj) { - if (obj) to_cpp_c(obj)->release(); + if (obj) toCppConst(obj)->release(); } ENTROPY_API uint32_t entropy_object_ref_count(const EntropyObjectRef* obj) { - return obj ? to_cpp_c(obj)->refCount() : 0; + return obj ? toCppConst(obj)->refCount() : 0; } ENTROPY_API EntropyTypeId entropy_object_type_id(const EntropyObjectRef* obj) { - return obj ? to_cpp_c(obj)->classHash() : 0u; + return obj ? toCppConst(obj)->classHash() : 0u; } ENTROPY_API const char* entropy_object_class_name(const EntropyObjectRef* obj) { - return obj ? 
to_cpp_c(obj)->className() : ""; + return obj ? toCppConst(obj)->className() : ""; } -static EntropyStatus copy_string_out(const std::string& s, EntropyOwnedString* out) { +static EntropyStatus copyStringOut(const std::string& s, EntropyOwnedString* out) { if (!out) return ENTROPY_ERR_INVALID_ARG; - char* mem = static_cast(entropy_alloc(s.size())); - if (!mem && s.size() != 0) return ENTROPY_ERR_NO_MEMORY; - if (s.size()) std::memcpy(mem, s.data(), s.size()); + if (s.empty()) { + out->ptr = nullptr; + out->len = 0; + return ENTROPY_OK; + } + char* mem = static_cast(entropy_alloc(s.size() + 1)); + if (!mem) return ENTROPY_ERR_NO_MEMORY; + std::memcpy(mem, s.data(), s.size()); + mem[s.size()] = '\0'; // Null-terminate for safety out->ptr = mem; out->len = static_cast(s.size()); return ENTROPY_OK; @@ -108,7 +111,7 @@ static EntropyStatus copy_string_out(const std::string& s, EntropyOwnedString* o ENTROPY_API EntropyStatus entropy_object_class_name_owned(const EntropyObjectRef* obj, EntropyOwnedString* out) { if (!obj) return ENTROPY_ERR_INVALID_ARG; try { - return copy_string_out(std::string(to_cpp_c(obj)->className()), out); + return copyStringOut(std::string(toCppConst(obj)->className()), out); } catch (...) { return ENTROPY_ERR_UNKNOWN; } @@ -117,7 +120,7 @@ ENTROPY_API EntropyStatus entropy_object_class_name_owned(const EntropyObjectRef ENTROPY_API EntropyStatus entropy_object_to_string(const EntropyObjectRef* obj, EntropyOwnedString* out) { if (!obj) return ENTROPY_ERR_INVALID_ARG; try { - return copy_string_out(to_cpp_c(obj)->toString(), out); + return copyStringOut(toCppConst(obj)->toString(), out); } catch (...) 
{ return ENTROPY_ERR_UNKNOWN; } @@ -126,7 +129,7 @@ ENTROPY_API EntropyStatus entropy_object_to_string(const EntropyObjectRef* obj, ENTROPY_API EntropyStatus entropy_object_debug_string(const EntropyObjectRef* obj, EntropyOwnedString* out) { if (!obj) return ENTROPY_ERR_INVALID_ARG; try { - return copy_string_out(to_cpp_c(obj)->debugString(), out); + return copyStringOut(toCppConst(obj)->debugString(), out); } catch (...) { return ENTROPY_ERR_UNKNOWN; } @@ -135,7 +138,7 @@ ENTROPY_API EntropyStatus entropy_object_debug_string(const EntropyObjectRef* ob ENTROPY_API EntropyStatus entropy_object_description(const EntropyObjectRef* obj, EntropyOwnedString* out) { if (!obj) return ENTROPY_ERR_INVALID_ARG; try { - return copy_string_out(to_cpp_c(obj)->description(), out); + return copyStringOut(toCppConst(obj)->description(), out); } catch (...) { return ENTROPY_ERR_UNKNOWN; } @@ -156,15 +159,15 @@ ENTROPY_API EntropyBool entropy_handle_type_matches(EntropyHandle h, EntropyType return ((expected != 0) && (h.type_id != 0) && (h.type_id == expected)) ? 
ENTROPY_TRUE : ENTROPY_FALSE; } -ENTROPY_API EntropyStatus entropy_object_to_handle(const EntropyObjectRef* obj, EntropyHandle* out_handle) { - if (!obj || !out_handle) return ENTROPY_ERR_INVALID_ARG; - if (!to_cpp_c(obj)->hasHandle()) return ENTROPY_ERR_UNAVAILABLE; +ENTROPY_API EntropyStatus entropy_object_to_handle(const EntropyObjectRef* obj, EntropyHandle* outHandle) { + if (!obj || !outHandle) return ENTROPY_ERR_INVALID_ARG; + if (!toCppConst(obj)->hasHandle()) return ENTROPY_ERR_UNAVAILABLE; EntropyHandle h{}; - h.owner = to_cpp_c(obj)->handleOwner(); - h.index = to_cpp_c(obj)->handleIndex(); - h.generation = to_cpp_c(obj)->handleGeneration(); - h.type_id = to_cpp_c(obj)->classHash(); - *out_handle = h; + h.owner = toCppConst(obj)->handleOwner(); + h.index = toCppConst(obj)->handleIndex(); + h.generation = toCppConst(obj)->handleGeneration(); + h.type_id = toCppConst(obj)->classHash(); + *outHandle = h; return ENTROPY_OK; } @@ -189,15 +192,15 @@ ENTROPY_API EntropyStatus entropy_handle_release(EntropyHandle h) { return ENTROPY_OK; } -ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, EntropyTypeId* out_type_id, - EntropyOwnedString* out_class_name) { +ENTROPY_API EntropyStatus entropy_handle_info(EntropyHandle h, EntropyTypeId* outTypeId, + EntropyOwnedString* outClassName) { if (!h.owner) return ENTROPY_ERR_INVALID_ARG; EntropyObjectRef* obj = entropy_resolve_handle(h); if (!obj) return ENTROPY_ERR_NOT_FOUND; EntropyStatus st = ENTROPY_OK; - if (out_type_id) *out_type_id = entropy_object_type_id(obj); - if (out_class_name) { - st = entropy_object_class_name_owned(obj, out_class_name); + if (outTypeId) *outTypeId = entropy_object_type_id(obj); + if (outClassName) { + st = entropy_object_class_name_owned(obj, outClassName); } entropy_object_release(obj); return st; @@ -209,20 +212,20 @@ struct OwnerVTable EntropyResolveFn resolve; EntropyValidateFn validate; }; -static std::unordered_map g_ownerVTables; -static std::mutex g_ownerVTablesMutex; 
+static std::unordered_map sOwnerVTables; +static std::mutex sOwnerVTablesMutex; ENTROPY_API void entropy_register_owner_vtable(const void* owner, EntropyResolveFn resolve, EntropyValidateFn validate) { - std::lock_guard lock(g_ownerVTablesMutex); - g_ownerVTables[owner] = OwnerVTable{resolve, validate}; + std::lock_guard lock(sOwnerVTablesMutex); + sOwnerVTables[owner] = OwnerVTable{resolve, validate}; } ENTROPY_API EntropyObjectRef* entropy_resolve_handle(EntropyHandle h) { if (!h.owner) return nullptr; - std::lock_guard lock(g_ownerVTablesMutex); - auto it = g_ownerVTables.find(h.owner); - if (it == g_ownerVTables.end()) return nullptr; + std::lock_guard lock(sOwnerVTablesMutex); + auto it = sOwnerVTables.find(h.owner); + if (it == sOwnerVTables.end()) return nullptr; auto fn = it->second.resolve; if (!fn) return nullptr; // Contract: resolve returns a RETAINED pointer if valid; otherwise NULL diff --git a/src/Core/EntropyMain.cpp b/src/Core/EntropyMain.cpp index 0c996c8..fe30bc5 100644 --- a/src/Core/EntropyMain.cpp +++ b/src/Core/EntropyMain.cpp @@ -31,7 +31,7 @@ struct CppDelegate : EntropyEngine::Core::EntropyAppDelegate void applicationWillTerminate() override { if (del.will_terminate) del.will_terminate((EntropyApp*)app, del.userdata); } - void applicationDidCatchUnhandledException(std::exception_ptr) override { + void applicationDidCatchUnhandledException([[maybe_unused]] std::exception_ptr ex) override { if (del.did_catch_unhandled_exception) del.did_catch_unhandled_exception((EntropyApp*)app, del.userdata); } }; diff --git a/src/Core/EntropyServiceRegistry.cpp b/src/Core/EntropyServiceRegistry.cpp index a6eead0..7802a87 100644 --- a/src/Core/EntropyServiceRegistry.cpp +++ b/src/Core/EntropyServiceRegistry.cpp @@ -37,19 +37,19 @@ bool EntropyServiceRegistry::registerService(std::shared_ptr ser if (it->second.service) { HandleSlotOps::release(*it->second.service, it->second.generation); } - it->second.service = service; + it->second.service = 
std::move(service); // Stamp with new generation - HandleSlotOps::stamp(*service, this, it->second.slotIndex, it->second.generation); + HandleSlotOps::stamp(*it->second.service, this, it->second.slotIndex, it->second.generation); return false; // Not a new insertion } // New service - create slot ServiceSlot slot; - slot.service = service; + slot.service = std::move(service); slot.slotIndex = _nextSlotIndex++; // Stamp service with handle identity - HandleSlotOps::stamp(*service, this, slot.slotIndex, slot.generation); + HandleSlotOps::stamp(*slot.service, this, slot.slotIndex, slot.generation); _slots[tid] = std::move(slot); return true; diff --git a/src/Logging/Logger.h b/src/Logging/Logger.h index 445b135..e6efd5c 100644 --- a/src/Logging/Logger.h +++ b/src/Logging/Logger.h @@ -379,34 +379,39 @@ class Logger * } * @endcode */ -#define ENTROPY_LOG_TRACE(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().trace(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_TRACE(fmt, ...) \ + ::EntropyEngine::Core::Logging::Logger::global().trace(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) -#define ENTROPY_LOG_DEBUG(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().debug(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_DEBUG(fmt, ...) \ + ::EntropyEngine::Core::Logging::Logger::global().debug(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) -#define ENTROPY_LOG_INFO(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().info(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_INFO(fmt, ...) \ + ::EntropyEngine::Core::Logging::Logger::global().info(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_WARNING(fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().warning(__func__, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().warning(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) -#define ENTROPY_LOG_ERROR(fmt, ...) 
::EntropyEngine::Core::Logging::Logger::global().error(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_ERROR(fmt, ...) \ + ::EntropyEngine::Core::Logging::Logger::global().error(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) -#define ENTROPY_LOG_FATAL(fmt, ...) ::EntropyEngine::Core::Logging::Logger::global().fatal(__func__, fmt, ##__VA_ARGS__) +#define ENTROPY_LOG_FATAL(fmt, ...) \ + ::EntropyEngine::Core::Logging::Logger::global().fatal(__func__, fmt __VA_OPT__(, ) __VA_ARGS__) // Category-specific macros for explicit category specification #define ENTROPY_LOG_TRACE_CAT(category, fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().trace(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().trace(category, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_DEBUG_CAT(category, fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().debug(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().debug(category, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_INFO_CAT(category, fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().info(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().info(category, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_WARNING_CAT(category, fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().warning(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().warning(category, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_ERROR_CAT(category, fmt, ...) \ - ::EntropyEngine::Core::Logging::Logger::global().error(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().error(category, fmt __VA_OPT__(, ) __VA_ARGS__) #define ENTROPY_LOG_FATAL_CAT(category, fmt, ...) 
\ - ::EntropyEngine::Core::Logging::Logger::global().fatal(category, fmt, ##__VA_ARGS__) + ::EntropyEngine::Core::Logging::Logger::global().fatal(category, fmt __VA_OPT__(, ) __VA_ARGS__) diff --git a/src/VirtualFileSystem/DirectoryHandle.cpp b/src/VirtualFileSystem/DirectoryHandle.cpp index 04b3c36..d28a454 100644 --- a/src/VirtualFileSystem/DirectoryHandle.cpp +++ b/src/VirtualFileSystem/DirectoryHandle.cpp @@ -16,10 +16,10 @@ DirectoryHandle::DirectoryHandle(VirtualFileSystem* vfs, std::string path) : _vf // Backend attachment and normalized key computation are performed by VirtualFileSystem::createDirectoryHandle. // Initialize metadata - _meta.path = path; + _meta.path = std::move(path); // Extract parent directory and name - std::filesystem::path p(path); + std::filesystem::path p(_meta.path); if (p.has_parent_path()) { _meta.directory = p.parent_path().string(); } diff --git a/src/VirtualFileSystem/FileHandle.cpp b/src/VirtualFileSystem/FileHandle.cpp index d7a7db6..70c62cd 100644 --- a/src/VirtualFileSystem/FileHandle.cpp +++ b/src/VirtualFileSystem/FileHandle.cpp @@ -101,24 +101,24 @@ FileOperationHandle FileHandle::writeAll(std::span bytes) const { WriteOptions opts; opts.truncate = true; auto data = std::vector(bytes.begin(), bytes.end()); - return _vfs->submitSerialized( - _meta.path, - [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, - const std::string& p, const ExecContext&) mutable { - auto byteSpan = std::span(data.data(), data.size()); - auto inner = backend->writeFile(p, byteSpan, opts); - inner.wait(); - auto st = inner.status(); - if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { - s.wrote = inner.bytesWritten(); - s.complete(st); - } else { - const auto& err = inner.errorInfo(); - s.setError(err.code == FileError::None ? 
FileError::IOError : err.code, err.message, err.path, - err.systemError); - s.complete(FileOpStatus::Failed); - } - }); + return _vfs->submitSerialized(_meta.path, + [opts, data = std::move(data)](FileOperationHandle::OpState& s, + const std::shared_ptr& backend, + const std::string& p, const ExecContext&) mutable { + auto byteSpan = std::span(data.data(), data.size()); + auto inner = backend->writeFile(p, byteSpan, opts); + inner.wait(); + auto st = inner.status(); + if (st == FileOpStatus::Complete || st == FileOpStatus::Partial) { + s.wrote = inner.bytesWritten(); + s.complete(st); + } else { + const auto& err = inner.errorInfo(); + s.setError(err.code == FileError::None ? FileError::IOError : err.code, + err.message, err.path, err.systemError); + s.complete(FileOpStatus::Failed); + } + }); } return FileOperationHandle::immediate(FileOpStatus::Failed); } @@ -127,9 +127,9 @@ FileOperationHandle FileHandle::writeAll(std::span bytes, const W if (_backend && _vfs) { auto data = std::vector(bytes.begin(), bytes.end()); return _vfs->submitSerialized( - _meta.path, - [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, - const std::string& p, const ExecContext&) mutable { + _meta.path, [opts, data = std::move(data)](FileOperationHandle::OpState& s, + const std::shared_ptr& backend, + const std::string& p, const ExecContext&) mutable { auto byteSpan = std::span(data.data(), data.size()); if (auto* local = dynamic_cast(backend.get())) { local->doWriteFile(s, p, byteSpan, opts); @@ -159,9 +159,9 @@ FileOperationHandle FileHandle::writeRange(uint64_t offset, std::span(bytes.begin(), bytes.end()); return _vfs->submitSerialized( - _meta.path, - [opts, data = std::move(data)](FileOperationHandle::OpState& s, std::shared_ptr backend, - const std::string& p, const ExecContext&) mutable { + _meta.path, [opts, data = std::move(data)](FileOperationHandle::OpState& s, + const std::shared_ptr& backend, + const std::string& p, const ExecContext&) 
mutable { auto byteSpan = std::span(data.data(), data.size()); if (auto* local = dynamic_cast(backend.get())) { local->doWriteFile(s, p, byteSpan, opts); @@ -193,7 +193,7 @@ FileOperationHandle FileHandle::writeRange(uint64_t offset, std::span(bytes.begin(), bytes.end()); return _vfs->submitSerialized( _meta.path, [wopts, data = std::move(data)](FileOperationHandle::OpState& s, - std::shared_ptr backend, + const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { auto byteSpan = std::span(data.data(), data.size()); if (auto* local = dynamic_cast(backend.get())) { @@ -222,7 +222,7 @@ FileOperationHandle FileHandle::writeLine(size_t lineNumber, std::string_view li auto lineCopy = std::string(line); return _vfs->submitSerialized( _meta.path, [lineNumber, lineCopy = std::move(lineCopy)](FileOperationHandle::OpState& s, - std::shared_ptr backend, + const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { if (auto* local = dynamic_cast(backend.get())) { local->doWriteLine(s, p, lineNumber, lineCopy); @@ -252,7 +252,7 @@ FileOperationHandle FileHandle::writeAll(std::string_view text) const { auto textCopy = std::string(text); return _vfs->submitSerialized( _meta.path, [opts, textCopy = std::move(textCopy)](FileOperationHandle::OpState& s, - std::shared_ptr backend, + const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { auto spanBytes = std::span(reinterpret_cast(textCopy.data()), textCopy.size()); @@ -282,7 +282,7 @@ FileOperationHandle FileHandle::writeAll(std::string_view text, const WriteOptio auto textCopy = std::string(text); return _vfs->submitSerialized( _meta.path, [opts, textCopy = std::move(textCopy)](FileOperationHandle::OpState& s, - std::shared_ptr backend, + const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { auto spanBytes = std::span(reinterpret_cast(textCopy.data()), textCopy.size()); @@ -317,7 +317,7 @@ FileOperationHandle 
FileHandle::writeLine(size_t lineNumber, std::string_view li FileOperationHandle FileHandle::createEmpty() const { if (_backend && _vfs) { return _vfs->submitSerialized( - _meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, + _meta.path, [](FileOperationHandle::OpState& s, const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { if (auto* local = dynamic_cast(backend.get())) { local->doCreateFile(s, p); @@ -342,7 +342,7 @@ FileOperationHandle FileHandle::createEmpty() const { FileOperationHandle FileHandle::remove() const { if (_backend && _vfs) { return _vfs->submitSerialized( - _meta.path, [](FileOperationHandle::OpState& s, std::shared_ptr backend, + _meta.path, [](FileOperationHandle::OpState& s, const std::shared_ptr& backend, const std::string& p, const ExecContext&) mutable { if (auto* local = dynamic_cast(backend.get())) { local->doDeleteFile(s, p); diff --git a/src/VirtualFileSystem/FileStream.cpp b/src/VirtualFileSystem/FileStream.cpp index 1e32a9d..17eb5ef 100644 --- a/src/VirtualFileSystem/FileStream.cpp +++ b/src/VirtualFileSystem/FileStream.cpp @@ -144,7 +144,7 @@ void BufferedFileStream::close() { void BufferedFileStream::flushWriteBuffer() { if (_writePos > 0) { - auto result = _inner->write(std::span(_writeBuffer.data(), _writePos)); + (void)_inner->write(std::span(_writeBuffer.data(), _writePos)); _writePos = 0; _dirty = false; } diff --git a/src/VirtualFileSystem/FileWatchManager.cpp b/src/VirtualFileSystem/FileWatchManager.cpp index 051e428..568d000 100644 --- a/src/VirtualFileSystem/FileWatchManager.cpp +++ b/src/VirtualFileSystem/FileWatchManager.cpp @@ -164,16 +164,16 @@ FileWatch* FileWatchManager::createWatch(const std::string& path, FileWatchCallb } // Allocate slot - auto slot_info = allocateSlot(); - index = slot_info.first; - generation = slot_info.second; + auto slotInfo = allocateSlot(); + index = slotInfo.first; + generation = slotInfo.second; if (index == UINT32_MAX) { 
ENTROPY_LOG_ERROR("Failed to allocate watch slot (out of slots)"); return nullptr; } // Create FileWatch object (refcount starts at 1) - watch = new FileWatch(this, path, callback, options); + watch = new FileWatch(this, path, std::move(callback), options); // Stamp the object with handle identity using EntropyObject's built-in facility HandleAccess::set(*watch, this, index, generation); diff --git a/src/VirtualFileSystem/VirtualFileSystem.cpp b/src/VirtualFileSystem/VirtualFileSystem.cpp index b622984..87e934a 100644 --- a/src/VirtualFileSystem/VirtualFileSystem.cpp +++ b/src/VirtualFileSystem/VirtualFileSystem.cpp @@ -19,7 +19,7 @@ namespace EntropyEngine::Core::IO // Constructor / Destructor VirtualFileSystem::VirtualFileSystem(EntropyEngine::Core::Concurrency::WorkContractGroup* group, Config cfg) - : _group(group), _cfg(cfg), _watchManager(std::make_unique(this)) {} + : _group(group), _cfg(std::move(cfg)), _watchManager(std::make_unique(this)) {} VirtualFileSystem::~VirtualFileSystem() { // Ensure FileWatchManager is destroyed before WorkContractGroup is potentially destroyed @@ -231,7 +231,7 @@ void VirtualFileSystem::setDefaultBackend(std::shared_ptr ba if (backend) { backend->setVirtualFileSystem(this); } - _defaultBackend = backend; + _defaultBackend = std::move(backend); } void VirtualFileSystem::mountBackend(const std::string& prefix, std::shared_ptr backend) { @@ -239,7 +239,7 @@ void VirtualFileSystem::mountBackend(const std::string& prefix, std::shared_ptr< if (backend) { backend->setVirtualFileSystem(this); } - _mountedBackends[prefix] = backend; + _mountedBackends[prefix] = std::move(backend); } std::shared_ptr VirtualFileSystem::findBackend(const std::string& path) const { diff --git a/src/entropy/entropy_work_contract_handle_c.cpp b/src/entropy/entropy_work_contract_handle_c.cpp index fd96fb6..5a18e92 100644 --- a/src/entropy/entropy_work_contract_handle_c.cpp +++ b/src/entropy/entropy_work_contract_handle_c.cpp @@ -23,7 +23,7 @@ namespace { // 
Centralized exception translation for WorkContractHandle operations -void translate_exception(EntropyStatus* status) { +void translateException(EntropyStatus* status) { if (!status) return; try { @@ -40,17 +40,12 @@ void translate_exception(EntropyStatus* status) { } // Safe cast from opaque handle to C++ object -inline WorkContractHandle* to_cpp(entropy_WorkContractHandle handle) { +inline WorkContractHandle* toCpp(entropy_WorkContractHandle handle) { return reinterpret_cast(handle); } -// Safe cast from C++ object to opaque handle -inline entropy_WorkContractHandle to_c(WorkContractHandle* handle) { - return reinterpret_cast(handle); -} - // Convert C++ ScheduleResult to C enum -EntropyScheduleResult to_c_schedule_result(ScheduleResult result) { +EntropyScheduleResult toCScheduleResult(ScheduleResult result) { switch (result) { case ScheduleResult::Scheduled: return ENTROPY_SCHEDULE_SCHEDULED; @@ -61,30 +56,11 @@ EntropyScheduleResult to_c_schedule_result(ScheduleResult result) { case ScheduleResult::Executing: return ENTROPY_SCHEDULE_EXECUTING; case ScheduleResult::Invalid: - return ENTROPY_SCHEDULE_INVALID; default: return ENTROPY_SCHEDULE_INVALID; } } -// Convert C++ ContractState to C enum -EntropyContractState to_c_contract_state(ContractState state) { - switch (state) { - case ContractState::Free: - return ENTROPY_CONTRACT_FREE; - case ContractState::Allocated: - return ENTROPY_CONTRACT_ALLOCATED; - case ContractState::Scheduled: - return ENTROPY_CONTRACT_SCHEDULED; - case ContractState::Executing: - return ENTROPY_CONTRACT_EXECUTING; - case ContractState::Completed: - return ENTROPY_CONTRACT_COMPLETED; - default: - return ENTROPY_CONTRACT_FREE; - } -} - } // anonymous namespace // ============================================================================ @@ -103,11 +79,11 @@ EntropyScheduleResult entropy_work_contract_schedule(entropy_WorkContractHandle } try { - WorkContractHandle* cpp_handle = to_cpp(handle); - ScheduleResult result = 
cpp_handle->schedule(); - return to_c_schedule_result(result); + WorkContractHandle* cppHandle = toCpp(handle); + ScheduleResult result = cppHandle->schedule(); + return toCScheduleResult(result); } catch (...) { - translate_exception(status); + translateException(status); return ENTROPY_SCHEDULE_INVALID; } } @@ -122,11 +98,11 @@ EntropyScheduleResult entropy_work_contract_unschedule(entropy_WorkContractHandl } try { - WorkContractHandle* cpp_handle = to_cpp(handle); - ScheduleResult result = cpp_handle->unschedule(); - return to_c_schedule_result(result); + WorkContractHandle* cppHandle = toCpp(handle); + ScheduleResult result = cppHandle->unschedule(); + return toCScheduleResult(result); } catch (...) { - translate_exception(status); + translateException(status); return ENTROPY_SCHEDULE_INVALID; } } @@ -135,8 +111,8 @@ EntropyBool entropy_work_contract_is_valid(entropy_WorkContractHandle handle) { if (!handle) return ENTROPY_FALSE; try { - WorkContractHandle* cpp_handle = to_cpp(handle); - return cpp_handle->valid() ? ENTROPY_TRUE : ENTROPY_FALSE; + WorkContractHandle* cppHandle = toCpp(handle); + return cppHandle->valid() ? ENTROPY_TRUE : ENTROPY_FALSE; } catch (...) { return ENTROPY_FALSE; } @@ -146,8 +122,8 @@ void entropy_work_contract_release(entropy_WorkContractHandle handle) { if (!handle) return; try { - WorkContractHandle* cpp_handle = to_cpp(handle); - cpp_handle->release(); + WorkContractHandle* cppHandle = toCpp(handle); + cppHandle->release(); // Note: We don't delete the C++ object here because it might still be // referenced by the user. The handle becomes invalid but the object remains. // This matches the C++ API semantics where handles are value types. 
@@ -160,8 +136,8 @@ void entropy_work_contract_handle_destroy(entropy_WorkContractHandle handle) { if (!handle) return; // Delete the heap-allocated wrapper created by entropy_work_contract_group_create_contract - WorkContractHandle* cpp_handle = to_cpp(handle); - delete cpp_handle; + WorkContractHandle* cppHandle = toCpp(handle); + delete cppHandle; } EntropyBool entropy_work_contract_is_scheduled(entropy_WorkContractHandle handle, EntropyStatus* status) { @@ -174,10 +150,10 @@ EntropyBool entropy_work_contract_is_scheduled(entropy_WorkContractHandle handle } try { - WorkContractHandle* cpp_handle = to_cpp(handle); - return cpp_handle->isScheduled() ? ENTROPY_TRUE : ENTROPY_FALSE; + WorkContractHandle* cppHandle = toCpp(handle); + return cppHandle->isScheduled() ? ENTROPY_TRUE : ENTROPY_FALSE; } catch (...) { - translate_exception(status); + translateException(status); return ENTROPY_FALSE; } } @@ -192,10 +168,10 @@ EntropyBool entropy_work_contract_is_executing(entropy_WorkContractHandle handle } try { - WorkContractHandle* cpp_handle = to_cpp(handle); - return cpp_handle->isExecuting() ? ENTROPY_TRUE : ENTROPY_FALSE; + WorkContractHandle* cppHandle = toCpp(handle); + return cppHandle->isExecuting() ? ENTROPY_TRUE : ENTROPY_FALSE; } catch (...) { - translate_exception(status); + translateException(status); return ENTROPY_FALSE; } } @@ -210,24 +186,24 @@ EntropyContractState entropy_work_contract_get_state(entropy_WorkContractHandle } try { - WorkContractHandle* cpp_handle = to_cpp(handle); + WorkContractHandle* cppHandle = toCpp(handle); // Query the state through the group since WorkContractHandle // doesn't expose getState() directly. We need to get the owner. // Actually, looking at the interface, we can determine state from // the public methods. 
- if (!cpp_handle->valid()) { + if (!cppHandle->valid()) { return ENTROPY_CONTRACT_FREE; - } else if (cpp_handle->isExecuting()) { + } else if (cppHandle->isExecuting()) { return ENTROPY_CONTRACT_EXECUTING; - } else if (cpp_handle->isScheduled()) { + } else if (cppHandle->isScheduled()) { return ENTROPY_CONTRACT_SCHEDULED; } else { return ENTROPY_CONTRACT_ALLOCATED; } } catch (...) { - translate_exception(status); + translateException(status); return ENTROPY_CONTRACT_FREE; } } From 584090e0e4bff9b1aa8e4ef2109cec1b4b077ade Mon Sep 17 00:00:00 2001 From: "Jonathan \"Geenz\" Goodman" Date: Tue, 30 Dec 2025 20:20:37 -0500 Subject: [PATCH 6/6] Update CMakeLists.txt --- CMakeLists.txt | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CMakeLists.txt b/CMakeLists.txt index 738864b..ee3fffe 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -38,6 +38,11 @@ set(CMAKE_CXX_EXTENSIONS OFF) # and the auto-scanning generates Clang-specific flags that GCC doesn't support set(CMAKE_CXX_SCAN_FOR_MODULES OFF) +# MSVC-specific: Enable conforming preprocessor for C++20 __VA_OPT__ support +if(MSVC) + add_compile_options(/Zc:preprocessor) +endif() + # Configure sanitizers # Configure sanitizers # Always enable ASAN+UBSAN for Debug builds on minimal supported compilers