diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 65bd9cf2c58..b0af076d06a 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -2,7 +2,7 @@ name: Bug report about: Create a report to help us improve title: '' -labels: 'kind/bug report' +labels: 'bug' assignees: '' --- @@ -26,12 +26,12 @@ A clear and concise description of what actually happened. **Config/Logs/Screenshots** If applicable, please attach your node configuration, logs or any screenshots. -**EventStore details** - - EventStore server version: +**KurrentDB details** + - KurrentDB server version: - Operating system: - - EventStore client library and version (if applicable): + - KurrentDB client library and version (if applicable): **Additional context** Add any other context about the problem here. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 641ae4589eb..19f881377d6 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -2,7 +2,7 @@ name: Feature request about: Suggest an idea for this project title: '' -labels: kind/enhancement +labels: enhancement assignees: '' --- diff --git a/.github/labeler.yml b/.github/labeler.yml new file mode 100644 index 00000000000..6972e76d29b --- /dev/null +++ b/.github/labeler.yml @@ -0,0 +1,17 @@ +# Add 'documentation' label if all changes are in the docs directory or to the README file +documentation: + - all: + - changed-files: + - all-globs-to-all-files: + - docs/* + - README.md + +# Add 'ignore-for-release' label if all changes are: +# - changes to workflows, codeowners, etc (.github) +# - changes to tests (*.Tests) +ignore-for-release: + - all: + - changed-files: + - all-globs-to-all-files: + - .github/* + - src/*.Tests/* diff --git a/.github/release.yml b/.github/release.yml new file mode 100644 index 00000000000..e49cb2836a9 --- /dev/null +++ b/.github/release.yml @@ -0,0 +1,23 @@ +changelog: + exclude: + labels: + - ignore-for-release + categories: + - title: Added + labels: + - enhancement + - title: Fixed + labels: + - bug + - title: Changed + labels: + - "*" + - title: Deprecated + labels: + - deprecated + - title: Breaking Changes + labels: + - breaking + - title: Documentation + labels: + - documentation \ No newline at end of file diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml deleted file mode 100644 index 5f9e856db74..00000000000 --- a/.github/workflows/benchmark.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Request Benchmark - -on: - pull_request: - types: - - labeled - -jobs: - request_benchmark: - runs-on: ubuntu-latest - steps: - - name: Request Benchmark - if: ${{ contains(github.event.pull_request.labels.*.name, 'Benchmark') && github.event.repository.fork == 'false' }} - run: | - curl -X POST "https://api.github.com/repos/EventStore/grpc-tcp-benchmark/dispatches" \ - -H 'Accept: application/vnd.github.everest-preview+json' \ - -u ${{ secrets.GH_PAT }} \ - --data '{"event_type": "benchmark_deployment", "client_payload": {"pr_no": ${{ github.event.number }}, actor : ${{ github.actor }} }}' - - - name: Benchmark is not available on forked repos. - if: ${{ github.event.repository.fork == 'true' }} - run: echo "Benchmark is not available on forked repos." 
&& exit 1 diff --git a/.github/workflows/build-container-alpine.yml b/.github/workflows/build-container-alpine.yml index 5a86091ca20..63e12fb2503 100644 --- a/.github/workflows/build-container-alpine.yml +++ b/.github/workflows/build-container-alpine.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" diff --git a/.github/workflows/build-container-bookworm-slim.yml b/.github/workflows/build-container-bookworm-slim.yml index e6a0767d990..adb8e5ac096 100644 --- a/.github/workflows/build-container-bookworm-slim.yml +++ b/.github/workflows/build-container-bookworm-slim.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" diff --git a/.github/workflows/build-container-jammy.yml b/.github/workflows/build-container-jammy.yml index d45733e5231..f032d7204d7 100644 --- a/.github/workflows/build-container-jammy.yml +++ b/.github/workflows/build-container-jammy.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" diff --git a/.github/workflows/build-container-reusable.yml b/.github/workflows/build-container-reusable.yml index ad33f13a273..5ceeecf3da8 100644 --- a/.github/workflows/build-container-reusable.yml +++ b/.github/workflows/build-container-reusable.yml @@ -55,37 +55,44 @@ jobs: with: context: . load: true - tags: eventstore + tags: kurrentdb build-args: | CONTAINER_RUNTIME=${{ inputs.container-runtime }} RUNTIME=${{ steps.variables.outputs.runtime }} - name: Verify Build run: | - docker run --rm eventstore --insecure --what-if + docker run --rm kurrentdb --insecure --what-if - name: Build Test Container uses: docker/build-push-action@v4 with: context: . 
load: true target: test - tags: eventstore-test + tags: kurrentdb-test build-args: | CONTAINER_RUNTIME=${{ inputs.container-runtime }} RUNTIME=${{ steps.variables.outputs.runtime }} + # pass env vars so that GitHubActionsTestLogger can populate GitHub job summary - name: Run Tests - run: | - docker run \ - --volume $(pwd)/test-results:/build/test-results \ - --rm \ - eventstore-test + run: > + docker run + --volume $(pwd)/test-results:/build/test-results + --volume ${{ github.step_summary }}:/build/step-summary + --rm + --env GITHUB_STEP_SUMMARY=/build/step-summary + --env GITHUB_SERVER_URL=${{ github.server_url }} + --env GITHUB_REPOSITORY=${{ github.repository }} + --env GITHUB_WORKSPACE=${{ github.workspace }} + --env GITHUB_SHA=${{ github.sha }} + kurrentdb-test - name: Publish Test Results (HTML) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: path: test-results/test-results.html name: test-results-${{ inputs.container-runtime }}.html - name: Publish Test Results (All) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: test-results-${{ inputs.container-runtime }} diff --git a/.github/workflows/build-docs.yml b/.github/workflows/build-docs.yml new file mode 100644 index 00000000000..e6e79d806ba --- /dev/null +++ b/.github/workflows/build-docs.yml @@ -0,0 +1,14 @@ +name: Build Production Site + +on: + push: + branches: [release/*] + paths: + - '**.md' + +jobs: + dispatch: + runs-on: ubuntu-latest + steps: + - name: Trigger build + run: curl -X POST -d {} "${{ secrets.CLOUDFLARE_BUILD_HOOK }}" diff --git a/.github/workflows/build-reusable.yml b/.github/workflows/build-reusable.yml index 86ffb6b240d..580fdec5065 100644 --- a/.github/workflows/build-reusable.yml +++ b/.github/workflows/build-reusable.yml @@ -23,6 +23,12 @@ jobs: uses: actions/checkout@v4 with: fetch-depth: 0 + - name: Login to Cloudsmith + uses: docker/login-action@v3 + with: + registry: docker.kurrent.io + username: ${{ secrets.CLOUDSMITH_CICD_USER }} + password: ${{ secrets.CLOUDSMITH_CICD_TOKEN }} - name: Install netcoreapp3.1 uses: actions/setup-dotnet@v3 with: @@ -42,30 +48,40 @@ jobs: - name: Compile shell: bash run: | - dotnet build --configuration ${{ matrix.configuration }} -p:Platform=${{ inputs.arch }} src/EventStore.sln + dotnet build --configuration ${{ matrix.configuration }} -p:Platform=${{ inputs.arch }} src/KurrentDB.sln - name: Verify Build shell: bash run: | - dotnet run --project src/EventStore.ClusterNode --configuration ${{ matrix.configuration }} -- --insecure --what-if + dotnet run --project src/KurrentDB --configuration ${{ matrix.configuration }} -- --insecure --what-if - name: Run Tests shell: bash - run: | - find ./src -maxdepth 1 -type d -name "*.Tests" -print0 \ - | xargs -I{} -0 -n1 bash -c \ - 'dotnet test --configuration ${{ matrix.configuration }} -p:Platform=${{ inputs.arch }} --blame --blame-hang-timeout 5min --blame-hang-dump-type mini --settings ./ci/ci.runsettings --logger:GitHubActions --logger:html --logger:trx --logger:"console;verbosity=normal" --results-directory=$(pwd)/test-results/$1 $1' - '{}' + run: > + dotnet test + --configuration ${{ matrix.configuration }} + -p:Platform=${{ inputs.arch }} + --blame + --blame-hang-timeout 5min + --blame-hang-dump-type mini + --settings ./ci/ci.runsettings + --logger:GitHubActions + --logger:html + --logger:trx + --logger:"console;verbosity=normal" + --results-directory=$(pwd)/test-results + src/KurrentDB.sln - name: Collect Test Results shell: bash if: 
always() run: | echo $(find ./test-results -name "*.html" | xargs cat) > test-results.html - name: Publish Test Results (HTML) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: path: test-results.html name: test-results-${{ matrix.configuration }}-${{ inputs.os }}.html - name: Publish Test Results (All) - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 if: always() with: name: test-results-${{ matrix.configuration }}-${{ inputs.os }} diff --git a/.github/workflows/build-ubuntu-20.04-arm64.yml b/.github/workflows/build-ubuntu-20.04-arm64.yml index 4008d9d1a2b..ad5e1fcd667 100644 --- a/.github/workflows/build-ubuntu-20.04-arm64.yml +++ b/.github/workflows/build-ubuntu-20.04-arm64.yml @@ -8,9 +8,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" @@ -22,3 +23,4 @@ jobs: with: os: ubuntu-20.04-arm64 arch: arm64 + secrets: inherit diff --git a/.github/workflows/build-ubuntu-22.04.yml b/.github/workflows/build-ubuntu-22.04.yml index 20d2cfd4441..a5bc3d3c780 100644 --- a/.github/workflows/build-ubuntu-22.04.yml +++ b/.github/workflows/build-ubuntu-22.04.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" @@ -23,3 +24,4 @@ jobs: with: os: ubuntu-22.04 arch: x64 + secrets: inherit diff --git a/.github/workflows/build-windows-2019.yml b/.github/workflows/build-windows-2019.yml index 401ea1f0409..7221ee6ae9c 100644 --- a/.github/workflows/build-windows-2019.yml +++ b/.github/workflows/build-windows-2019.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" @@ -23,3 +24,4 @@ jobs: with: os: windows-2019 arch: x64 + secrets: inherit diff --git a/.github/workflows/cherry-pick-pr-for-label.yml b/.github/workflows/cherry-pick-pr-for-label.yml index 693dad7bfc4..f7fa3d2b9da 100644 --- a/.github/workflows/cherry-pick-pr-for-label.yml +++ b/.github/workflows/cherry-pick-pr-for-label.yml @@ -9,6 +9,6 @@ jobs: steps: - uses: actions/checkout@v4 - name: Cherry Pick PR for label - uses: EventStore/Automations/cherry-pick-pr-for-label@master + uses: kurrent-io/Automations/cherry-pick-pr-for-label@master with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/common.yml b/.github/workflows/common.yml index 5c1f1a9df6b..35028df04dd 100644 --- a/.github/workflows/common.yml +++ b/.github/workflows/common.yml @@ -9,9 +9,10 @@ on: push: branches: - master - - release/oss-v* + - release/* tags: - oss-v* + - v* paths-ignore: - "docs/**" - "samples/**" @@ -58,7 +59,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Docker Compose Smoke Test run: | - docker compose build + RUNTIME=linux-amd64 docker compose build docker compose up --detach printf 'Waiting for cluster' timeout 60 bash -c -- 'until $(curl --output /dev/null --silent --insecure --fail https://localhost:2113/health/live); do printf '.'; sleep 2; done' diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml new file mode 100644 index 00000000000..e3ecf923f01 --- /dev/null +++ b/.github/workflows/labeler.yml @@ -0,0 +1,12 @@ +name: "Pull Request Labeler" +on: +- pull_request_target + +jobs: + labeler: + permissions: + contents: read + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/labeler@v5 \ No newline at end of file diff --git 
a/.github/workflows/pull-request-check.yml b/.github/workflows/pull-request-check.yml deleted file mode 100644 index 41dca4580d7..00000000000 --- a/.github/workflows/pull-request-check.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Pull Request check -on: - pull_request: - paths-ignore: - - "src/*.Tests/**" - - "docs/**" - - "samples/**" - - "**.md" - types: [opened, edited] -jobs: - checkPullRequest: - name: Pull Request check - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v4 - - name: Check pull requests - uses: EventStore/Automations/pr-check@master - with: - include: 'src' - exclude: 'src/EventStore.UI' diff --git a/.github/workflows/release-client.yml b/.github/workflows/release-client.yml deleted file mode 100644 index 9c3f8fbd4f5..00000000000 --- a/.github/workflows/release-client.yml +++ /dev/null @@ -1,20 +0,0 @@ -name: Initiate Client Release - -on: - push: - tags: - - "dotnet-client-v*" - -jobs: - release: - runs-on: ubuntu-latest - steps: - - name: Get the version - id: get_version - run: echo "version=${GITHUB_REF:25}" >> $GITHUB_OUTPUT - - name: Perform Release - run: | - curl -X POST https://api.github.com/repos/EventStore/TrainStation/dispatches \ - -H 'Accept: application/vnd.github.everest-preview+json' \ - -u ${{ secrets.GH_PAT }} \ - --data '{"event_type": "client-release", "client_payload": { "repository": "'"$GITHUB_REPOSITORY"'", "version": "${{ steps.get_version.outputs.version }}" }}' \ No newline at end of file diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml deleted file mode 100644 index 5dea555d2fa..00000000000 --- a/.github/workflows/release.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Initiate Release - -on: - push: - tags: - - "oss-v*" - -jobs: - release: - runs-on: ubuntu-latest - steps: - - name: Get the version - id: get_version - run: echo "version=${GITHUB_REF:15}" >> $GITHUB_OUTPUT - - - name: Perform Release - run: | - curl -X POST https://api.github.com/repos/EventStore/TrainStation/dispatches \ - -H 'Accept: application/vnd.github.everest-preview+json' \ - -u ${{ secrets.GH_PAT }} \ - --data '{"event_type": "release", "client_payload": { "repository": "'"$GITHUB_REPOSITORY"'", "version": "${{ steps.get_version.outputs.version }}" }}' \ No newline at end of file diff --git a/.gitmodules b/.gitmodules index 1d795090cb9..e69de29bb2d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "src/EventStore.UI"] - path = src/EventStore.UI - url = https://github.com/EventStore/EventStore.UI.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 3f775ad65ac..4acdfb147cc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,103 @@ # Changelog -All notable changes to this project will be documented in this file. -## [Unreleased] +**Note:** Changelog updates are no longer tracked in this file. Future changelog updates can be found in [Releases](https://github.com/EventStore/EventStore/releases). + +## [24.10.0] - 2024-11-13 + +### Fixed +- Fixed potential partial read in TFChunk. [EventStore#4608](https://github.com/EventStore/EventStore/pull/4608) +- Runtime information log on startup [EventStore#4572](https://github.com/EventStore/EventStore/pull/4572) +- Enhance shutdown service to handle pre-terminated services. [EventStore#4563](https://github.com/EventStore/EventStore/pull/4563) +- Explicitly deny stream operations in fallback policy. [EventStore#4567](https://github.com/EventStore/EventStore/pull/4567) +- Don't time out projection manager reads. 
[EventStore#4557](https://github.com/EventStore/EventStore/pull/4557) +- Only use the fallback policy for stream access. Revert to the legacy policy for endpoint access. [EventStore#4537](https://github.com/EventStore/EventStore/pull/4537) +- Fix authorization policy registry being loaded in insecure mode. [EventStore#4530](https://github.com/EventStore/EventStore/pull/4530) +- If the scavenge is unable to write a scavenge point it will now error rather than stall. [EventStore#4512](https://github.com/EventStore/EventStore/pull/4512) +- Reverted free mem stat to be MemAvailable rather than MemFree on linux. [EventStore#4509](https://github.com/EventStore/EventStore/pull/4509) +- Missing BSD-3 attribution since the recent license change. [EventStore#4493](https://github.com/EventStore/EventStore/pull/4493) +- Don't write the database default `ProjectionExecutionTimeout` in the projection persisted state on creation. [EventStore#4432](https://github.com/EventStore/EventStore/pull/4432) +- Ignore dlls that are not .NET assemblies when loading plugins. [EventStore#4380](https://github.com/EventStore/EventStore/pull/4380) +- Gossip on single node. [EventStore#4367](https://github.com/EventStore/EventStore/pull/4367) +- Optimize CPU usage of the timer service when the database is idle. [EventStore#4224](https://github.com/EventStore/EventStore/pull/4224) - thanks [@taspeotis](https://github.com/taspeotis)! +- Redaction: Return the chunk's `MinCompatibleVersion` instead of `Version` when retrieving event positions. [EventStore#4354](https://github.com/EventStore/EventStore/pull/4354) +- Avoid replaying deleted events from the source stream when replaying parked messages for a persistent subscription. [EventStore#4300](https://github.com/EventStore/EventStore/pull/4300) +- LeaderId and Epoch number would sometimes be empty (Guid.empty) for a follower node or a read only replica that would join a cluster whose leader is already elected. https://eventstore.aha.io/develop/requirements/DB-26-4 . [Linear Issue](https://linear.app/eventstore/issue/DB-611/leaderid-sometimes-guidempty-in-telemetry) [EventStore#4256](https://github.com/EventStore/EventStore/pull/4256) +- Remove redundant check which is always true. [EventStore#4265](https://github.com/EventStore/EventStore/pull/4265) +- Typos in option descriptions. [EventStore#4215](https://github.com/EventStore/EventStore/pull/4215) + +### Added +- ReadOnlyReplica and archiver flags to machine metadata in license. [EventStore#4597](https://github.com/EventStore/EventStore/pull/4597) +- License error to `/license` endpoint. [EventStore#4549](https://github.com/EventStore/EventStore/pull/4549) +- Log warning on startup about removed "Plugins" configuration subsection. [EventStore#4540](https://github.com/EventStore/EventStore/pull/4540) +- Telemetry configuration section to telemetry that is sent. [EventStore#4532](https://github.com/EventStore/EventStore/pull/4532) +- License headers to files. [EventStore#4487](https://github.com/EventStore/EventStore/pull/4487) +- Make sure all systems that require a shutdown process complete before exiting. [EventStore#4403](https://github.com/EventStore/EventStore/pull/4403) +- Licence header to source files. [EventStore#4455](https://github.com/EventStore/EventStore/pull/4455) +- Chunk read distribution metric. [EventStore#4445](https://github.com/EventStore/EventStore/pull/4445) +- Operating System to telemetry. 
[EventStore#4443](https://github.com/EventStore/EventStore/pull/4443) +- `GET /admin/scavenge/last` endpoint. [EventStore#4419](https://github.com/EventStore/EventStore/pull/4419) +- Groundwork for Archive. [EventStore#4427](https://github.com/EventStore/EventStore/pull/4427) and [EventStore#4417](https://github.com/EventStore/EventStore/pull/4417) +- Handling for missing labels in `metricsconfig.json`. [EventStore#4420](https://github.com/EventStore/EventStore/pull/4420) +- Facilitate node-to-node communication over HTTP for plugins. [EventStore#4409](https://github.com/EventStore/EventStore/pull/4409) +- License and Notices to build output. [EventStore#4404](https://github.com/EventStore/EventStore/pull/4404) +- License mappings for Qodana. [EventStore#4398](https://github.com/EventStore/EventStore/pull/4398) +- Qodana to CI. [EventStore#4392](https://github.com/EventStore/EventStore/pull/4392) +- A log for which auth policy plugin is being used. [EventStore#4377](https://github.com/EventStore/EventStore/pull/4377) +- Padding after the SourceContext in the console log output. [EventStore#4376](https://github.com/EventStore/EventStore/pull/4376) +- Allow loading multiple policies for authorization. [EventStore#4305](https://github.com/EventStore/EventStore/pull/4305) +- A banner linking to Event Store Navigator in the UI. [EventStore#4323](https://github.com/EventStore/EventStore/pull/4323) +- Support for chunk data transformation plugins. [EventStore#4258](https://github.com/EventStore/EventStore/pull/4258) +- Chunk version 4 (`Transformed`) file format. [EventStore#4289](https://github.com/EventStore/EventStore/pull/4289) +- Support for forward compatibility in chunks. [EventStore#4289](https://github.com/EventStore/EventStore/pull/4289) +- Added librdkafka redist package. [EventStore#4378](https://github.com/EventStore/EventStore/pull/4378) + +### Changed +- Only use `AuthorizationPolicyRegistryFactory` when the `internal` authorization plugin is enabled. [EventStore#4594](https://github.com/EventStore/EventStore/pull/4594) +- Internal changes to allow bundling of MD5 plugin. [EventStore#4582](https://github.com/EventStore/EventStore/pull/4582) +- Restrict Subscriptions.ProcessMessages operations in the fallback policy. [EventStore#4570](https://github.com/EventStore/EventStore/pull/4570) +- Improved shutdown logging on license validation error. [EventStore#4565](https://github.com/EventStore/EventStore/pull/4565) +- Upgraded to [Jint v4](https://github.com/sebastienros/jint/releases/tag/v4.0.0). [EventStore#4339](https://github.com/EventStore/EventStore/pull/4339) - thanks [@lahma](https://github.com/lahma)! +- `Authorization:PolicyType` option to `Authorization:DefaultPolicyType`. [EventStore#4545](https://github.com/EventStore/EventStore/pull/4545) +- Improved shutdown logging. [EventStore#4548](https://github.com/EventStore/EventStore/pull/4548) +- Notices file to reflect package upgrades. [EventStore#4517](https://github.com/EventStore/EventStore/pull/4517) +- gRPC log level to fatal (actually keeping the same behaviour as previous versions). [EventStore#4521](https://github.com/EventStore/EventStore/pull/4521) +- Moved plugable components out of 'Plugins' section in telemetry. [EventStore#4474](https://github.com/EventStore/EventStore/pull/4474) +- Plugin configuration to no longer be nested in `Plugins` section. [EventStore#4471](https://github.com/EventStore/EventStore/pull/4471) +- (All) plugins can now be configured from the main yaml config file. 
[EventStore#4470](https://github.com/EventStore/EventStore/pull/4470) +- Software License to [ESLv2](https://www.eventstore.com/blog/introducing-event-store-license-v2-eslv2). [EventStore#4452](https://github.com/EventStore/EventStore/pull/4452) +- Cosmetic changes to modernize the code base. [EventStore#4429](https://github.com/EventStore/EventStore/pull/4429) +- Refactor projections to use `IPublisher` and `ISubscriber` instead of `IQueuedHandler` and `IBus`. [EventStore#4413](https://github.com/EventStore/EventStore/pull/4413) +- `InMemoryBus` becomes asynchronous dispatcher. Async handlers now can use `IAsyncHandle` interface to enable async execution. [EventStore#4408](https://github.com/EventStore/EventStore/pull/4408) +- Group projection processing classes into namespaces. [EventStore#4412](https://github.com/EventStore/EventStore/pull/4412) +- Internal message bus is changed to be lock-free for better performance. Also, the change drives further perf improvements. [EventStore#4390](https://github.com/EventStore/EventStore/pull/4390) +- 3rd party license notices now in NOTICE.html. [EventStore#4402](https://github.com/EventStore/EventStore/pull/4402) +- Take whether a leader is resigning into account before the node priority when selecting the best candidate for an election. [EventStore#4371](https://github.com/EventStore/EventStore/pull/4371) +- Upgraded all serilog packages. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- Disabled logger init check when debugging. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- Console logging now includes SourceContext with component name. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- Default log uses `EventStore` as the name. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- When adding serilog to the host we now correctly clear all existing providers. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- If debug, no exception is thrown if logging was not initialized. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- If debug, any background service exception stops the host. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- If debug, configuration tries to load appsettings.json and appsettings.Development.json. [EventStore#4341](https://github.com/EventStore/EventStore/pull/4341) +- Separated persistent subscription metrics into multiple instruments. [EventStore#4315](https://github.com/EventStore/EventStore/pull/4315) +- Separated projections metrics into multiple instruments. [EventStore#4312](https://github.com/EventStore/EventStore/pull/4312) +- Unit tests use TCP as a plugin. [EventStore#4210](https://github.com/EventStore/EventStore/pull/4210) + +### Removed +- Byte order marks from source code. [EventStore#4450](https://github.com/EventStore/EventStore/pull/4450) +- Support for extremely old (V1) PTables. [EventStore#4447](https://github.com/EventStore/EventStore/pull/4447) +- Unused `TimeoutScheduler` from projections. [EventStore#4434](https://github.com/EventStore/EventStore/pull/4434) +- `IBus` interface is removed with no replacement to clearly distinguish roles of message dispatcher and message scheduler. Removed unused schedulers and related tests. [EventStore#4408](https://github.com/EventStore/EventStore/pull/4408) +- Replace the generic `PublishSubscribeDisptacher` with a more specific `ReaderSubscriptionDispatcher` in projections. 
[EventStore#4413](https://github.com/EventStore/EventStore/pull/4413) +- `/histogram/{name}` endpoint [EventStore#4394](https://github.com/EventStore/EventStore/pull/4394) + +### Breaking Changes +- The `/histogram/{name}` endpoint has been removed. [EventStore#4394](https://github.com/EventStore/EventStore/pull/4394) +- Support v1 PTables has been removed. [EventStore#4447](https://github.com/EventStore/EventStore/pull/4447) +- Otel Exporter commercial plugin configuration has changed to be nested within the `EventStore` subsection for consistency with the other plugins. [Upgrade guide](https://developers.eventstore.com/server/v24.10/quick-start/upgrade-guide.html#breaking-changes) +- User certificates commercial plugin configuration is no longer nested in a `Plugins` subsection. [Upgrade guide](https://developers.eventstore.com/server/v24.10/quick-start/upgrade-guide.html#breaking-changes) +- External TCP API and related configuration settings have been removed. [Upgrade guide](https://developers.eventstore.com/server/v24.10/quick-start/upgrade-guide.html#breaking-changes) ## [23.10.3] - 2024-09-18 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c0faa1db3a4..bc68b5dc723 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,52 +1,55 @@ -# Contributing to EventStoreDB +# Contributing to KurrentDB ## Working with the Git -We're using `master` as the main development branch. It contains all changes to the upcoming release. Older releases have dedicated feature branches with the format `release/oss-{version}`. E.g. `release/oss-v5`, `release/oss-v20.10`, `release/oss-v21.2`. Specific releases are tagged from the release branches commits. +KurrentDB uses `master` as the main development branch. It contains all changes to the upcoming release. Older releases of KurrentDB have dedicated feature branches with the format `release/v{version}`. E.g., `release/v25.0`. Specific releases are tagged from the release branch commits. -We attempt to do our best to ensure that the history remains clean and to do so, we generally ask contributors to squash their commits into a set or single logical commit. +Releases of EventStoreDB have feature branches with the format `release/oss-v{version}`. E.g. `release/oss-v24.10`. -To contribute to EventStoreDB: +We do our best to ensure a clean history. To do so, commits are automatically squashed into a single logical commit when pull requests are merged. + +**To contribute to KurrentDB**: 1. Fork the repository. 2. Create a feature branch from the `master` (or release) branch. -3. It's recommended using rebase strategy for feature branches (see more in [Git documentation](https://git-scm.com/book/en/v2/Git-Branching-Rebasing)). Having that, we highly recommend using clear commit messages. Commits should also represent the unit of change. -4. Before sending PR to ensure that you rebased the latest source branch from the main repository. -5. When you're ready to create the [Pull Request on GitHub](https://github.com/EventStore/EventStore/compare). +3. It's recommended that feature branches use a rebase strategy (see more in [Git documentation](https://git-scm.com/book/en/v2/Git-Branching-Rebasing)). We also highly recommend using clear commit messages that represent the unit of change. +4. Rebase the latest source branch from the main repository before sending PR. +5. When ready to create the Pull Request on GitHub [check to see what has previously changed](https://github.com/kurrent-io/KurrentDB/compare). 
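A minimal sketch of that contribution workflow on the command line, assuming a fork under your own GitHub account (the fork URL and the branch name below are placeholders, not prescribed by this guide):

    # clone your fork and track the upstream repository
    git clone https://github.com/<your-account>/KurrentDB.git
    cd KurrentDB
    git remote add upstream https://github.com/kurrent-io/KurrentDB.git

    # branch off the latest upstream master (or the relevant release branch)
    git fetch upstream
    git checkout -b my-feature upstream/master

    # after committing, rebase onto the latest upstream before opening the pull request
    git fetch upstream
    git rebase upstream/master
    git push --force-with-lease origin my-feature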
## Documentation -Documentation is located in the [`docs`](/docs) folder. It's orchestrated in the separate [documentation repository](https://github.com/EventStore/documentation). It's available online at https://developers.eventstore.com/. +Documentation files are in the [`docs`](/docs) folder. They're orchestrated in the separate [documentation repository](https://github.com/kurrent-io/documentation). The Kurrent Documentation site is publicly accessible at https://docs.kurrent.io/. It's recommended to have documentation changes be put together with code changes. -We're supporting multiple versions of the documentation. Versions are kept in: -- the main (`master`) branch: all changes that refer to the upcoming release should be put there. That includes both non-released changes and enhancements to documentation for existing features. -- specific release branches - the last version and older release are kept there (e.g. `release/oss-v5`, `release/oss-v20.10`, `release/oss-v21.2`). We aim to keep the up to date documentation for the last LTS releases and all further. Read more on the release strategy: [link](https://www.eventstore.com/blog/eventstoredb-20.10-lts-has-been-released). +Kurrent supports multiple versions of the documentation. Versions are kept in: +- The main (`master`) branch: The `master` branch should contain all changes related to the upcoming release. This includes both non-released changes and enhancements to documentation for existing features. +- Specific release branches: The latest and previous releases are maintained in specific branches (e.g. `release/v25.0`, `release/oss-v24.10`). We aim to keep the documentation up to date for the latest and previous LTS releases and any STS releases that occur in this timeline. For example, when v24.10 is released, documentation will continue to be updated for v24.10 and v23.10 (whereas v22.10 will be updated on an "as time allows" basis). Read more on the release strategy [in the docs](https://docs.kurrent.io/server/latest/release-schedule/). -To update the specific database version's docs, it's recommended to create a feature branch based on the particular version release branch. For instance, if you want to change documentation in the `21.2` version, then you should: -- checkout the latest `release/oss-v21.2`, -- create a new branch and add your changes, -- create pull request targeting the `release/oss-v21.2` branch. +To update a specific database version's docs, we recommend creating a feature branch based on that version's release branch. For instance, if you want to change documentation in the `25.0` version, you would: +- Checkout `release/v25.0` +- Create a new branch and add your changes +- Create a pull request targeting the `release/v25.0` branch. -If you're unsure which branch to select, the safe choice is the main branch (`master`). +If you're unsure which branch to select, the safe choice is the main branch (`master`). -It's not needed to send multiple pull requests if your change should be reflected in multiple database versions documentation. Contributors reviewing the pull request should mark it with proper labels (e.g. as `cherry-pick:release/oss-v20.10`). We're using the [GitHub action](/.github/workflows/cherry-pick-pr-for-label.yml) based on the labels that should create pull requests with cherry-picks to the target branches. Action will inform about success or failure via the review comments to the initial pull request tagging the person that merged the pull request.
It's recommended for contributors to monitor those notifications and make sure that cherry-picks succeeded. Read more in [action documentation](https://github.com/EventStore/Automations/tree/master/cherry-pick-pr-for-label). +Multiple pull requests are not required for changes that should be reflected in multiple database version documentation. Contributors reviewing the pull request should label it (e.g., `cherry-pick:release/v25.0`). KurrentDB uses the [GitHub action](/.github/workflows/cherry-pick-pr-for-label.yml) based on the labels that create pull requests with cherry-picks to the target branches. It's recommended that contributors monitor notifications and make sure that cherry-picks succeed. Read more in [action documentation](https://github.com/kurrent-io/Automations/tree/master/cherry-pick-pr-for-label). -Taking the previous example. You sent a pull request targeting the `release/oss-v21.2`. You'd like to have it also for the upcoming release and version `20.10`. Contributor should label your pull request with: +Using the previous example, assume a pull request targeting the `release/oss-v24.10` was committed. The changes should also be reflected in the upcoming release and version `25.0`. The contributor should add labels to the pull request for: - `cherry-pick:master`, -- `cherry-pick:release/oss-v20.10`. +- `cherry-pick:release/v25.0`. _**Note:** Cherry-pick action requires changes to be rebased. If there is a merge commit, then cherry-pick will fail. It will also fail if there is a conflict with the target branch (so `target_branch` from label suffix). If those cases happen then, it's needed to do manual cherry-picks._ ## Code style -Coding rules are set up in the [.editorconfig file](/src/.editorconfig). This file is supported by all popular IDE (e.g. Microsoft Visual Studio, Rider, Visual Studio Code). Unless you disabled it manually, it should be automatically applied after opening the solution. We also recommend turning automatic formatting on saving to have all the rules applied. +Coding rules are described in the [.editorconfig file](/src/.editorconfig). This file is supported by all popular IDEs (e.g., Microsoft Visual Studio, Rider, and Visual Studio Code). Unless disabled manually, it should be automatically applied after opening the solution. We also recommend turning automatic formatting on saving to have all the rules applied. ## Licensing and legal rights -By contributing to EventStoreDB: +By contributing to KurrentDB: -1. You assert that contribution is your original work. -2. You assert that you have the right to assign the copyright for the work. -3. You are accepting the [License](LICENSE.md). +1. You assert that contribution is your original work +2. You assert that you have the right to assign the copyright for the work +3. You accept the [Contributor License Agreement](https://gist.github.com/eventstore-bot/7a1e56c21e81f44a625a7462403298bf) (CLA) for your contribution +4. You accept the [License](LICENSE.md) diff --git a/Dockerfile b/Dockerfile index 59d3ecc1af7..2968b8a72f5 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,17 +1,20 @@ # "build" image ARG CONTAINER_RUNTIME=jammy +# NOT A BUG: we can't build on alpine so we use jammy as a base image FROM mcr.microsoft.com/dotnet/sdk:8.0-jammy AS build ARG RUNTIME=linux-x64 WORKDIR /build COPY ./LICENSE.md . +COPY ./LICENSE_CONTRIBUTIONS.md . COPY ./NOTICE.html . 
+COPY ./docker ./scripts WORKDIR /build/ci COPY ./ci ./ WORKDIR /build/src -COPY ./src/EventStore.sln ./src/*/*.csproj ./src/Directory.Build.* ./ +COPY ./src/KurrentDB.sln ./src/*/*.csproj ./src/Directory.Build.* ./ RUN for file in $(ls *.csproj); do mkdir -p ./${file%.*}/ && mv $file ./${file%.*}/; done RUN dotnet restore --runtime=${RUNTIME} COPY ./src . @@ -19,34 +22,25 @@ COPY ./src . WORKDIR /build/.git COPY ./.git/ . -WORKDIR /build/src -RUN find /build/src -maxdepth 1 -type d -name "*.Tests" -print0 | xargs -I{} -0 -n1 sh -c \ - 'dotnet publish --runtime=${RUNTIME} --no-self-contained --configuration Release --output /build/published-tests/`basename $1` $1' - '{}' +RUN /build/scripts/build.sh /build/src /build/published-tests # "test" image FROM mcr.microsoft.com/dotnet/sdk:8.0-${CONTAINER_RUNTIME} as test WORKDIR /build COPY --from=build ./build/published-tests ./published-tests COPY --from=build ./build/ci ./ci -COPY --from=build ./build/src/EventStore.Core.Tests/Services/Transport/Tcp/test_certificates/ca/ca.crt /usr/local/share/ca-certificates/ca_eventstore_test.crt +COPY --from=build ./build/scripts ./scripts +COPY --from=build ./build/src/KurrentDB.Core.Tests/Services/Transport/Tcp/test_certificates/ca/ca.crt /usr/local/share/ca-certificates/ca_kurrentdb_test.crt RUN mkdir ./test-results -RUN printf '#!/usr/bin/env sh\n\ -update-ca-certificates\n\ -find /build/published-tests -maxdepth 1 -type d -name "*.Tests" -print0 | xargs -I{} -0 -n1 sh -c '"'"'proj=`basename $1` && dotnet test --blame --blame-hang-timeout 5min --settings /build/ci/ci.runsettings --logger:"GitHubActions;report-warnings=false" --logger:html --logger:trx --logger:"console;verbosity=normal" --results-directory /build/test-results/$proj $1/$proj.dll'"'"' - '"'"'{}'"'"'\n\ -exit_code=$?\n\ -echo $(find /build/test-results -name "*.html" | xargs cat) > /build/test-results/test-results.html\n\ -exit $exit_code' \ - >> /build/test.sh && \ - chmod +x /build/test.sh -CMD ["/build/test.sh"] +CMD ["/build/scripts/test.sh"] # "publish" image FROM build as publish ARG RUNTIME=linux-x64 RUN dotnet publish --configuration=Release --runtime=${RUNTIME} --self-contained \ - --framework=net8.0 --output /publish EventStore.ClusterNode + --framework=net8.0 --output /publish /build/src/KurrentDB # "runtime" image FROM mcr.microsoft.com/dotnet/runtime-deps:8.0-${CONTAINER_RUNTIME} AS runtime @@ -66,34 +60,34 @@ RUN if [[ "${RUNTIME}" = "linux-musl-x64" ]];\ rm -rf /var/lib/apt/lists/*; \ fi -WORKDIR /opt/eventstore +WORKDIR /opt/kurrentdb -RUN addgroup --gid ${GID} "eventstore" && \ +RUN addgroup --gid ${GID} "kurrent" && \ adduser \ --disabled-password \ --gecos "" \ - --ingroup "eventstore" \ + --ingroup "kurrent" \ --no-create-home \ --uid ${UID} \ - "eventstore" + "kurrent" -COPY --chown=eventstore:eventstore --from=publish /publish ./ +COPY --chown=kurrent:kurrent --from=publish /publish ./ -RUN mkdir -p /var/lib/eventstore && \ - mkdir -p /var/log/eventstore && \ - mkdir -p /etc/eventstore && \ - chown -R eventstore:eventstore /var/lib/eventstore /var/log/eventstore /etc/eventstore +RUN mkdir -p /var/lib/kurrentdb && \ + mkdir -p /var/log/kurrentdb && \ + mkdir -p /etc/kurrentdb && \ + chown -R kurrent:kurrent /var/lib/kurrentdb /var/log/kurrentdb /etc/kurrentdb -USER eventstore +USER kurrent RUN printf "NodeIp: 0.0.0.0\n\ -ReplicationIp: 0.0.0.0" >> /etc/eventstore/eventstore.conf +ReplicationIp: 0.0.0.0" >> /etc/kurrentdb/kurrentdb.conf -VOLUME /var/lib/eventstore /var/log/eventstore +VOLUME /var/lib/kurrentdb 
/var/log/kurrentdb EXPOSE 1112/tcp 1113/tcp 2113/tcp HEALTHCHECK --interval=5s --timeout=5s --retries=24 \ CMD curl --fail --insecure https://localhost:2113/health/live || curl --fail http://localhost:2113/health/live || exit 1 -ENTRYPOINT ["/opt/eventstore/EventStore.ClusterNode"] +ENTRYPOINT ["/opt/kurrentdb/KurrentDB"] diff --git a/KurrentLogo-Black.png b/KurrentLogo-Black.png new file mode 100644 index 00000000000..0ff9694f953 Binary files /dev/null and b/KurrentLogo-Black.png differ diff --git a/KurrentLogo-Plum.png b/KurrentLogo-Plum.png new file mode 100644 index 00000000000..a27a816472b Binary files /dev/null and b/KurrentLogo-Plum.png differ diff --git a/KurrentLogo-White.png b/KurrentLogo-White.png new file mode 100644 index 00000000000..e49bfb08f8b Binary files /dev/null and b/KurrentLogo-White.png differ diff --git a/LICENSE.md b/LICENSE.md index 6a4fdf559bb..6fd6bf5a20d 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,28 +1,57 @@ -# EventStoreDB License - -Copyright (c) 2011-2024, Event Store Ltd. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, this -list of conditions and the following disclaimer in the documentation and/or -other materials provided with the distribution. - -Neither the name of Event Store Ltd nor the names of its contributors may be -used to endorse or promote products derived from this software without specific -prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +# Kurrent License v1 + +Copyright (c) 2011-2025, Kurrent, Inc. All rights reserved. + +### Acceptance + +By using the software, you agree to all of the terms and conditions below. + +### Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below. + +### Limitations + +You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software. + +Unless authorized in writing by the licensor, you may not move, change, disable, interfere with, or circumvent the license mechanisms in the software, and you may not remove or obscure any functionality in the software that is protected by the license mechanisms. 
+ +You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor’s trademarks is subject to applicable law. + +### Patents + +The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company. + +### Notices + +You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software. + +### No Other Rights + +These terms do not imply any licenses other than those expressly granted in these terms. + +### Termination + +If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently. + +### No Liability + +***As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.*** + +### Definitions + +The **licensor** is the entity offering these terms, and the **software** is the software the licensor makes available under these terms, including any portion of it. + +**licensing mechanisms** refers to functionality that restricts use of the software based on whether you possess a valid license key, including functionality to validate license keys and audit usage of the software to ensure license compliance. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. **control** means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect. + +**your licenses** are all the licenses granted to you for the software under these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. 
diff --git a/LICENSE_CONTRIBUTIONS.md b/LICENSE_CONTRIBUTIONS.md new file mode 100644 index 00000000000..e21611f37cf --- /dev/null +++ b/LICENSE_CONTRIBUTIONS.md @@ -0,0 +1,32 @@ +KurrentDB code and EventStoreDB code after versions 24.6.x is generally subject to the terms of the Kurrent License v1 +(KLv1, see License.md), however, some parts of the code remain subject to the terms of the Event Store License, which +they were contributed under. + +# Event Store License + +Copyright (c) 2011-2024, Event Store Ltd. and Contributors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +Redistributions of source code must retain the above copyright notice, this +list of conditions and the following disclaimer. + +Redistributions in binary form must reproduce the above copyright notice, this +list of conditions and the following disclaimer in the documentation and/or +other materials provided with the distribution. + +Neither the name of Event Store Ltd nor the names of its contributors may be +used to endorse or promote products derived from this software without specific +prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/NOTICE.html b/NOTICE.html index 164ee3ebcb6..ce7f5af5dcc 100644 --- a/NOTICE.html +++ b/NOTICE.html @@ -15,6 +15,41 @@

[NOTICE.html: the "Third-party software list" table is regenerated. New rows are added for AWSSDK.Core 3.7.304, AWSSDK.S3 3.7.307.32, AWSSDK.SQS 3.7.301.3, AWSSDK.SecurityToken 3.7.300.91, Acornima 1.1.0, Extensions.MudBlazor.StaticInput 3.2.0, FluentStorage 5.6.0, FluentStorage.AWS 5.5.0, JetBrains.Annotations 2024.3.0, Markdig 0.40.0, and numerous Microsoft.AspNetCore.* and Microsoft.Extensions.* packages. Existing rows are re-versioned, including the DotNext packages (5.3.x/5.4.0 to 5.18.x), Jint (3.0.0 to 4.0.3), and EventStore.Plugins (24.10.0 to 25.2.5, now listed under the Kurrent-1.0 license). Rows for Esprima 3.0.4, Mono.Posix.NETStandard 1.0.0, the OpenTelemetry 1.4.0-rc.1 packages, and a number of other previously listed entries are removed or relocated within this portion of the regenerated table. The diff is truncated here.]
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Enrichers.Thread4.0.0 + Microsoft.Extensions.Logging9.0.0
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Expressions5.0.0 + Microsoft.Extensions.ObjectPool6.0.16
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Extensions.Logging8.0.0 + Microsoft.Extensions.Options.ConfigurationExtensions9.0.0
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Settings.Configuration8.0.2 + Microsoft.Extensions.Options8.0.0
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Sinks.Async2.0.0 + Microsoft.Extensions.Options8.0.2
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Sinks.Console6.0.0 + Microsoft.Extensions.Options9.0.0
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog.Sinks.File6.0.0 + Microsoft.Extensions.Primitives8.0.0
- Apache-2.0 + MIT
- + © Microsoft Corporation. All rights reserved. - Serilog4.0.1 + Microsoft.Extensions.Primitives9.0.0
- Apache-2.0 + MIT
- Copyright © 2013-24 Serilog Contributors + © Microsoft Corporation. All rights reserved. - SharpDotYaml.Extensions.Configuration0.3.0 + Microsoft.FASTER.Core1.9.16
MIT
- Copyright © 2023 Ville Penttinen + © Microsoft Corporation. All rights reserved. - System.Buffers4.5.1 + Microsoft.IO.RecyclableMemoryStream3.0.1
MIT
© Microsoft Corporation. All rights reserved. - System.CodeDom5.0.0 + Microsoft.IdentityModel.Abstractions7.6.0
MIT
© Microsoft Corporation. All rights reserved. - System.Collections.Immutable5.0.0 + Microsoft.IdentityModel.Abstractions8.6.0
MIT
© Microsoft Corporation. All rights reserved. - System.Collections.Immutable7.0.0 + Microsoft.IdentityModel.JsonWebTokens7.6.0
MIT
© Microsoft Corporation. All rights reserved. - System.ComponentModel.Composition8.0.0 + Microsoft.IdentityModel.JsonWebTokens8.6.0
MIT
© Microsoft Corporation. All rights reserved. - System.Configuration.ConfigurationManager8.0.0 + Microsoft.IdentityModel.Logging7.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.IdentityModel.Logging8.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.IdentityModel.Protocols.OpenIdConnect8.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.IdentityModel.Protocols8.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.IdentityModel.Tokens7.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.IdentityModel.Tokens8.6.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.JSInterop.WebAssembly8.0.13 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.JSInterop8.0.13 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NET.ILLink.Tasks8.0.5 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NET.Sdk.WebAssembly.Pack8.0.5 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NETCore.Platforms1.1.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NETCore.Platforms1.1.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NETCore.Platforms5.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.NETCore.Targets1.1.3 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.Net.Http.Headers8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Microsoft.Win32.Registry5.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + Mono.Posix.NETStandard1.0.0 +
+ MIT +
+ + + + MudBlazor.Markdown8.0.0 +
+ MIT +
+ Copyright © 2024 MyNihongo + + + MudBlazor8.3.0 +
+ MIT +
+ Copyright 2024 MudBlazor + + + NETStandard.Library2.0.3 +
+ MIT +
+ + + + NetEscapades.Configuration.Yaml3.1.0 +
+ MIT +
+ + + + Newtonsoft.Json13.0.3 +
+ MIT +
+ Copyright © James Newton-King 2008 + + + OpenTelemetry.Api.ProviderBuilderExtensions1.11.2 +
+ Apache-2.0 +
+ Copyright The OpenTelemetry Authors + + + OpenTelemetry.Api1.11.2 +
+ Apache-2.0 +
+ Copyright The OpenTelemetry Authors + + + OpenTelemetry.Exporter.OpenTelemetryProtocol1.11.2 +
+ Apache-2.0 +
+ Copyright The OpenTelemetry Authors + + + OpenTelemetry.Exporter.Prometheus.AspNetCore1.11.2-beta.1 +
+ Apache-2.0 +
+ + + + OpenTelemetry.Extensions.Hosting1.11.2 +
+ Apache-2.0 +
+ Copyright The OpenTelemetry Authors + + + OpenTelemetry1.11.2 +
+ Apache-2.0 +
+ Copyright The OpenTelemetry Authors + + + Perfolizer0.2.1 +
+ MIT +
+ Copyright (C) 2020 Andrey Akinshin + + + Polly.Core8.5.2 +
+ BSD-3-Clause +
+ Copyright (c) 2025, App vNext + + + Quickenshtein1.5.1 +
+ MIT +
+ + + + RestSharp112.1.0 +
+ Apache-2.0 +
+ + + + SQLitePCLRaw.bundle_e_sqlite32.1.6 +
+ Apache-2.0 +
+ Copyright 2014-2023 SourceGear, LLC + + + SQLitePCLRaw.core2.1.6 +
+ Apache-2.0 +
+ Copyright 2014-2023 SourceGear, LLC + + + SQLitePCLRaw.lib.e_sqlite32.1.6 +
+ Apache-2.0 +
+ Copyright 2014-2023 SourceGear, LLC + + + SQLitePCLRaw.provider.e_sqlite32.1.6 +
+ Apache-2.0 +
+ Copyright 2014-2023 SourceGear, LLC + + + Scrutor5.0.2 +
+ MIT +
+ + + + Serilog.Enrichers.Process3.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Enrichers.Thread4.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Expressions5.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Extensions.Logging8.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Settings.Configuration8.0.2 +
+ Apache-2.0 +
+ + + + Serilog.Sinks.Async2.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Sinks.Console6.0.0 +
+ Apache-2.0 +
+ + + + Serilog.Sinks.File6.0.0 +
+ Apache-2.0 +
+ + + + Serilog4.0.1 +
+ Apache-2.0 +
+ Copyright © 2013-24 Serilog Contributors + + + SharpDotYaml.Extensions.Configuration0.3.1 +
+ MIT +
+ Copyright © 2023-2024 Ville Penttinen + + + System.Buffers4.5.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.CodeDom5.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Collections.Concurrent4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Collections.Immutable7.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Collections.Immutable8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Collections4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.ComponentModel.Composition8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Configuration.ConfigurationManager8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.Debug4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.DiagnosticSource8.0.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.DiagnosticSource9.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.DiagnosticSource9.0.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.EventLog8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.PerformanceCounter8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Diagnostics.Tracing4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Formats.Asn18.0.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Globalization.Calendars4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Globalization.Extensions4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Globalization4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. - System.Diagnostics.DiagnosticSource8.0.1 + System.IO.FileSystem.Primitives4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. - System.Diagnostics.EventLog8.0.0 + System.IO.FileSystem4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. - System.Diagnostics.PerformanceCounter8.0.0 + System.IO.Hashing8.0.0
MIT
© Microsoft Corporation. All rights reserved. - System.Formats.Asn18.0.1 + System.IO.Pipelines8.0.0
MIT
© Microsoft Corporation. All rights reserved. - System.IO.Hashing8.0.0 + System.IO4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. - System.IO.Pipelines8.0.0 + System.IdentityModel.Tokens.Jwt8.6.0
MIT
@@ -798,6 +1303,13 @@

Third-party software list

Copyright (c) .NET Foundation and Contributors. + + System.Linq4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Management5.0.0
@@ -826,6 +1338,20 @@

Third-party software list

© Microsoft Corporation. All rights reserved. + + System.Net.Http4.3.4 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Net.Primitives4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Numerics.Vectors4.4.0
@@ -833,6 +1359,13 @@

Third-party software list

© Microsoft Corporation. All rights reserved. + + System.Private.Uri4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Reactive6.0.1
@@ -841,26 +1374,47 @@

Third-party software list

Copyright (c) .NET Foundation and Contributors. - System.Reflection.Metadata5.0.0 + System.Reflection.Metadata7.0.0
MIT
© Microsoft Corporation. All rights reserved. - System.Reflection.Metadata7.0.0 + System.Reflection.Metadata8.0.0
MIT
© Microsoft Corporation. All rights reserved. - System.Runtime.CompilerServices.Unsafe5.0.0 + System.Reflection.Primitives4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Reflection.TypeExtensions4.7.0
MIT
© Microsoft Corporation. All rights reserved. + + System.Reflection4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Resources.ResourceManager4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Runtime.CompilerServices.Unsafe6.0.0
@@ -869,19 +1423,40 @@

Third-party software list

© Microsoft Corporation. All rights reserved. - System.Runtime4.3.1 + System.Runtime.Extensions4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. + + + System.Runtime.Handles4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Runtime.InteropServices4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. - System.Security.AccessControl4.4.0 + System.Runtime.Numerics4.3.0
MIT
© Microsoft Corporation. All rights reserved. + + System.Runtime4.3.1 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Security.AccessControl5.0.0
@@ -889,6 +1464,41 @@

Third-party software list

© Microsoft Corporation. All rights reserved. + + System.Security.Cryptography.Algorithms4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Security.Cryptography.Cng4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Security.Cryptography.Csp4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Security.Cryptography.Encoding4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Security.Cryptography.OpenSsl4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Security.Cryptography.Pkcs8.0.0
@@ -897,26 +1507,33 @@

Third-party software list

© Microsoft Corporation. All rights reserved. - System.Security.Cryptography.ProtectedData8.0.0 + System.Security.Cryptography.Primitives4.3.0
MIT
- © Microsoft Corporation. All rights reserved. + © Microsoft Corporation. All rights reserved. - System.Security.Cryptography.Xml8.0.0 + System.Security.Cryptography.ProtectedData8.0.0
MIT
© Microsoft Corporation. All rights reserved. - System.Security.Principal.Windows4.4.0 + System.Security.Cryptography.X509Certificates4.3.0
MIT
© Microsoft Corporation. All rights reserved. + + System.Security.Cryptography.Xml8.0.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Security.Principal.Windows5.0.0
@@ -952,6 +1569,13 @@

Third-party software list

© Microsoft Corporation. All rights reserved. + + System.Text.Encoding4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + System.Text.Encodings.Web8.0.0
@@ -960,7 +1584,7 @@

Third-party software list

© Microsoft Corporation. All rights reserved. - System.Text.Json8.0.4 + System.Text.Json8.0.5
MIT
@@ -974,7 +1598,7 @@

Third-party software list

© Microsoft Corporation. All rights reserved. - System.Threading.Channels8.0.0 + System.Threading.Channels9.0.1
MIT
@@ -987,6 +1611,41 @@

Third-party software list

© Microsoft Corporation. All rights reserved. + + System.Threading.Tasks4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Threading.Thread4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + System.Threading4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + TestableIO.System.IO.Abstractions.Wrappers21.0.22 +
+ MIT +
+ Copyright © Tatham Oddie & friends 2010-2024 + + + TestableIO.System.IO.Abstractions21.0.22 +
+ MIT +
+ Copyright © Tatham Oddie & friends 2010-2024 + ThisAssembly.Constants1.4.1
@@ -999,14 +1658,14 @@

Third-party software list

MIT
- Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Antoine Aubry and contributors + YamlDotnet13.7.1
MIT
- Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2014 Antoine Aubry and contributors + librdkafka.redist2.5.0 @@ -1015,12 +1674,117 @@

Third-party software list

Copyright 2012-2023 + + runtime.debian.8-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.fedora.23-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.fedora.24-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.native.System.Net.Http4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.native.System.Security.Cryptography.Apple4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.native.System4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.opensuse.13.2-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.opensuse.42.1-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.osx.10.10-x64.runtime.native.System.Security.Cryptography.Apple4.3.0 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.osx.10.10-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.rhel.7-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.ubuntu.14.04-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.ubuntu.16.04-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + + + runtime.ubuntu.16.10-x64.runtime.native.System.Security.Cryptography.OpenSsl4.3.2 +
+ MIT +
+ © Microsoft Corporation. All rights reserved. + system.uritemplate
MIT
- © Microsoft Corporation. All rights reserved. + diff --git a/README.md b/README.md index f6f1ffece56..ad5047299a0 100644 --- a/README.md +++ b/README.md @@ -1,112 +1,125 @@ -EventStoreDB - -EventStoreDB is the event-native database, where business events are immutably stored and streamed. Designed for event-sourced, event-driven, and microservices architectures - -- [What is EventStoreDB ](#what-is-eventstoredb) -- [What is EventStore Cloud ](#what-is-event-store-cloud) + + + + + Kurrent + + + +- [What is Kurrent](#what-is-kurrent) +- [What is KurrentDB](#what-is-kurrentdb) +- [What is Kurrent Cloud](#what-is-kurrent-cloud) - [Licensing](#licensing) - [Documentation](#docs) -- [Getting started with EventStoreDB ](#getting-started-with-eventstoredb) -- [Getting started with EventStore Cloud ](#getting-started-with-eventstoredb) +- [Getting started with KurrentDB](#getting-started-with-kurrentdb) +- [Getting started with Kurrent Cloud](#getting-started-with-kurrent-cloud) - [Client libraries](#client-libraries) - [Deployment](#deployment) - [Communities](#communities) - [Contributing](#contributing) -- [Building EventStoreDB](#building-eventstoredb) -- [Need help?](#need-help) +- [Building KurrentDB](#building-kurrentdb) +- [More resources](#more-resources) + +## What is Kurrent -## What is EventStoreDB +Event Store – the company and the product – are rebranding as Kurrent. -EventStoreDB is a new category of operational database that has evolved from the Event Sourcing community. Powered by the state-transition data model, events are stored with the context of why they have happened. Providing flexible, real-time data insights in the language your business understands. +- The flagship product will be referred to as “the Kurrent event-native data platform” or “the Kurrent platform” or simply “Kurrent" +- EventStoreDB will be referred to as KurrentDB +- Event Store Cloud will now be called Kurrent Cloud -

- -

+Read more about the rebrand in the [rebrand FAQ](https://www.kurrent.io/blog/kurrent-re-brand-faq). -Download the [latest version](https://www.eventstore.com/downloads). -For more product information visit [the website](https://www.eventstore.com/eventstoredb). +## What is KurrentDB -## What is Event Store Cloud? +KurrentDB is a database that's engineered for modern software applications and event-driven architectures. Its event-native design simplifies data modeling and preserves data integrity while the integrated streaming engine solves distributed messaging challenges and ensures data consistency. -Event Store Cloud is a fully managed cloud offering that's designed to make it easy for developers to build and run highly available and secure applications that incorporate EventStoreDB without having to worry about managing the underlying infrastructure. You can provision EventStoreDB clusters in AWS, Azure, and GCP, and connect these services securely to your own cloud resources. +Download the [latest version](https://kurrent.io/downloads). +For more product information visit [the website](https://kurrent.io/kurrent). -For more details visit [the website](https://www.eventstore.com/event-store-cloud). +## What is Kurrent Cloud? + + Kurrent Cloud is a fully managed cloud offering that's designed to make it easy for developers to build and run highly available and secure applications that incorporate KurrentDB without having to worry about managing the underlying infrastructure. You can provision KurrentDB clusters in AWS, Azure, and GCP, and connect these services securely to your own cloud resources. + +For more details visit [the website](https://kurrent.io/kurrent-cloud). ## Licensing -View [Event Store Ltd's licensing information](https://github.com/EventStore/EventStore/blob/master/LICENSE.md). +View [KurrentDB's licensing information](https://github.com/kurrent-io/KurrentDB/blob/master/LICENSE.md). ## Docs -For guidance on installation, development, deployment, and administration, see the [User Documentation](https://developers.eventstore.com/). +For guidance on installation, development, deployment, and administration, see the [User Documentation](https://docs.kurrent.io/). -## Getting started with EventStoreDB +## Getting started with KurrentDB -Follow the [getting started guide](https://developers.eventstore.com/latest.html). +Follow the [getting started guide](https://docs.kurrent.io/latest.html). -## Getting started with Event Store Cloud +## Getting started with Kurrent Cloud -Event Store can manage EventStoreDB for you, so you don't have to run your own clusters. -See the online documentation: [Getting started with Event Store Cloud](https://developers.eventstore.com/cloud/). +Kurrent can manage KurrentDB for you, so you don't have to run your own clusters. +See the online documentation: [Getting started with Kurrent Cloud](https://docs.kurrent.io/cloud/). ## Client libraries -This guide shows you how to get started with EventStoreDB by setting up an instance or cluster and configuring it. -EventStoreDB supports two protocols: gRPC and TCP(legacy). +[This guide](https://docs.kurrent.io/clients/grpc/getting-started.html) shows you how to get started with KurrentDB by setting up an instance or cluster and configuring it. +KurrentDB supports the gRPC protocol. 
-EventStoreDB supported gRPC clients +### KurrentDB supported clients - Python: [pyeventsourcing/esdbclient](https://pypi.org/project/esdbclient/) -- Node.js (javascript/typescript): [EventStore/EventStore-Client-NodeJS](https://github.com/EventStore/EventStore-Client-NodeJS) -- Java: [(EventStore/EventStoreDB-Client-Java](https://github.com/EventStore/EventStoreDB-Client-Java) -- .NET: [EventStore/EventStore-Client-Dotnet](https://github.com/EventStore/EventStore-Client-Dotnet) -- Go: [EventStore/EventStore-Client-Go](https://github.com/EventStore/EventStore-Client-Go) -- Rust: [EventStore/EventStoreDB-Client-Rust](https://github.com/EventStore/EventStoreDB-Client-Rust) -- Read more in the [gRPC clients documentation](https://developers.eventstore.com/clients/grpc) +- Node.js (javascript/typescript): [kurrent-io/KurrentDB-Client-NodeJS](https://github.com/kurrent-io/KurrentDB-Client-NodeJS) +- Java: [(kurrent-io/KurrentDB-Client-Java](https://github.com/kurrent-io/KurrentDB-Client-Java) +- .NET: [kurrent-io/EventStore-Client-Dotnet](https://github.com/kurrent-io/EventStore-Client-Dotnet) +- Go: [kurrent-io/KurrentDB-Client-Go](https://github.com/kurrent-io/KurrentDB-Client-Go) +- Rust: [kurrent-io/KurrentDB-Client-Rust](https://github.com/kurrent-io/KurrentDB-Client-Rust) +- Read more in the [gRPC clients documentation](https://docs.kurrent.io/clients/grpc) -Community supported gRPC clients +### Community supported clients - Elixir: [NFIBrokerage/spear](https://github.com/NFIBrokerage/spear) - Ruby: [yousty/event_store_client](https://github.com/yousty/event_store_client) -Read more in the [documentation](https://developers.eventstore.com/server/v22.10/#protocols-clients-and-sdks). +Read more in the [documentation](https://docs.kurrent.io/server/latest/quick-start/#protocols-clients-and-sdks). -Legacy TCP Clients (support ends with 23.10 LTS) +### Legacy clients (support ends with EventStoreDB v23.10 LTS) -- .Net: [EventStoreDB-Client-Dotnet-Legacy](https://github.com/EventStore/EventStoreDB-Client-Dotnet-Legacy) +- .NET: [EventStoreDB-Client-Dotnet-Legacy](https://github.com/kurrent-io/EventStoreDB-Client-Dotnet-Legacy) ## Deployment -- Event Store Cloud - [steps to get started in Cloud](https://developers.eventstore.com/cloud/). -- Self-managed - [steps to host EventStoreDB yourself](https://developers.eventstore.com/server/v22.10/#getting-started). +- Kurrent Cloud - [steps to get started in Kurrent Cloud](https://docs.kurrent.io/cloud/). +- Self-managed - [steps to host KurrentDB yourself](https://docs.kurrent.io/latest/quick-start/installation). ## Communities -- [Discuss](https://discuss.eventstore.com/) -- [Discord (Event Store)](https://discord.gg/Phn9pmCw3t) +[Join our global community](https://www.kurrent.io/community) of developers. + +- [Discuss](https://discuss.kurrent.io/) +- [Discord (Kurrent)](https://discord.gg/Phn9pmCw3t) - [Discord (ddd-cqrs-es)](https://discord.com/invite/sEZGSHNNbH) ## Contributing Development is done on the `master` branch. -We attempt to do our best to ensure that the history remains clean and to do so, we generally ask contributors to squash their commits into a set or single logical commit. +We attempt to do our best to ensure that the history remains clean and to do so, commits are automatically squashed into a single logical commit when pull requests are merged. -If you want to switch to a particular release, you can check out the release branch for that particular release. 
For example: -`git checkout release/oss-v22.10` +If you want to switch to a particular release, you can check out the release branch for that particular release. For example: +`git checkout release/v25.0` -- [Create an issue](https://github.com/EventStore/EventStore/issues) -- [Documentation](https://developers.eventstore.com/) -- [Contributing guide](https://github.com/EventStore/EventStore/blob/master/CONTRIBUTING.md) +- [Create an issue](https://github.com/kurrent-io/KurrentDB/issues) +- [Documentation](https://docs.kurrent.io/) +- [Contributing guide](CONTRIBUTING.md) -## Building EventStoreDB +## Building KurrentDB -EventStoreDB is written in a mixture of C# and JavaScript. It can run on Windows, Linux and macOS (using Docker) using the .NET Core runtime. +KurrentDB is written in a mixture of C# and JavaScript. It can run on Windows, Linux and macOS (using Docker) using the .NET Core runtime. **Prerequisites** -- [.NET Core SDK 8.0](https://dotnet.microsoft.com/download/dotnet/8.0) +- [.NET SDK 8.0](https://dotnet.microsoft.com/download/dotnet/8.0) -Once you've installed the prerequisites for your system, you can launch a `Release` build of EventStore as follows: +Once you've installed the prerequisites for your system, you can launch a `Release` build of KurrentDB as follows: ``` dotnet build -c Release src @@ -117,7 +130,7 @@ The build scripts: `build.sh` and `build.ps1` are also available for Linux and W To start a single node, you can then run: ``` -dotnet ./src/EventStore.ClusterNode/bin/x64/Release/net8.0/EventStore.ClusterNode.dll --dev --db ./tmp/data --index ./tmp/index --log ./tmp/log +dotnet ./src/KurrentDB/bin/Release/net8.0/KurrentDB.dll --dev --db ./tmp/data --index ./tmp/index --log ./tmp/log ``` ### Running the tests @@ -125,15 +138,15 @@ dotnet ./src/EventStore.ClusterNode/bin/x64/Release/net8.0/EventStore.ClusterNod You can launch the tests as follows: ``` -dotnet test src/EventStore.sln +dotnet test src/KurrentDB.sln ``` -### Build EventStoreDB Docker image +### Build KurrentDB Docker image You can also build a Docker image by running the command: ``` -docker build --tag myeventstore . \ +docker build --tag mykurrentdb . \ --build-arg CONTAINER_RUNTIME={container-runtime} --build-arg RUNTIME={runtime} ``` @@ -141,7 +154,7 @@ docker build --tag myeventstore . \ For instance: ``` -docker build --tag myeventstore . \ +docker build --tag mykurrentdb . \ --build-arg CONTAINER_RUNTIME=bookworm-slim \ --build-arg RUNTIME=linux-x64 ``` @@ -149,7 +162,7 @@ docker build --tag myeventstore . \ **_Note:_** Because of the [Docker issue](https://github.com/moby/buildkit/issues/1900), if you're building a Docker image on Windows, you may need to set the `DOCKER_BUILDKIT=0` environment variable. For instance, running in PowerShell: ``` -$env:DOCKER_BUILDKIT=0; docker build --tag myeventstore . ` +$env:DOCKER_BUILDKIT=0; docker build --tag mykurrentdb . ` --build-arg CONTAINER_RUNTIME=bookworm-slim ` --build-arg RUNTIME=linux-x64 ``` @@ -174,13 +187,13 @@ Currently, we support the following configurations: You can verify the built image by running: ``` -docker run --rm myeventstore --insecure --what-if +docker run --rm mykurrentdb --insecure --what-if ``` -## Need help? 
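Beyond the `--what-if` dry run, the same image can be started as a single insecure node for local experimentation. The following is a minimal sketch based on the build and verification commands above; the published port 2113 is an assumption taken from the node port used in the docker-compose setup elsewhere in this change, and `--insecure` is intended for local development only.

```
# Build the image for the bookworm-slim / linux-x64 configuration (as documented above)
docker build --tag mykurrentdb . \
  --build-arg CONTAINER_RUNTIME=bookworm-slim \
  --build-arg RUNTIME=linux-x64

# Dry run: validate the image and print the effective configuration, then exit
docker run --rm mykurrentdb --insecure --what-if

# Start a single insecure dev node, publishing the default node port 2113 (assumed here)
docker run --rm -p 2113:2113 mykurrentdb --insecure
```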
+## More resources -- [Release notes ](https://www.eventstore.com/blog/release-notes) -- [Beginners Guide to Event Sourcing](https://www.eventstore.com/event-sourcing) -- [Articles](https://www.eventstore.com/blog) -- [Webinars ](https://www.eventstore.com/webinars) -- [Contact us](https://www.eventstore.com/contact) +- [Release notes](https://docs.kurrent.io/server/latest/release-schedule/release-notes.html) +- [Beginners Guide to Event Sourcing](https://kurrent.io/event-sourcing) +- [Articles](https://kurrent.io/blog) +- [Webinars](https://kurrent.io/webinars) +- [Contact us](https://kurrent.io/contact) diff --git a/SECURITY.md b/SECURITY.md index 27237b3b2b3..5bd160867ea 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -1,3 +1,3 @@ If you find a vulnerability in our software, please contact us. -You can find how to reach out us and report it at https://www.eventstore.com/security#security +You can find how to reach out us and report it at https://www.kurrent.io/security#security Thank you very much for supporting our software. diff --git a/build.cmd b/build.cmd index 75284ca55d0..1054e205086 100644 --- a/build.cmd +++ b/build.cmd @@ -12,8 +12,7 @@ exit /B %errorlevel% :help echo Usage: -echo build.cmd ^[-Version=0.0.0.0^] ^[-Configuration=Debug^|Release^] ^[-BuildUI=yes^|no^] +echo build.cmd ^[-Version=0.0.0.0^] ^[-Configuration=Debug^|Release^] echo. echo Prerequisites: -echo Building EventStore database requires .NET Core SDK 5.0.4 -echo Building the UI requires Node.js (v8.11.4+) +echo Building KurrentDB database requires .NET Core SDK 8.0 diff --git a/build.ps1 b/build.ps1 index fdc5234cafc..41eb7a5a5c8 100644 --- a/build.ps1 +++ b/build.ps1 @@ -5,9 +5,6 @@ Param( [Parameter(HelpMessage="Configuration (Debug, Release)")] [ValidateSet("Debug","Release")] [string]$Configuration = "Release", - [Parameter(HelpMessage="Build UI (yes,no)")] - [ValidateSet("yes","no")] - [string]$BuildUI = "no", [Parameter(HelpMessage="Run Tests (yes,no)")] [ValidateSet("yes","no")] [string]$RunTests = "no" @@ -45,11 +42,7 @@ Function Start-Build{ $baseDirectory = $PSScriptRoot $srcDirectory = Join-Path $baseDirectory "src" $binDirectory = Join-Path $baseDirectory "bin" - $libsDirectory = Join-Path $srcDirectory "libs" - $eventStoreSolution = Join-Path $srcDirectory "EventStore.sln" - - $uiSrcDirectory = Join-Path $srcDirectory "EventStore.UI\" - $uiDistDirectory = Join-Path $srcDirectory "EventStore.ClusterNode.Web\clusternode-web\" + $kurrentDbSolution = Join-Path $srcDirectory "KurrentDB.sln" Write-Info "Build Configuration" Write-Info "-------------------" @@ -57,34 +50,14 @@ Function Start-Build{ Write-Info "Version: $Version" Write-Info "Platform: $platform" Write-Info "Configuration: $Configuration" - Write-Info "Build UI: $BuildUI" Write-Info "Run Tests: $RunTests" - #Build Event Store UI - if ($BuildUI -eq "yes") { - #Build the UI - if (Test-Path $uiDistDirectory) { - Remove-Item -Recurse -Force $uiDistDirectory - } - Push-Location $uiSrcDirectory - if(-Not (Test-Path (Join-Path $uiSrcDirectory "package.json"))) { - Exec { git submodule update --init ./ } - } - Exec { npm install bower@~1.8.14 -g } - Exec { bower install --allow-root } - Exec { npm install gulp-cli -g } - Exec { npm install } - Exec { gulp dist } - Exec { mv es-dist $uiDistDirectory } - Pop-Location - } - - #Build Event Store (Patch AssemblyInfo, Build, Revert AssemblyInfo) + #Build KurrentDB (Patch AssemblyInfo, Build, Revert AssemblyInfo) Remove-Item -Force -Recurse $binDirectory -ErrorAction SilentlyContinue > $null - 
$versionInfoFile = Resolve-Path (Join-Path $srcDirectory (Join-Path "EventStore.Common" (Join-Path "Utils" "VersionInfo.cs"))) -Relative + $versionInfoFile = Resolve-Path (Join-Path $srcDirectory (Join-Path "KurrentDB.Common" (Join-Path "Utils" "VersionInfo.cs"))) -Relative try { - Exec { dotnet build -c $configuration /p:Version=$Version /p:Platform=x64 $eventStoreSolution } + Exec { dotnet build -c $configuration /p:Version=$Version /p:Platform=x64 $kurrentDbSolution } } finally { Write-Info "Reverting $versionInfoFile to original state." & { git checkout --quiet $versionInfoFile } diff --git a/build.sh b/build.sh index 163c7f4ab63..ba5e4eb3beb 100755 --- a/build.sh +++ b/build.sh @@ -1,28 +1,25 @@ #!/usr/bin/env bash BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -PRODUCTNAME="Event Store Open Source" -COMPANYNAME="Event Store Ltd" -COPYRIGHT="Copyright 2021 Event Store Ltd. All rights reserved." +PRODUCTNAME="KurrentDB" +COMPANYNAME="Kurrent, Inc" +COPYRIGHT="Copyright 2025 Kurrent, Inc. All rights reserved." # ------------ End of configuration ------------- CONFIGURATION="Release" -BUILD_UI="no" NET_FRAMEWORK="net8.0" function usage() { cat <] [] [] + $0 [] [] -version: EventStore build version. Versions must be complete four part identifiers valid for use on a .NET assembly. +version: KurrentDB build version. Versions must be complete four part identifiers valid for use on a .NET assembly. configuration: Build configuration. Valid configurations are: Debug, Release -build_ui: Whether or not to build the EventStore UI. Building the UI requires an installation of Node.js (v8.11.4+) - EOF exit 1 } @@ -49,9 +46,8 @@ function checkParams() { version=$1 configuration=$2 - build_ui=$3 - [[ $# -gt 3 ]] && usage + [[ $# -gt 2 ]] && usage if [[ "$version" == "" ]] ; then VERSIONSTRING="0.0.0.0" @@ -73,19 +69,6 @@ function checkParams() { usage fi fi - - if [[ "$build_ui" == "" ]]; then - BUILD_UI="no" - echo "Build UI defaulted to: $BUILD_UI" - else - if [[ "$build_ui" == "yes" || "$build_ui" == "no" ]]; then - BUILD_UI=$build_ui - echo "Build UI set to: $BUILD_UI" - else - echo "Invalid Build UI value: $build_ui" - usage - fi - fi } function revertVersionInfo() { @@ -135,32 +118,10 @@ function patchVersionInfo { done } -function buildUI { - if [[ "$BUILD_UI" != "yes" ]] ; then - echo "Skipping UI Build" - return - fi - - rm -rf src/EventStore.ClusterNode.Web/clusternode-web/ - pushd src/EventStore.UI - - if [ ! 
-f ./package.json ]; then - git submodule update --init ./ - fi - - npm install bower@~1.8.14 -g - bower install --allow-root - npm install gulp-cli -g - npm install - gulp dist - mv es-dist ../EventStore.ClusterNode.Web/clusternode-web/ - popd -} - -function buildEventStore { +function buildKurrentDB { patchVersionInfo rm -rf bin/ - dotnet build -c $CONFIGURATION /p:Platform=x64 /p:Version=$VERSIONSTRING --framework=$NET_FRAMEWORK src/EventStore.sln || err + dotnet build -c $CONFIGURATION /p:Platform=x64 /p:Version=$VERSIONSTRING --framework=$NET_FRAMEWORK src/KurrentDB.sln || err revertVersionInfo } @@ -170,8 +131,7 @@ function exitWithError { } detectOS -checkParams "$1" "$2" "$3" +checkParams "$1" "$2" echo "Running from base directory: $BASE_DIR" -buildUI -buildEventStore +buildKurrentDB diff --git a/ci/README.md b/ci/README.md deleted file mode 100644 index 13024280cfa..00000000000 --- a/ci/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# CI scripts -This directory contains scripts used by our CI pipeline - -## Docker images -The following docker image repositories are used for linux builds: -CentOS builds: https://github.com/EventStore/eventstore-ci-centos7 -Ubuntu builds: https://github.com/EventStore/eventstore-ci-ubuntu-14.04 - -These images are hosted at https://hub.docker.com/r/eventstore/ \ No newline at end of file diff --git a/ci/ap-build-linux.sh b/ci/ap-build-linux.sh deleted file mode 100755 index a5761f64a14..00000000000 --- a/ci/ap-build-linux.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o xtrace - -dotnet build -c $EventStoreBuildConfig src/EventStore.sln \ No newline at end of file diff --git a/ci/ap-setup-osx.sh b/ci/ap-setup-osx.sh deleted file mode 100755 index b667bb8753c..00000000000 --- a/ci/ap-setup-osx.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o xtrace - -# Install Mono -curl https://download.mono-project.com/archive/5.16.0/macos-10-universal/MonoFramework-MDK-5.16.0.220.macos10.xamarin.universal.pkg -o mono.pkg -sudo installer -store -pkg mono.pkg -target / - -# Print the versions of mono and dotnet which are on PATH -mono --version -dotnet --version - -# Ensure the "first run experience" isn't co-mingled with build output but does get -# run. It appears that if it is _not_ run, builds fail. -cd "${TMPDIR}" && dotnet new \ No newline at end of file diff --git a/ci/ap-test-linux.sh b/ci/ap-test-linux.sh deleted file mode 100755 index df640d3d3bb..00000000000 --- a/ci/ap-test-linux.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash - -set -o errexit -set -o pipefail -set -o xtrace - -find ./src -maxdepth 1 -type d -name '*.Tests' -print0| xargs -0 -n1 dotnet test -v normal -c $EventStoreBuildConfig --logger trx \ No newline at end of file diff --git a/ci/ci.runsettings b/ci/ci.runsettings index 01ecc95a28a..7bc9c1c6fc7 100644 --- a/ci/ci.runsettings +++ b/ci/ci.runsettings @@ -6,6 +6,6 @@ 2147483647 - FullyQualifiedName!~EventStore.Core.Tests.LogFormat+V3 + (FullyQualifiedName!~EventStore.Core.Tests.LogFormat+V3) & (FullyQualifiedName!~KurrentDB.Core.Tests.LogFormat+V3) diff --git a/docker-compose.yml b/docker-compose.yml index a80c992566c..8c9c7ca3608 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -24,74 +24,83 @@ services: - volumes-provisioner esdb-node1: - build: ./ + build: + context: . 
+ args: + RUNTIME: ${RUNTIME} env_file: - shared.env environment: - - EVENTSTORE_GOSSIP_SEED=172.30.240.12:2113,172.30.240.13:2113 - - EVENTSTORE_INT_IP=172.30.240.11 - - EVENTSTORE_CERTIFICATE_FILE=/etc/eventstore/certs/node1/node.crt - - EVENTSTORE_CERTIFICATE_PRIVATE_KEY_FILE=/etc/eventstore/certs/node1/node.key - - EVENTSTORE_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 - - EVENTSTORE_ADVERTISE_HTTP_PORT_TO_CLIENT_AS=2111 + - KURRENTDB_GOSSIP_SEED=172.30.240.12:2113,172.30.240.13:2113 + - KURRENTDB_REPLICATION_IP=172.30.240.11 + - KURRENTDB_CERTIFICATE_FILE=/etc/kurrentdb/certs/node1/node.crt + - KURRENTDB_CERTIFICATE_PRIVATE_KEY_FILE=/etc/kurrentdb/certs/node1/node.key + - KURRENTDB_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 + - KURRENTDB_ADVERTISE_NODE_PORT_TO_CLIENT_AS=2111 ports: - 2111:2113 networks: clusternetwork: ipv4_address: 172.30.240.11 volumes: - - ./certs:/etc/eventstore/certs + - ./certs:/etc/kurrentdb/certs restart: unless-stopped depends_on: - cert-gen esdb-node2: - build: ./ + build: + context: . + args: + RUNTIME: ${RUNTIME} env_file: - shared.env environment: - - EVENTSTORE_GOSSIP_SEED=172.30.240.11:2113,172.30.240.13:2113 - - EVENTSTORE_INT_IP=172.30.240.12 - - EVENTSTORE_CERTIFICATE_FILE=/etc/eventstore/certs/node2/node.crt - - EVENTSTORE_CERTIFICATE_PRIVATE_KEY_FILE=/etc/eventstore/certs/node2/node.key - - EVENTSTORE_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 - - EVENTSTORE_ADVERTISE_HTTP_PORT_TO_CLIENT_AS=2112 + - KURRENTDB_GOSSIP_SEED=172.30.240.11:2113,172.30.240.13:2113 + - KURRENTDB_REPLICATION_IP=172.30.240.12 + - KURRENTDB_CERTIFICATE_FILE=/etc/kurrentdb/certs/node2/node.crt + - KURRENTDB_CERTIFICATE_PRIVATE_KEY_FILE=/etc/kurrentdb/certs/node2/node.key + - KURRENTDB_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 + - KURRENTDB_ADVERTISE_NODE_PORT_TO_CLIENT_AS=2112 ports: - 2112:2113 networks: clusternetwork: ipv4_address: 172.30.240.12 volumes: - - ./certs:/etc/eventstore/certs + - ./certs:/etc/kurrentdb/certs restart: unless-stopped depends_on: - cert-gen esdb-node3: - build: ./ + build: + context: . 
+ args: + RUNTIME: ${RUNTIME} env_file: - shared.env environment: - - EVENTSTORE_GOSSIP_SEED=172.30.240.11:2113,172.30.240.12:2113 - - EVENTSTORE_INT_IP=172.30.240.13 - - EVENTSTORE_CERTIFICATE_FILE=/etc/eventstore/certs/node3/node.crt - - EVENTSTORE_CERTIFICATE_PRIVATE_KEY_FILE=/etc/eventstore/certs/node3/node.key - - EVENTSTORE_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 - - EVENTSTORE_ADVERTISE_HTTP_PORT_TO_CLIENT_AS=2113 + - KURRENTDB_GOSSIP_SEED=172.30.240.11:2113,172.30.240.12:2113 + - KURRENTDB_REPLICATION_IP=172.30.240.13 + - KURRENTDB_CERTIFICATE_FILE=/etc/kurrentdb/certs/node3/node.crt + - KURRENTDB_CERTIFICATE_PRIVATE_KEY_FILE=/etc/kurrentdb/certs/node3/node.key + - KURRENTDB_ADVERTISE_HOST_TO_CLIENT_AS=127.0.0.1 + - KURRENTDB_ADVERTISE_NODE_PORT_TO_CLIENT_AS=2113 ports: - 2113:2113 networks: clusternetwork: ipv4_address: 172.30.240.13 volumes: - - ./certs:/etc/eventstore/certs + - ./certs:/etc/kurrentdb/certs restart: unless-stopped depends_on: - cert-gen networks: clusternetwork: - name: eventstoredb.local + name: kurrentdb.local driver: bridge ipam: driver: default diff --git a/docker/build.sh b/docker/build.sh new file mode 100755 index 00000000000..e8d058e800b --- /dev/null +++ b/docker/build.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env sh + +set -e +set -x + +source_directory=$1 +output_directory=$2 + +tests=$(find "$source_directory" -maxdepth 1 -type d -name "*.Tests") + +# Publish tests +for test in $tests; do + echo Publishing tests for $test + + dotnet publish \ + --runtime="$RUNTIME" \ + --no-self-contained \ + --configuration Release \ + --output "$output_directory/$(basename "$test")" \ + "$test" +done + +cp "$source_directory"/*.sln "$output_directory" diff --git a/docker/test.sh b/docker/test.sh new file mode 100755 index 00000000000..bf568e03d7e --- /dev/null +++ b/docker/test.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env sh +set -e + +tests_directory=/build/published-tests +settings=/build/ci/ci.runsettings +output_directory=/build/test-results + +tests=$(find "$tests_directory" -maxdepth 1 -type d -name "*.Tests") + +for test in $tests; do + proj=$(basename "$test") + + dotnet test \ + --blame \ + --blame-hang-timeout 5min \ + --settings "$settings" \ + --logger:"GitHubActions;report-warnings=false" \ + --logger:html \ + --logger:trx \ + --logger:"console;verbosity=normal" \ + --results-directory "$output_directory/$proj" "$test/$proj.dll" + + html_files=$(find "$output_directory/$proj" -name "*.html") + + for html in $html_files; do + cat "$html" > "$output_directory/test-results.html" + done +done diff --git a/docs/.eslintignore b/docs/.eslintignore new file mode 100644 index 00000000000..f4e822b6f58 --- /dev/null +++ b/docs/.eslintignore @@ -0,0 +1,6 @@ +!.vuepress/ +!.*.js +.cache/ +.temp/ +node_modules/ +dist/ \ No newline at end of file diff --git a/docs/.eslintrc.cjs b/docs/.eslintrc.cjs new file mode 100644 index 00000000000..92de49d892a --- /dev/null +++ b/docs/.eslintrc.cjs @@ -0,0 +1,13 @@ +module.exports = { + root: true, + extends: 'vuepress', + overrides: [ + { + files: ['*.ts', '*.vue'], + extends: 'vuepress-typescript', + parserOptions: { + project: ['tsconfig.json'], + }, + }, + ], +} \ No newline at end of file diff --git a/docs/.node-version b/docs/.node-version new file mode 100644 index 00000000000..2edeafb09db --- /dev/null +++ b/docs/.node-version @@ -0,0 +1 @@ +20 \ No newline at end of file diff --git a/docs/.vuepress/client.ts b/docs/.vuepress/client.ts new file mode 100644 index 00000000000..3c187a0c265 --- /dev/null +++ b/docs/.vuepress/client.ts @@ -0,0 
+1,18 @@ +import type {Router} from "vue-router"; +import {defineClientConfig} from 'vuepress/client'; +import CloudBanner from "./components/CloudBanner.vue"; + +interface ClientConfig { + enhance?: (context: { + app: any; + router: Router; + siteData: any; + }) => void | Promise; + setup?: () => void; +} + +export default defineClientConfig({ + enhance({app, router, siteData}) { + app.component("CloudBanner", CloudBanner); + } +} satisfies ClientConfig); \ No newline at end of file diff --git a/docs/.vuepress/components/CloudBanner.vue b/docs/.vuepress/components/CloudBanner.vue new file mode 100644 index 00000000000..87053f9303a --- /dev/null +++ b/docs/.vuepress/components/CloudBanner.vue @@ -0,0 +1,17 @@ + \ No newline at end of file diff --git a/docs/.vuepress/config.ts b/docs/.vuepress/config.ts index 85fe5bd6a0e..55bc1a1f139 100644 --- a/docs/.vuepress/config.ts +++ b/docs/.vuepress/config.ts @@ -1,19 +1,22 @@ +import { dl } from "@mdit/plugin-dl"; +import viteBundler from "@vuepress/bundler-vite"; +import vueDevTools from 'vite-plugin-vue-devtools' import {defineUserConfig} from "vuepress"; -import containers from "./lib/containers"; -import {importCodePlugin} from "./markdown/xode/importCodePlugin"; +import {fs} from "vuepress/utils"; +import {hopeTheme} from "vuepress-theme-hope"; import {resolveMultiSamplesPath} from "./lib/samples"; import {linkCheckPlugin} from "./markdown/linkCheck"; import {replaceLinkPlugin} from "./markdown/replaceLink"; -import viteBundler from "@vuepress/bundler-vite"; -import {defaultTheme} from "@vuepress/theme-default"; -import {containerPlugin} from "@vuepress/plugin-container"; +import {importCodePlugin} from "./markdown/xode/importCodePlugin"; const projectionSamplesPath = "https://raw.githubusercontent.com/EventStore/EventStore/53f84e55ea56ccfb981aff0e432581d72c23fbf6/samples/http-api/data/"; export default defineUserConfig({ - title: "EventStoreDB Documentation", - description: "The stream database built for Event Sourcing", - bundler: viteBundler(), + base: "/", + dest: "public", + title: "KurrentDB Docs", + description: "Event-native database", + bundler: viteBundler({viteOptions: {plugins: [vueDevTools(),],}}), markdown: {importCode: false}, extendsMarkdown: md => { md.use(importCodePlugin, { @@ -22,43 +25,60 @@ export default defineUserConfig({ md.use(linkCheckPlugin); md.use(replaceLinkPlugin, { replaceLink: (link: string, _) => link - .replace("@server", "") + .replace("@server", "/server") .replace("@clients/http-api/", "/http-api/") .replace("@clients/httpapi/", "/http-api/") .replace("@httpapi/data/", projectionSamplesPath) .replace("@httpapi", "/http-api") }); + md.use(dl); }, - theme: defaultTheme({ - sidebarDepth: 2, + theme: hopeTheme({ + logo: "/eventstore-dev-logo-dark.svg", + logoDark: "/eventstore-logo-alt.svg", docsDir: ".", + toc: true, sidebar: { - "/": require("../sidebar"), - "/http-api/": require("../http-api/sidebar") + "/server/": "structure", + "/http-api/": "structure" }, navbar: [ { text: "Server", - link: "/", + link: "/server/quick-start/", }, { text: "HTTP API", link: "/http-api/" } - ] + ], + markdown: { + figure: true, + imgLazyload: true, + imgMark: true, + imgSize: true, + tabs: true, + codeTabs: true, + component: true, + mermaid: true, + highlighter: { + type: "shiki", + themes: { + light: "one-light", + dark: "one-dark-pro", + } + } + }, + plugins: { + search: {}, + sitemap:{ + devServer: process.env.NODE_ENV === 'development', + modifyTimeGetter: (page, app) => + 
fs.statSync(app.dir.source(page.filePathRelative!)).mtime.toISOString() + }, + components: { + components: ["Badge", "VPBanner", "VPCard", "VidStack"] + }, + } }), - plugins: [ - containers("tabs", "TabView", type => `${type ? ` type='${type}'` : ""}`), - containers("tab", "TabPanel", label => `header="${label}"`), - containerPlugin( { - type: "note", - before: title => `

${title === "" ? "NOTE" : title}

`, - after: _ => `
` - }), - containerPlugin ({ - type: "card", - before: _ => `` - }), - ], }); diff --git a/docs/.vuepress/lib/containers.ts b/docs/.vuepress/lib/containers.ts deleted file mode 100644 index 131487df955..00000000000 --- a/docs/.vuepress/lib/containers.ts +++ /dev/null @@ -1,13 +0,0 @@ -import {containerPlugin} from "@vuepress/plugin-container"; -import type {Plugin} from "vuepress"; - -type getAttr = (t: string) => string; - -export default function (name: string, tag: string, attr: getAttr): Plugin { - return containerPlugin( - { - type: name, - before: x => `<${tag}${attr ? " " + attr(x) : ""}>`, - after: _ => ``, - }); -} diff --git a/docs/.vuepress/lib/log.ts b/docs/.vuepress/lib/log.ts new file mode 100644 index 00000000000..92b3140b2b9 --- /dev/null +++ b/docs/.vuepress/lib/log.ts @@ -0,0 +1,12 @@ +export default { + error(message: string) { + console.log("\x1b[41m%s\x1b[0m", ' ERROR ', `${message}\n`) + process.exit(0) + }, + info(message: string) { + console.log("\x1b[44m%s\x1b[0m", ' INFO ', `${message}\n`) + }, + success(message: string) { + console.log("\x1b[42m\x1b[30m%s\x1b[0m", ' DONE ', `${message}\n`) + } +} diff --git a/docs/.vuepress/lib/samples.ts b/docs/.vuepress/lib/samples.ts index 96ed0008bfd..c0ce2a66567 100644 --- a/docs/.vuepress/lib/samples.ts +++ b/docs/.vuepress/lib/samples.ts @@ -1,6 +1,6 @@ import {type ResolvedImport} from "../markdown/xode/types"; import version from "./version"; -import {path} from "@vuepress/utils"; +import {path} from "vuepress/utils"; const base = "../../samples"; diff --git a/docs/.vuepress/lib/types.ts b/docs/.vuepress/lib/types.ts new file mode 100644 index 00000000000..3520540782e --- /dev/null +++ b/docs/.vuepress/lib/types.ts @@ -0,0 +1,18 @@ +import type {NavItemOptions, SidebarLinkOptions} from "vuepress-theme-hope"; + +export interface EsSidebarGroupOptions extends NavItemOptions { + group?: string; + collapsible?: boolean; + title?: string; + version?: string; + prefix?: string; + link?: string; + children: EsSidebarItemOptions[] | string[]; +} + +export type EsSidebarItemOptions = SidebarLinkOptions | EsSidebarGroupOptions | string; +export type EsSidebarArrayOptions = EsSidebarItemOptions[]; +export type EsSidebarObjectOptions = Record; +export type EsSidebarOptions = EsSidebarArrayOptions | EsSidebarObjectOptions; + +export type ImportedSidebarArrayOptions = EsSidebarGroupOptions[]; \ No newline at end of file diff --git a/docs/.vuepress/lib/version.ts b/docs/.vuepress/lib/version.ts index 6a950a90d3a..92fcbf59dc1 100644 --- a/docs/.vuepress/lib/version.ts +++ b/docs/.vuepress/lib/version.ts @@ -1,5 +1,15 @@ -const versionRegex = /(v?)((\d+\.)?(\d+\.)?(\*|\d+))/; -export default { +const versionRegex = /v((\d+\.)?(\d+\.)?(\*|\d+))/; +const nightly = "nightly"; +const v = { isVersion: (v: string) => versionRegex.test(v), - parseVersion: (v: string) => versionRegex.exec(v) -} + parseVersion: (v: string) => versionRegex.exec(v), + getVersion: (path: string): string | undefined => { + if (path.includes(nightly)) { + return nightly; + } + const ref = path.split("#")[0]; + const split = ref.split("/"); + return split.find(x => v.isVersion(x)); + } +}; +export default v; diff --git a/docs/.vuepress/lib/versioning.ts b/docs/.vuepress/lib/versioning.ts new file mode 100644 index 00000000000..ea5eb033444 --- /dev/null +++ b/docs/.vuepress/lib/versioning.ts @@ -0,0 +1,135 @@ +import * as fs from "fs"; +import {path} from 'vuepress/utils'; +import log from "./log"; +import {createRequire} from 'node:module'; +// import references from 
"../versions.json"; +import type { + EsSidebarGroupOptions, EsSidebarObjectOptions, + ImportedSidebarArrayOptions +} from "./types"; + +interface VersionDetail { + version: string, + path: string, + startPage: string +} + +interface Version { + id: string, + group: string, + basePath: string, + versions: VersionDetail[] +} + +const createSidebarItem = (item: EsSidebarGroupOptions, path: string, version: string, group: string): EsSidebarGroupOptions => { + const xp = `/${path}/`; + let ch = item.children as string[]; + if (item.collapsible !== undefined) { + ch = ch.map(x => !x.startsWith('../') ? '../' + x : x); + } + const childPath = item.prefix ? `/${path}${item.prefix}` : xp; + const children = ch.map(x => x.split(xp).join("")); + return { + ...item, + children: children.map(x => `${childPath}${x}`), + prefix: undefined, + group, + version, + text: item.text || item.title || "" + } +} + +export class versioning { + versions: Version[] = []; + + constructor() { + // const require = createRequire(import.meta.url) + // references.forEach(p => { + // const fileName = path.resolve(__dirname, p); + // if (fs.existsSync(fileName)) { + // log.info(`Importing versions from ${fileName}`); + // const list: Version[] = require(fileName); + // list.forEach(v => { + // const existing = this.versions.find(x => x.id === v.id); + // if (existing === undefined) { + // this.versions.push(v); + // } else { + // existing.versions.push(...v.versions); + // } + // }); + // } else { + // log.info(`File ${fileName} doesn't exist, ignoring`); + // } + // }); + } + + get latestSemver(): string { + const serverDocs = this.versions.find(v => v.id === "server"); + if (!serverDocs) { + throw new Error("Server docs not found"); + } + return serverDocs.versions[0].path; + } + + // latest stable release + get latest(): string { + const serverDocs = this.versions.find(v => v.id === "server"); + if (!serverDocs) { + throw new Error("Server docs not found"); + } + return `${serverDocs.basePath}/${serverDocs.versions[0].path}`; + } + + get all() { + return this.versions + } + + // Generate a single object that represents all versions from each sidebar + getSidebars() { + let sidebars: EsSidebarObjectOptions = {}; + const require = createRequire(import.meta.url); + + this.versions.forEach(version => { + version.versions.forEach(v => { + const p = `${version.basePath}/${v.path}`; + const sidebarPath = path.resolve(__dirname, `../../${p}`); + const sidebarBase = path.join(sidebarPath, "sidebar"); + const sidebarJs = `${sidebarBase}.js`; + const sidebarCjs = `${sidebarBase}.cjs`; + fs.copyFileSync(sidebarJs, sidebarCjs); + log.info(`Importing sidebar from ${sidebarJs}`); + const sidebar: ImportedSidebarArrayOptions = require(sidebarCjs); + sidebars[`/${p}/`] = sidebar.map(item => createSidebarItem(item, p, v.version, version.group)); + fs.rmSync(sidebarCjs); + }); + }) + + console.log(JSON.stringify(sidebars, null, 2)); + return sidebars; + } + + version(id: string) { + const ret = this.versions.find(x => x.id === id); + if (ret === undefined) log.error(`Version ${id} not defined`); + return ret; + } + + // Build dropdown items for each version + linksFor(id: string, url?: string) { + const links: { text: string, link: string }[] = []; + const version = this.version(id); + if (version === undefined) return links; + + version.versions.forEach(v => { + const path = `${version.basePath}/${v.path}`; + const pageUrl = (url ? url : v.startPage ? 
v.startPage : ""); + const link = `/${path}/${pageUrl}`; + const item = {text: v.version, link: link}; + links.push(item); + }); + + return links; + } +} + +export const instance: versioning = new versioning(); diff --git a/docs/.vuepress/markdown/resolver.ts b/docs/.vuepress/markdown/resolver.ts new file mode 100644 index 00000000000..b7078695411 --- /dev/null +++ b/docs/.vuepress/markdown/resolver.ts @@ -0,0 +1,30 @@ +import {logger, path} from "vuepress/utils"; +import version from "../lib/version"; +import {instance} from "../lib/versioning"; + +export const resolveVersionedPath = (importPath: string, filePath: string | null | undefined) => { + let importFilePath = importPath; + let error: string | null = null; + + if (!path.isAbsolute(importPath)) { + // if the importPath is a relative path, we need to resolve it + // according to the markdown filePath + if (!filePath) { + logger.error(`Unable to resolve code path: ${filePath}`); + return { + importFilePath: null, + error: 'Error when resolving path', + }; + } + importFilePath = path.resolve(filePath, '..', importPath); + } + + // if (importFilePath.includes("{version}")) { + // const ver = version.getVersion(filePath!) ?? instance.latestSemver; + // if (ver) { + // importFilePath = importFilePath.replace("{version}", ver); + // } + // } + + return {importFilePath, error}; +} \ No newline at end of file diff --git a/docs/.vuepress/markdown/xode/createImportCodeBlockRule.ts b/docs/.vuepress/markdown/xode/createImportCodeBlockRule.ts index 7017743b28f..69fdaf01c7e 100644 --- a/docs/.vuepress/markdown/xode/createImportCodeBlockRule.ts +++ b/docs/.vuepress/markdown/xode/createImportCodeBlockRule.ts @@ -1,8 +1,8 @@ -// @ts-ignore -import type {RuleBlock} from "markdown-it/lib/parser_block" import {path} from "vuepress/utils" import type {ExtendedCodeImportPluginOptions, ImportCodeTokenMeta, ResolvedImport} from "./types"; // @ts-ignore +import type {RuleBlock} from "markdown-it/lib/parser_block"; +// @ts-ignore import type {StateBlock} from "markdown-it"; // min length of the import code syntax, i.e. '@[code]()' @@ -19,6 +19,8 @@ const replaceKnownPrismExtensions = (ext: string): string => knownPrismIssues[ex // regexp to match the import syntax const SYNTAX_RE = /^@\[code(?:{(\d+)?-(\d+)?})?(?:{(.+)?})?(?: ([^\]]+))?]\(([^)]*)\)/; +const name = "tabs"; + export const createImportCodeBlockRule = ({ handleImportPath = (str) => [{importPath: str}], }: ExtendedCodeImportPluginOptions): RuleBlock => ( @@ -53,7 +55,7 @@ export const createImportCodeBlockRule = ({ const resolvedImports = handleImportPath(importPath); - const addBlock = (r: ResolvedImport) => { + const addCodeBlock = (r: ResolvedImport) => { const meta: ImportCodeTokenMeta = { importPath: r.importPath, lineStart: lineStart ? 
Number.parseInt(lineStart, 10) : 0, @@ -61,7 +63,7 @@ export const createImportCodeBlockRule = ({ region: region }; - // create a import_code token + // create an import_code token const token = state.push('import_code', 'code', 0); // use user specified info, or fallback to file ext @@ -72,28 +74,30 @@ export const createImportCodeBlockRule = ({ token.meta = meta; } - const addGroup = (r: ResolvedImport) => { - const token = state.push('container_code-group-item', "CodeGroupItem", 1); + const addGroupItem = (r: ResolvedImport) => { + const token = state.push(`${name}_tab_open`, "", 1); token.block = true; - token.attrSet("title", r.label); + token.info = r.label; + token.meta = {active: false}; - addBlock(r); + addCodeBlock(r); - state.push('container_code-group-item', "CodeGroupItem", -1); + state.push(`${name}_tab_close`, "", -1); } const experiment = resolvedImports.length > 1; if (experiment) { - const token = state.push('container_code-group_open', "div", 1); - token.block = true; + const token = state.push(`${name}_tabs_open`, "", 1); + token.info = name; + token.meta = {id: "code"}; for (const resolved of resolvedImports) { - addGroup(resolved); + addGroupItem(resolved); } - state.push('container_code-group_close', "div", -1); + state.push(`${name}_tabs_close`, "", -1); } else { - addBlock(resolvedImports[0]); + addCodeBlock(resolvedImports[0]); } state.line = startLine + 1; diff --git a/docs/.vuepress/markdown/xode/resolveImportCode.ts b/docs/.vuepress/markdown/xode/resolveImportCode.ts index ad47b969956..93b8f56591a 100644 --- a/docs/.vuepress/markdown/xode/resolveImportCode.ts +++ b/docs/.vuepress/markdown/xode/resolveImportCode.ts @@ -1,6 +1,7 @@ -import {fs, path, logger} from "vuepress/utils"; +import {fs} from "vuepress/utils"; import type {MarkdownEnv} from "../types"; import type {ImportCodeTokenMeta} from "./types"; +import {resolveVersionedPath} from "../resolver"; function testLine(line: string, regexp: RegExp, regionName: string, end = false) { const [full, tag, name] = regexp.exec(line.trim()) || []; @@ -21,7 +22,7 @@ function findRegion(lines: string[] | null, regionName: string) { /^$/, // HTML, markdown /^#(End Region) ([\w*-]+)$/, // Visual Basic /^::#(endregion) ([\w*-]+)$/, // Bat - /^# ?((?:end)?region) ([\w*-]+)$/ // C#, PHP, Powershell, Python, perl & misc + /^# ?((?:end)?region) ([\w*-]+)$/ // C#, PHP, PowerShell, Python, perl & misc ]; let regexp = null; @@ -51,28 +52,9 @@ export const resolveImportCode = ( importFilePath: string | null importCode: string } => { - let importFilePath = importPath; - - if (!path.isAbsolute(importPath)) { - // if the importPath is relative path, we need to resolve it - // according to the markdown filePath - if (!filePath) { - logger.error(`Unable to resolve code path: ${filePath}`); - return { - importFilePath: null, - importCode: 'Error when resolving path', - }; - } - importFilePath = path.resolve(filePath, '..', importPath); - } - - // check file existence - if (!fs.existsSync(importFilePath)) { - logger.error(`Code file can't be found: ${importFilePath}`); - return { - importFilePath, - importCode: 'File not found!', - }; + const {importFilePath, error} = resolveVersionedPath(importPath, filePath); + if (importFilePath === null || error !== null){ + return {importFilePath, importCode: error!}; } // read file content @@ -82,10 +64,11 @@ export const resolveImportCode = ( if (l.length === 0) return l; const spaces = l[0].length - l[0].trimStart().length; if (spaces === 0) return l; - return l.map((v, i) => 
v.substr(spaces)); + return l.map(v => v.substr(spaces)); } - const allLines = fileContent.split('\n'); + const allLines = (fileContent != null) ? fileContent.split('\n') : null; + if (!allLines) return {importFilePath, importCode: "Code is empty"}; if (region) { const reg = findRegion(allLines, region); if (reg) { diff --git a/docs/.vuepress/markdown/xode/types.ts b/docs/.vuepress/markdown/xode/types.ts index c1ebff2b90b..f3a34d621b3 100644 --- a/docs/.vuepress/markdown/xode/types.ts +++ b/docs/.vuepress/markdown/xode/types.ts @@ -4,6 +4,7 @@ export interface ImportCodeTokenMeta { lineEnd?: number; region?: string; } + export interface ResolvedImport { label?: string; importPath: string; diff --git a/docs/.vuepress/public/cloud.png b/docs/.vuepress/public/cloud.png new file mode 100644 index 00000000000..c9c61b8f5bf Binary files /dev/null and b/docs/.vuepress/public/cloud.png differ diff --git a/docs/.vuepress/public/eventstore-dev-logo-dark.svg b/docs/.vuepress/public/eventstore-dev-logo-dark.svg new file mode 100644 index 00000000000..6df513cb180 --- /dev/null +++ b/docs/.vuepress/public/eventstore-dev-logo-dark.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/.vuepress/public/eventstore-logo-alt.svg b/docs/.vuepress/public/eventstore-logo-alt.svg new file mode 100644 index 00000000000..100a664a88b --- /dev/null +++ b/docs/.vuepress/public/eventstore-logo-alt.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/.vuepress/public/eventstore-logo.svg b/docs/.vuepress/public/eventstore-logo.svg new file mode 100644 index 00000000000..6df513cb180 --- /dev/null +++ b/docs/.vuepress/public/eventstore-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/docs/.vuepress/public/fonts/Solina-Bold.woff b/docs/.vuepress/public/fonts/Solina-Bold.woff new file mode 100644 index 00000000000..83746eecdd1 Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Bold.woff differ diff --git a/docs/.vuepress/public/fonts/Solina-Bold.woff2 b/docs/.vuepress/public/fonts/Solina-Bold.woff2 new file mode 100644 index 00000000000..813770ef1b2 Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Bold.woff2 differ diff --git a/docs/.vuepress/public/fonts/Solina-Light.woff b/docs/.vuepress/public/fonts/Solina-Light.woff new file mode 100644 index 00000000000..236d5011ce7 Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Light.woff differ diff --git a/docs/.vuepress/public/fonts/Solina-Light.woff2 b/docs/.vuepress/public/fonts/Solina-Light.woff2 new file mode 100644 index 00000000000..3566766cf5d Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Light.woff2 differ diff --git a/docs/.vuepress/public/fonts/Solina-Medium.woff b/docs/.vuepress/public/fonts/Solina-Medium.woff new file mode 100644 index 00000000000..e9d8fc9b33e Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Medium.woff differ diff --git a/docs/.vuepress/public/fonts/Solina-Medium.woff2 b/docs/.vuepress/public/fonts/Solina-Medium.woff2 new file mode 100644 index 00000000000..8598d1216ef Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Medium.woff2 differ diff --git a/docs/.vuepress/public/fonts/Solina-Regular.woff b/docs/.vuepress/public/fonts/Solina-Regular.woff new file mode 100644 index 00000000000..2753ec2fd2d Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Regular.woff differ diff --git a/docs/.vuepress/public/fonts/Solina-Regular.woff2 b/docs/.vuepress/public/fonts/Solina-Regular.woff2 new file mode 100644 index 
00000000000..29565392eed Binary files /dev/null and b/docs/.vuepress/public/fonts/Solina-Regular.woff2 differ diff --git a/docs/.vuepress/styles/index.scss b/docs/.vuepress/styles/index.scss index 9c1564fb023..1b7ae2f3025 100644 --- a/docs/.vuepress/styles/index.scss +++ b/docs/.vuepress/styles/index.scss @@ -1,21 +1,139 @@ -:root { - --content-width: 940px; +@use "vuepress-theme-hope/presets/shinning-feature-panel"; + +@font-face { + font-family: "Solina"; + src: url("/fonts/Solina-Light.woff2") format("woff2"), url("/fonts/Solina-Light.woff") format("woff"); + font-weight: 400; + font-style: normal; +} + +@font-face { + font-family: "Solina"; + src: url("/fonts/Solina-Regular.woff2") format("woff2"), url("/fonts/Solina-Regular.woff") format("woff"); + font-weight: 450; + font-style: normal; +} + +@font-face { + font-family: "Solina"; + src: url("/fonts/Solina-Medium.woff2") format("woff2"), url("/fonts/Solina-Medium.woff") format("woff"); + font-weight: 500; + font-style: normal; +} + +@font-face { + font-family: "Solina"; + src: url("/fonts/Solina-Bold.woff2") format("woff2"), url("/fonts/Solina-Bold.woff") format("woff"); + font-weight: 700; + font-style: normal; +} + +/* VuePress changes to match our style*/ + +.vp-site-name { + display: none; +} + +body, +html { + font-size: 16px; +} + +body:has(#kapa-widget-container) .vp-back-to-top-button { + bottom: 7.5rem !important; +} + +.vp-feature-title .icon { + font-size: 1.5em; + vertical-align: middle; +} + +.vp-nav-links { + font-size: 1rem; +} + +.theme-hope-content:not(.custom) > p, +.theme-hope-content:not(.custom) > ol p, +.theme-hope-content:not(.custom) > ul p { + text-align: left; +} + +html[data-theme="dark"] #kapa-widget-portal { + code { + background: rgba(127, 127, 127, 0.12); + } + + blockquote { + border-color: #eee; + } +} + +/* Fix for center-alignment issue on `.not-found` pages */ +.vp-page.not-found .theme-hope-content, +.vp-page.not-found .vp-breadcrumb, +.vp-page.not-found .vp-page-title, +.vp-page.not-found .vp-toc-placeholder { + text-align: initial; + width: var(--content-width, 740px); +} + +.vp-hero-info { + text-align: center; } -.custom-container.note { - border-color: var(--nprogress-color); - background-color: var(--c-tip-bg); - color: var(--c-tip-text); - padding: 0.1rem 1.5rem; - border-left-width: 0.5rem; - border-left-style: solid; - margin: 1rem 0; +.DocSearch-Button-Keys { + display: none; } -.custom-container-title { - color: var(--c-tip-title); +.DocSearch-Button-Placeholder { + padding-right: 30px; } -.site-name { - display: none; -} \ No newline at end of file +#main-description { + max-width: 50rem; +} + +.vp-highlight-header { + text-align: center; +} + +.vp-highlight-info-wrapper:only-child { + flex: 1; +} + +/*Remove deprecation notice*/ +div[style*="z-index: 2147483647"] { + display: none !important; + visibility: hidden !important; + opacity: 0 !important; + position: absolute !important; + pointer-events: none !important; +} + +.vp-hero-title { + margin: 0.5rem 0; + background: linear-gradient( 120deg, var(--vp-c-accent-hover), var(--vp-c-accent) 30%, var(--end-gradient) 120% ); + -webkit-background-clip: text; + background-clip: text; + font-weight: bold; + font-size: 3.6rem; + font-family: var(--vp-font); + line-height: 1.5; + -webkit-text-fill-color: transparent; +} + +.vp-highlight-item { + list-style: none; +} + +@media screen and (max-width: 480px) { + .vp-action, + .custom-class-kapa, + .kapa-container { + display: none; + } +} + +.vp-highlight-title { + font-size: 1.4rem; +} diff 
--git a/docs/.vuepress/styles/palette.scss b/docs/.vuepress/styles/palette.scss index e69de29bb2d..f87d09207dd 100644 --- a/docs/.vuepress/styles/palette.scss +++ b/docs/.vuepress/styles/palette.scss @@ -0,0 +1,193 @@ +@use 'sass:color'; + +/* Primary colors */ +$main-color: #631b3a; +$secondary-color: #B75781; + +/* Light Theme Colors */ +$color-background-light: #ffffff; +$color-foreground-light: #000000; +$color-link-light: #479bff; +$color-title-light: $main-color; + +/* Dark Theme Colors */ +$color-background-dark: #080b0d; +$color-foreground-dark: #f0f0f0; +$color-title-dark: #f0f0f0; + +$theme-color-light: color.mix(black, $main-color, 10%); // Darken by 10% +$theme-color-soft: #{color.scale(#631b3a, $lightness: 85%)}; +$theme-color-dark: color.mix(white, $main-color, 10%); // Lighten by 10% +$theme-color-mask: color.adjust( $secondary-color, $alpha: -0.85 ); // Reduce opacity + +/* CSS Variable Definitions */ +:root { + --main-color: #{$main-color}; + --secondary-color: #{$secondary-color}; + --end-gradient: #{$secondary-color}; + --color-link: #{$color-link-light}; + --theme-name: light; + --theme-shade: light; + --theme-contrast: low; + --color-background: #{$color-background-light}; + --color-foreground: #{$color-foreground-light}; + --color-title: var(--main-color); + --color-title-contrast: #ffffff; + --color-highlight: var(--main-color); + --color-highlight-contrast: #ffffff; + --color-focus: #8d5fff; + --color-focus-contrast: #ffffff; + --color-error: #b75781; + --color-error-contrast: #ffffff; + --color-error-alt: #dbabc0; + --color-error-alt-contrast: #000000; + --color-warning: #7d6f05; + --color-warning-contrast: #ffffff; + --color-warning-alt: #fcf29b; + --color-warning-alt-contrast: #000000; + --color-success: #2c667d; + --color-success-contrast: #ffffff; + --color-success-alt: #dbf9ee; + --color-success-alt-contrast: #000000; + --color-info: #4855a7; + --color-info-contrast: #ffffff; + --color-info-alt: #edf1fc; + --color-info-alt-contrast: #000000; + --color-shade-10: #f5f8f9; + --color-shade-20: #ecf1f3; + --color-shade-30: #e3eaed; + --color-shade-40: #d9e3e8; + --color-shade-50: #d0dce2; + --color-shade-60: #b4c8d1; + --color-overlay: #a79ab8; + --color-overlay-alpha: 0.75; + --color-disabled: #ecf1f3; + --color-disabled-border: #ecf1f3; + --color-disabled-contrast: #000000; + --code-fg: #383a42; + --code-bg: #fffffe; + --code-literal: #0184bc; + --code-symbol: #4078f2; + --code-keyword: #a626a4; + --code-string: #50a14f; + --code-error: #e45649; + --code-variable: #986801; + --code-class: #c18401; + --code-comment: #6f7f90; +} + +/* Dark Theme */ +@media (prefers-color-scheme: dark) { + :root { + --theme-name: dark; + --theme-shade: dark; + --theme-contrast: low; + --end-gradient: #{$main-color}; + --color-background: #{$color-background-dark}; + --color-foreground: #{$color-foreground-dark}; + --theme-name: dark; + --theme-shade: dark; + --theme-contrast: low; + --color-background: #080b0d; + --color-foreground: #f0f0f0; + --color-title: #f0f0f0; + --color-title-contrast: #080b0d; + --color-highlight: #64edbb; + --color-highlight-contrast: #080b0d; + --color-link: #479bff; + --color-link-contrast: #080b0d; + --color-focus: #8d5fff; + --color-focus-contrast: #080b0d; + --color-error: #ff4a80; + --color-error-contrast: #080b0d; + --color-error-alt: #631b3a; + --color-error-alt-contrast: #ffffff; + --color-warning: #ffed4e; + --color-warning-contrast: #080b0d; + --color-warning-alt: #7d6f05; + --color-warning-alt-contrast: #ffffff; + --color-success: #64edbb; + 
--color-success-contrast: #080b0d; + --color-success-alt: #2c667d; + --color-success-alt-contrast: #ffffff; + --color-info: #479bff; + --color-info-contrast: #080b0d; + --color-info-alt: #4855a7; + --color-info-alt-contrast: #ffffff; + --color-shade-10: #161c1e; + --color-shade-20: #1e2528; + --color-shade-30: #262e31; + --color-shade-40: #2e383b; + --color-shade-50: #364146; + --color-shade-60: #3f4c50; + --color-overlay: #2c667d; + --color-overlay-alpha: 0.75; + --color-disabled: #1e2528; + --color-disabled-border: #1e2528; + --color-disabled-contrast: #f0f0f0; + --code-fg: #d7dae0; + --code-bg: #313440; + --code-literal: #e5c07b; + --code-symbol: #56b6c2; + --code-keyword: #c678dd; + --code-string: #98c379; + --code-error: #e05252; + --code-variable: #e06c75; + --code-class: #e5c07b; + --code-comment: #5c6370; + } +} + +$vp-font: 'Solina, -apple-system, BlinkMacSystemFont, Segoe UI, Roboto, "Helvetica Neue", Arial, "Noto Sans", STHeiti, "Microsoft YaHei", SimSun, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"'; +$vp-font-heading: 'Solina, -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, "Noto Sans", STHeiti, "Microsoft YaHei", SimSun, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji"'; + +html[data-theme="dark"] { + --theme-color: var(--secondary-color); + --vp-c-accent-hover: var(--secondary-color); + --vp-c-accent: var(--secondary-color); + --vp-c-accent-bg: var(--secondary-color); + --vp-c-accent-soft: var(--main-color); + --theme-color-light: #{$theme-color-light}; + --theme-color-dark: #{$theme-color-dark}; + --theme-color-mask: #{$theme-color-mask}; + --end-gradient: #{$main-color}; +} + +html[data-theme="light"] { + --theme-color: var(--main-color); + --vp-c-accent-hover: var(--main-color); + --vp-c-accent: var(--main-color); + --vp-c-accent-bg: var(--main-color); + --vp-c-accent-soft: #{$theme-color-soft}; + --theme-color-light: #{$theme-color-light}; + --theme-color-dark: #{$theme-color-dark}; + --theme-color-mask: #{$theme-color-mask}; +} + +$vp-c-bg: ( light: #fff, dark: #080b0d, ); + +$vp-c-bg-elv-soft: ( light: #fff, dark: #080b0d, ); + +$font-body: 15px; + +/* Override Code Block Background */ +$sidebar-width: 18rem; + +$vp-c-text: ( light: #000, dark: #f0f0f0, ); + +.hint-container.important a, +.hint-container.info a, +.hint-container.note a, +.hint-container.tip a, +.hint-container.warning a, +.hint-container.caution a { + color: var(--color-link-light); + + html[data-theme="dark"] & { + color: var(--color-link); + } +} + +.vp-feature-item { + flex-basis: 21%; +} diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index ea06495eef9..00000000000 --- a/docs/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Introduction - -Welcome to the EventStoreDB documentation. - -EventStoreDB is a database designed for [Event Sourcing](https://eventstore.com/blog/what-is-event-sourcing/). This documentation introduces key concepts of EventStoreDB and explains its installation, configuration, and operational concerns. - -EventStoreDB is available in both an Open-Source and a Commercial version: - -- EventStoreDB OSS is the [open-source](https://github.com/EventStore/EventStore) and free-to-use edition of EventStoreDB. -- EventStoreDB Commercial is available for customers with an EventStoreDB [paid support subscription](https://eventstore.com/support/). 
EventStoreDB Commercial adds enterprise-focused features such as LDAP and X.509 authentication, OpenTelemetry Exporter, correlation event sequence visualisation, and management CLI tool. - -## What's new - -Find out [what's new](whatsnew.md) in this release to get details on new features and other changes. - -## Getting started - -Check the [getting started guide](/getting-started.md) for resources on the principles of EventStoreDB, Event Sourcing, database installation guidelines and choosing a client SDK. - -## Support - -### EventStoreDB community - -Users of EventStoreDB OSS can use the [community forum](https://discuss.eventstore.com) for questions, discussions and getting help from community members. - -### Enterprise customers - -Customers with the paid [support plan](https://eventstore.com/support/) can open tickets using the [support portal](https://eventstore.freshdesk.com). - -### Issues - -Since EventStoreDB is an open-source product, we track most of the issues openly in the EventStoreDB [repository on GitHub](https://github.com/EventStore/EventStore). Before opening an issue, please ensure that a similar issue hasn't been opened already. Also, try searching closed issues that might contain a solution or workaround for your problem. - -When opening an issue, follow our [guidelines](https://github.com/EventStore/EventStore/blob/master/CONTRIBUTING.md) for bug reports and feature requests. By doing so, you will greatly help us to solve your concerns most efficiently. - -## Protocols, clients, and SDKs - -EventStoreDB supports one client protocol, which is described below. The older TCP client API has been deprecated in version 20.2 and removed in version 24.2. The final version with TCP API support is 23.10. More information can be found in our [blog post](https://www.eventstore.com/blog/sunsetting-eventstoredb-tcp-based-client-protocol). - -Since version 24.6, the legacy protocol is available as a [commercial plugin](networking.md#external-tcp) available for Event Store customers. - -### Client protocol - -The client protocol is based on [open standards](https://grpc.io/) and is widely supported by many programming languages. EventStoreDB uses gRPC to communicate between the cluster nodes as well as for client-server communication. - -When developing software that uses EventStoreDB, we recommend using one of the official SDKs. - -#### EventStoreDB supported clients - -- Python: [pyeventsourcing/esdbclient](https://pypi.org/project/esdbclient/) -- Node.js (JavaScript/TypeScript): [EventStore/EventStore-Client-NodeJS](https://github.com/EventStore/EventStore-Client-NodeJS) -- Java: [(EventStore/EventStoreDB-Client-Java](https://github.com/EventStore/EventStoreDB-Client-Java) -- .NET: [EventStore/EventStore-Client-Dotnet](https://github.com/EventStore/EventStore-Client-Dotnet) -- Go: [EventStore/EventStore-Client-Go](https://github.com/EventStore/EventStore-Client-Go) -- Rust: [EventStore/EventStoreDB-Client-Rust](https://github.com/EventStore/EventStoreDB-Client-Rust) - -Read more in the [gRPC clients documentation](@clients/grpc/README.md). - -#### Community developed clients - -- [Ruby (yousty/event_store_client)](https://github.com/yousty/event_store_client) -- [Elixir (NFIBrokerage/spear)](https://github.com/NFIBrokerage/spear) - -### HTTP - -EventStoreDB also offers an HTTP-based interface. It consists of the REST-oriented API, and a realtime subscription feature based on the [AtomPub protocol](https://datatracker.ietf.org/doc/html/rfc5023). 
As it operates over HTTP, this is less efficient, but nearly every environment supports it. - -Find out more about configuring the HTTP protocol on the [HTTP configuration](networking.md#http-configuration) page. - -::: warning Deprecation Note -The current AtomPub-based HTTP application API is disabled by default since v20 of EventStoreDB. You can enable it by adding an [option](networking.md#atompub) to the server configuration. Although we plan to remove AtomPub support from future server versions, the server management HTTP API will remain available. -You need to enable the AtomPub protocol to have a fully functioning database user interface. -::: - -Learn more about the EventStoreDB HTTP interface in the [HTTP documentation](@clients/http-api/README.md). - - -#### Community developed clients - -- [PHP (prooph/event-store-http-client)](https://github.com/prooph/event-store-http-client/) -- [Ruby (yousty/event_store_client)](https://github.com/yousty/event_store_client) diff --git a/docs/admin-ui.md b/docs/admin-ui.md deleted file mode 100644 index 864dbde9588..00000000000 --- a/docs/admin-ui.md +++ /dev/null @@ -1,111 +0,0 @@ ---- -title: Admin UI ---- - -# Admin UI - -The EventStoreDB Admin UI is available at _SERVER_IP:2113_ and helps you interact with and manage a cluster in a visual way. This guide explains the tabs of the interface and what they do. - -::: tip -The embedded EventStoreDB web interface is reaching its end of life. We are working on a replacement, and you can try using the Early Preview of [Event Store Navigator](https://learn.eventstore.com/event-store-navigator-preview) app instead. It doesn't have full feature parity with the embedded web UI, but it already has usability improvements compared to it. -::: - -## Dashboard - -![Web admin interface dashboard](./images/wai-dashboard.png) - -The _Dashboard_ tab shows an overview of active queues with associated statistics in the top half. The _+_ icon indicates a queue group, click it to reveal the queues that are part of the group. - -The second half of the tab shows active connections to EventStoreDB and information about them. - -Click the _Snapshot_ button in the top right to output a snapshot of all queue statistics at the time you clicked the button. - -## Stream browser - -![Web admin interface stream browser tab](./images/wai-stream-browser.png) - -The _Stream Browser_ tab gives an overview of recently created and changed streams, clicking on an individual stream shows details about the individual stream. - -### Event stream - -![Web admin interface stream details](./images/wai-stream-details.png) - -Each stream shows pages of the events in a stream with an overview of the event. Click the _Name_ to see the EventId, and _JSON_ to see the event data. The buttons above change depending on what you are viewing in the interface. The _Back_ button takes you to the parent screen. - -The buttons on the top right when you are viewing an event stream are: - -- _Pause_: Stop showing events arriving into this stream. -- _Resume_: Resume showing events arriving into this stream. -- _Edit ACL_: Edit [the access control lists](security.md#access-control-lists) for a stream. -- _Add Event_: [Add a new event](@clients/http-api/README.md#appending-events) to the stream. -- _Delete_: [Delete a stream](@clients/http-api/README.md#deleting-a-stream) to the stream. -- _Query_: - -The buttons on the left above the events when you are viewing an event stream are: - -- _self_: Takes you to the overview of the stream. 
-- _first_: Takes you to the first page of events in a stream. -- _previous_: Takes you to the previous page of events in a stream. -- _metadata_: Shows the metadata of a stream. - - On the metadata screen, click _Add New Like This_ to add a new event to the stream. - -## Projections - -![Web admin interface projections tab](./images/wai-projections.png) - -The _Projections_ tab shows system and user created projections defined in EventStoreDB, the buttons above the list do the following: - -- _Disable All_: Disable all running projections. -- _Enable All_: Enable all stopped projections. -- _Include Queries_: Toggle displaying queries in the Projections table. -- _New Projection_: [Create a user-defined projection](projections.md#user-defined-projections) with the Admin UI. - -Clicking an individual projection shows further details. - -![Web admin interface projection details](./images/wai-projection-details.jpg) - -On the left is the projection definition. On the right are the stats, results, and state of the projection. The buttons above the details do the following: - -- _Start_: Start a stopped projection. -- _Stop_: Stop a running projection. -- _Edit_: Edit the projection definition. -- _Config_: [Set configuration options](projections.md#configuring-projections) for a projection. -- _Debug_: Opens [the debugging interface](projections.md#debugging) to debug what effect a projection is having on events. -- _Delete_: Delete a projection. -- _Reset_: Reset a projection. -- _Back_: Returns you to the parent screen. - -## Query - -The _Query_ tab has a code editor field where you can create transient and short-lived projections for quick analysis of your event streams. - -![Web admin interface query details](./images/wai-query-details.png) - -## Persistent subscriptions - -The _Persistent Subscriptions_ tab shows an overview of [persistent subscriptions](persistent-subscriptions.md) configured on streams. The button above the list does the following: - -- _New Subscription_: Create a new subscription. - -Clicking the _+_ icon next to a stream name reveals the subscription name and more buttons. The _Back_ button takes you to the parent screen. - -- _Edit_: Edit the subscription. -- _Delete_: Delete the subscription. -- _Detail_: Shows the subscription configuration options. -- _Replay Parked Messages_: Replay events in subscription to return state. - -## Admin - -![Web admin interface projections tab](./images/wai-admin.png) - -The _Admin_ tab shows subsystems enabled (currently only [projections](projections.md)) on EventStoreDB and [scavenges](operations.md#scavenging) run. You can start a new scavenge operation by clicking the _Scavenge_ button, and shut EventStoreDB down by clicking the _Shutdown Server_ button. - -## Users - -![Web admin interface projections tab](./images/wai-users.png) - -The _Users_ tab shows [the users defined in EventStoreDB](security.md#authentication), clicking an individual user shows a JSON representation of that user's details. - -## Log out - -Logs you out of the Admin UI interface. diff --git a/docs/cluster.md b/docs/cluster.md deleted file mode 100644 index 283e9acd7e2..00000000000 --- a/docs/cluster.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: Clustering ---- - -## Highly-available cluster - -EventStoreDB allows you to run more than one node in a cluster for high availability. 
- -::: info Cluster member authentication -EventStoreDB starts in secure mode by default, which requires configuration [settings for certificates](security.md#certificates-configuration). -Cluster members authenticate each other using the certificate Common Name. All the cluster nodes must have the same common name in their certificates. -::: - -### Cluster nodes - -EventStoreDB clusters follow a "shared nothing" philosophy, meaning that clustering requires no shared disks. Instead, each node has a copy of the data to ensure it is not lost in case of a drive failure or a node crashing. - -::: tip -Lean more about [node roles](#node-roles). -::: - -EventStoreDB uses a quorum-based replication model, in which a majority of nodes in the cluster must acknowledge that they have received a copy of the write before the write is acknowledged to the client. This means that to be able to tolerate the failure of _n_ nodes, the cluster must be of size _(2n + 1)_. A three node cluster can continue to accept writes if one node is unavailable. A five node cluster can continue to accept writes if two nodes are unavailable, and so forth. - -### Cluster size - -For any cluster configuration, you first need to decide how many nodes you want, provision each node and set the cluster size option on each node. - -The cluster size is a pre-defined value. The cluster expects the number of nodes to match this predefined number, otherwise the cluster would be incomplete and therefore unhealthy. - -The cluster cannot be dynamically scaled. If you need to change the number of cluster nodes, the cluster size setting must be changed on all nodes before the new node can join. - -Use the `ClusterSize` option to tell each cluster node about how many nodes the cluster should have. - -| Format | Syntax | -|:---------------------|:--------------------------| -| Command line | `--cluster-size` | -| YAML | `ClusterSize` | -| Environment variable | `EVENTSTORE_CLUSTER_SIZE` | - -**Default**: `1` (single node, no high-availability). - -Common values for the `ClusterSize` setting are three or five (to have a majority of two nodes and a majority of three nodes). We recommended setting this to an odd number of nodes to minimise the chance of a tie during elections, which would lengthen the election process. - -### Discovering cluster members - -Cluster nodes use the gossip protocol to discover each other and elect the cluster leader. - -Cluster nodes need to know about one another to gossip. To start this process, you provide gossip seeds for each node. - -Configure cluster nodes to discover other nodes in one of two ways: -- [via a DNS entry](#cluster-with-dns) and a well-known [gossip port](#gossip-port) -- [via a list of addresses](#cluster-with-gossip-seeds) of other cluster nodes - -The multi-address DNS name cluster discovery only works for clusters that use certificates signed by a private certificate authority, or run insecure. For other scenarios you need to provide the gossip seed using hostnames of other cluster nodes. - -### Internal communication - -When setting up a cluster, the nodes must be able to reach each other over both the HTTP channel, and the internal TCP channel. You should ensure that these ports are open on firewalls on the machines and between the machines. - -Learn more about [internal TCP configuration](networking.md#replication-protocol) and [HTTP configuration](networking.md#http-configuration) to set up the cluster properly. 
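For orientation, here is a minimal YAML sketch of one node in a three-node cluster that discovers its peers via gossip seeds rather than DNS. The host names and ports are placeholders for illustration only, not values taken from this repository; the snippet simply combines the `ClusterSize`, `DiscoverViaDns`, and `GossipSeed` options described in this section and the ones below.

```yaml:no-line-numbers
---
# One node of a three-node cluster discovering peers via gossip seeds.
# Host names and ports are placeholders.
ClusterSize: 3
DiscoverViaDns: false
GossipSeed: "node2.example.com:2113,node3.example.com:2113"
```

The other two nodes would use the same `ClusterSize` and list the remaining peers in their own `GossipSeed` value.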
- -## Cluster with DNS - -When you tell EventStoreDB to use DNS for its gossip, the server will resolve the DNS name to a list of IP addresses and connect to each of those addresses to find other nodes. This method is very flexible because you can change the list of nodes on your DNS server without changing the cluster configuration. The DNS method is also useful in automated deployment scenarios when you control both the cluster deployment and the DNS server from your infrastructure-as-code scripts. - -To use DNS discovery, you need to set the `ClusterDns` option to the DNS name that allows making an HTTP call to it. When the server starts, it will attempt to make a gRPC call using the `https://:` URL (`http` if the cluster is insecure). - -When using a certificate signed by a publicly trusted CA, you'd normally use the wildcard certificate. Ensure that the cluster DNS name fits the wildcard, otherwise the request will fail on SSL check. - -When using certificates signed by a private CA, the cluster DNS name must be included to the certificate of each node as SAN (subject alternative name). - -You also need to have the `DiscoverViaDns` option to be set to `true` but it is its default value. - -| Format | Syntax | -|:---------------------|:-------------------------| -| Command line | `--cluster-dns` | -| YAML | `ClusterDns` | -| Environment variable | `EVENTSTORE_CLUSTER_DNS` | - -**Default**: `fake.dns`, which doesn't resolve to anything. You have to set it to a proper DNS name when used in combination to the DNS discovery (next setting). - -| Format | Syntax | -|:---------------------|:------------------------------| -| Command line | `--discover-via-dns` | -| YAML | `DiscoverViaDns` | -| Environment variable | `EVENTSTORE_DISCOVER_VIA_DNS` | - -**Default**: `true`, the DNS discovery is enabled by default. - -It will be used only if the cluster has more than one node. You must set the `ClusterDns` setting to a proper DNS name. - -When using DNS for cluster gossip, you might need to set the `GossipPort` setting to the HTTP port if the external HTTP port setting is not set to `2113` default port. Refer to [gossip port](#gossip-port) option documentation to learn more. - -## Cluster with gossip seeds - -If you don't want or cannot use the DNS-based configuration, it is possible to tell cluster nodes to call other nodes using their IP addresses. This method is a bit more cumbersome, because each node has to have the list of addresses for other nodes configured, but not its own address. - -The setting accepts a comma-separated list of IP addresses or host names with their gossip port values. - -| Format | Syntax | -|:---------------------|:-------------------------| -| Command line | `--gossip-seed` | -| YAML | `GossipSeed` | -| Environment variable | `EVENTSTORE_GOSSIP_SEED` | - -## Gossip protocol - -EventStoreDB uses a quorum-based replication model. When working normally, a cluster has one node known as a leader, and the remaining nodes are followers. The leader node is responsible for coordinating writes while it is the leader. Cluster nodes use a consensus algorithm to determine which node should be the leader and which should be followers. EventStoreDB bases the decision as to which node should be the leader on a number of factors. - -For a cluster node to have this information available to them, the nodes gossip with other nodes in the cluster. Gossip runs over HTTP interfaces of cluster nodes. - -The gossip protocol configuration can be changed using the settings listed below. 
Pay attention to the settings related to time, like intervals and timeouts, when running in a cloud environment. - -### Gossip port - -The gossip port is used for constructing the URL for making a gossip request to other nodes that are discovered via DNS. It is not used when using gossip seeds, because in that case the list contains IP addresses and the port. - -::: warning -Normally, the cluster gossip port is the same as the HTTP port, so you don't need to change this setting. -::: - -| Format | Syntax | -|:---------------------|:---------------------------------| -| Command line | `--cluster-gossip-port` | -| YAML | `ClusterGossipPort` | -| Environment variable | `EVENTSTORE_CLUSTER_GOSSIP_PORT` | - -**Default**: HTTP port - -### Gossip interval - -Cluster nodes try to ensure that the communication with their neighbour nodes is not broken. They use the gossip protocol and call each other after a specified period of time. This period is called the gossip interval. You can change the `GossipInvervalMs` setting so cluster nodes check in with each other more or less frequently. - -The default value is two seconds (2000 ms). - -| Format | Syntax | -|:---------------------|:--------------------------------| -| Command line | `--gossip-interval-ms` | -| YAML | `GossipIntervalMs` | -| Environment variable | `EVENTSTORE_GOSSIP_INTERVAL_MS` | - -**Default**: `2000` (in milliseconds), which is two seconds. - -### Time difference toleration - -EventStoreDB expects the time on cluster nodes to be in sync within a given tolerance. - -If different nodes have their clock out of sync for a number of milliseconds that exceeds the value of this setting, the gossip is rejected and the node will not be accepted as a cluster member. - -| Format | Syntax | -|:---------------------|:------------------------------------------| -| Command line | `--gossip-allowed-difference-ms` | -| YAML | `GossipAllowedDifferenceMs` | -| Environment variable | `EVENTSTORE_GOSSIP_ALLOWED_DIFFERENCE_MS` | - -**Default**: `60000` (in milliseconds), which is one minute. - -### Gossip timeout - -When nodes call each other using the gossip protocol to understand the cluster status, a busy node might delay the response. When a node is not getting a response from another node, it might consider that other node as dead. Such a situation might trigger the election process. - -If your cluster network is congested, you might increase the gossip timeout using the `GossipTimeoutMs` setting, so nodes will be more tolerant to delayed gossip responses. The default value is 2.5 seconds (2500 ms). - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--gossip-timeout-ms` | -| YAML | `GossipTimeoutMs` | -| Environment variable | `EVENTSTORE_GOSSIP_TIMEOUT_MS` | - -**Default**: `2500` (in milliseconds). - -### Gossip on single node - -You can connect using gossip seeds regardless of whether you have a cluster or not. In the previous versions of EventStoreDB gossip on a single node was disabled. Starting from 21.2 it is enabled by default. - -::: warning -Please note that the `GossipOnSingleNode` option is deprecated and will be removed in a future version. The gossip endpoint is now unconditionally available for any deployment topology. 
-::: - -| Format | Syntax | -|:---------------------|:-----------------------------------| -| Command line | `--gossip-on-single-node` | -| YAML | `GossipOnSingleNode` | -| Environment variable | `EVENTSTORE_GOSSIP_ON_SINGLE_NODE` | - -**Default**: `true` - -### Leader election timeout - -The leader elections are separate to the node gossip, and are used to elect a node as Leader. - -In some cases the leader election messages may be delayed, which can result in elections taking longer than they should. If you start seeing election timeouts in the logs or if you have needed to increase the gossip timeout due to a congested network, then you should consider increasing the leader election timeout as well. - -| Format | Syntax | -|:---------------------|:----------------------------------------| -| Command line | `--leader-election-timeout-ms` | -| YAML | `LeaderElectionTimeoutMs` | -| Environment variable | `EVENTSTORE_LEADER_ELECTION_TIMEOUT_MS` | - -**Default**: `1000` (in milliseconds). - -## Node roles - -Every node in a stable EventStoreDB deployment settles into one of three roles: Leader, Follower, and ReadOnlyReplica. The cluster is composed of the Leader and Followers. - -### Leader - -The leader ensures that writes are persisted to its own disk, replicated to a majority of cluster nodes, and indexed on the leader so that they can be read from the leader, before acknowledging the write as successful to the client. - -### Follower - -A cluster assigns the follower role based on an election process. A cluster uses one or more nodes with the follower role to form the quorum, or the majority of nodes necessary to confirm that the write is persisted. - -### Read-only replica - -You can add read-only replica nodes, which will not become cluster members and will not take part in elections. Read-only replicas can be used for scaling up reads if you have many catch-up subscriptions and want to off-load cluster members. - -A cluster asynchronously replicates data one way to a node with the read-only replica role. The read-only replica node is not part of the cluster, so does not add to the replication requirements needed to acknowledge a write. For this reason a node with a read-only replica role does not add much overhead to the other nodes. - -You need to explicitly configure the node as a read-only replica using this setting: - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--read-only-replica` | -| YAML | `ReadOnlyReplica` | -| Environment variable | `EVENTSTORE_READ_ONLY_REPLICA` | - -**Default**: `false`, set to `true` for a read-only replica node. - -The replica node needs to have the cluster gossip DNS or seed configured. For the gossip seed, use DNS names or IP addresses of all other cluster nodes, except read-only replicas. - -### Node priority - -You can control which clones the cluster promotes with the `NodePriority` setting. The default value is `0`, and the cluster is more likely to promote nodes with higher values. - -| Format | Syntax | -|:---------------------|:--------------------------| -| Command line | `--node-priority` | -| YAML | `NodePriority` | -| Environment variable | `EVENTSTORE_NODE_PRORITY` | - -**Default**: `0`. - -::: tip -Changing `NodePriority` does not guarantee that the cluster will not promote the node. It is only one of the criteria that the Election Service considers. 
-::: - diff --git a/docs/configuration.md b/docs/configuration.md deleted file mode 100644 index 6b8110e6bda..00000000000 --- a/docs/configuration.md +++ /dev/null @@ -1,418 +0,0 @@ ---- -title: "How-to" ---- - -## Configuration options - -EventStoreDB has a number of configuration options that can be changed. You can find all the options described -in details in this section. - -When you don't change the configuration, EventStoreDB will use sensible defaults, but they might not suit your -needs. You can always instruct EventStoreDB to use a different set of options. There are multiple ways to -configure EventStoreDB server, described below. - -### Version and help - -You can check what version of EventStoreDB you have installed by using the `--version` parameter in the -command line. For example: - -:::: code-group -::: code Linux -```bash -```bash:no-line-numbers -$ eventstored --version -EventStoreDB version 24.6.0.0 (oss-v24.6.0-alpha-16-g8e06f9f77/8e06f9f77, 2023-10-24T22:05:57-05:00) -``` -::: -::: code Windows -``` -> EventStore.ClusterNode.exe --version -EventStoreDB version 24.6.0.0 (oss-v24.6.0-alpha-16-g8e06f9f77/8e06f9f77, 2023-10-24T22:05:57-05:00) -``` -::: -:::: - -The full list of available options is available from the currently installed server by using the `--help` -option in the command line. - -### Configuration file - -You would use the configuration file when you want the server to run with the same set of options every time. -YAML files are better for large installations as you can centrally distribute and manage them, or generate -them from a configuration management system. - -The default configuration file name is `eventstore.conf` and it's located in -- **Linux:** `/etc/eventstore/` -- **Windows:** EventStoreDB installation directory - -The configuration file has YAML-compatible format. The basic format of the YAML configuration file is as -follows: - -```yaml:no-line-numbers ---- -Db: "/volumes/data" -Log: "/esdb/logs" -``` - -::: tip -You need to use the three dashes and spacing in your YAML file. -::: - -The default configuration file name is `eventstore.conf`. It is located in `/etc/eventstore/` on Linux and the -server installation directory on Windows. You can either change this file or create another file and instruct -EventStoreDB to use it. - -To tell the EventStoreDB server to use a different configuration file, you pass the file path on the command -line with `--config=filename`, or use the `CONFIG` -environment variable. - -### Environment variables - -You can also set all arguments with environment variables. All variables are prefixed with `EVENTSTORE_` and -normally follow the pattern `EVENTSTORE_{option}`. For example, setting the `EVENTSTORE_LOG` -variable would instruct the server to use a custom location for log files. - -Environment variables override all the options specified in configuration files. - -### Command line - -You can also override options from both configuration files and environment variables using the command line. 
- -For example, starting EventStoreDB with the `--log` option will override the default log files location: - -:::: code-group -::: code-group-item Linux -```bash:no-line-numbers -eventstored --log /tmp/eventstore/logs -``` -::: -::: code-group-item Windows -```bash:no-line-numbers -EventStore.ClusterNode.exe --log C:\Temp\EventStore\Logs -``` -::: -:::: - -### Testing the configuration - -If more than one method is used to configure the server, it might be hard to find out what the effective -configuration will be when the server starts. To help to find out just that, you can use the `--what-if` -option. - -When you run EventStoreDB with this option, it will print out the effective configuration applied from all -available sources (default and custom configuration file, environment variables and command line parameters) -and print it out to the console. - -::: details Click here to see a WhatIf example - -``` -$ eventstored --what-if -[68443, 1,17:38:39.178,INF] -[68443, 1,17:38:39.186,INF] "OS ARCHITECTURE:" X64 -[68443, 1,17:38:39.207,INF] "OS:" Linux ("Unix 5.19.0.1025") -[68443, 1,17:38:39.210,INF] "RUNTIME:" ".NET 6.0.21/e40b3abf1" (64-bit) -[68443, 1,17:38:39.211,INF] "GC:" "3 GENERATIONS" "IsServerGC: False" "Latency Mode: Interactive" -[68443, 1,17:38:39.212,INF] "LOGS:" "/var/log/eventstore" -[68443, 1,17:38:39.251,INF] -MODIFIED OPTIONS: - Application Options: - WHAT IF: true (Command Line) - - -DEFAULT OPTIONS: - Application Options: - ALLOW ANONYMOUS ENDPOINT ACCESS: True () - ALLOW ANONYMOUS STREAM ACCESS: True () - ALLOW UNKNOWN OPTIONS: False () - CONFIG: /etc/eventstore/eventstore.conf () - DISABLE HTTP CACHING: False () - HELP: False () - LOG FAILED AUTHENTICATION ATTEMPTS: False () - LOG HTTP REQUESTS: False () - MAX APPEND SIZE: 1048576 () - SKIP INDEX SCAN ON READS: False () - STATS PERIOD SEC: 30 () - VERSION: False () - WORKER THREADS: 0 () - - Authentication/Authorization Options: - AUTHENTICATION CONFIG: () - AUTHENTICATION TYPE: internal () - AUTHORIZATION CONFIG: () - AUTHORIZATION TYPE: internal () - DISABLE FIRST LEVEL HTTP AUTHORIZATION: False () - - Certificate Options: - CERTIFICATE RESERVED NODE COMMON NAME: eventstoredb-node () - TRUSTED ROOT CERTIFICATES PATH: /etc/ssl/certs () - - Certificate Options (from file): - CERTIFICATE FILE: () - CERTIFICATE PASSWORD: () - CERTIFICATE PRIVATE KEY FILE: () - CERTIFICATE PRIVATE KEY PASSWORD: () - - Certificate Options (from store): - CERTIFICATE STORE LOCATION: () - CERTIFICATE STORE NAME: () - CERTIFICATE SUBJECT NAME: () - CERTIFICATE THUMBPRINT: () - TRUSTED ROOT CERTIFICATE STORE LOCATION: () - TRUSTED ROOT CERTIFICATE STORE NAME: () - TRUSTED ROOT CERTIFICATE SUBJECT NAME: () - TRUSTED ROOT CERTIFICATE THUMBPRINT: () - - Cluster Options: - CLUSTER DNS: fake.dns () - CLUSTER GOSSIP PORT: 2113 () - CLUSTER SIZE: 1 () - DEAD MEMBER REMOVAL PERIOD SEC: 1800 () - DISCOVER VIA DNS: True () - GOSSIP ALLOWED DIFFERENCE MS: 60000 () - GOSSIP INTERVAL MS: 2000 () - GOSSIP SEED: () - GOSSIP TIMEOUT MS: 2500 () - LEADER ELECTION TIMEOUT MS: 1000 () - NODE PRIORITY: 0 () - QUORUM SIZE: 1 () - READ ONLY REPLICA: False () - STREAM INFO CACHE CAPACITY: 0 () - UNSAFE ALLOW SURPLUS NODES: False () - - Database Options: - ALWAYS KEEP SCAVENGED: False () - CACHED CHUNKS: -1 () - CHUNK INITIAL READER COUNT: 5 () - CHUNK SIZE: 268435456 () - CHUNKS CACHE SIZE: 536871424 () - COMMIT TIMEOUT MS: 2000 () - DB: /var/lib/eventstore () - DB LOG FORMAT: V2 () - DISABLE SCAVENGE MERGING: False () - HASH COLLISION READ LIMIT: 100 () - 
INDEX: () - INDEX CACHE DEPTH: 16 () - INDEX CACHE SIZE: 0 () - INITIALIZATION THREADS: 1 () - MAX AUTO MERGE INDEX LEVEL: 2147483647 () - MAX MEM TABLE SIZE: 1000000 () - MAX TRUNCATION: 268435456 () - MEM DB: False () - MIN FLUSH DELAY MS: 2 () - OPTIMIZE INDEX MERGE: False () - PREPARE TIMEOUT MS: 2000 () - READER THREADS COUNT: 0 () - REDUCE FILE CACHE PRESSURE: False () - SCAVENGE BACKEND CACHE SIZE: 67108864 () - SCAVENGE BACKEND PAGE SIZE: 16384 () - SCAVENGE HASH USERS CACHE CAPACITY: 100000 () - SCAVENGE HISTORY MAX AGE: 30 () - SKIP DB VERIFY: False () - SKIP INDEX VERIFY: False () - STATS STORAGE: File () - STREAM EXISTENCE FILTER SIZE: 256000000 () - UNBUFFERED: False () - UNSAFE DISABLE FLUSH TO DISK: False () - UNSAFE IGNORE HARD DELETE: False () - USE INDEX BLOOM FILTERS: True () - WRITE STATS TO DB: False () - WRITE THROUGH: False () - WRITE TIMEOUT MS: 2000 () - - Default User Options: - DEFAULT ADMIN PASSWORD: **** () - DEFAULT OPS PASSWORD: **** () - - Dev mode Options: - DEV: False () - REMOVE DEV CERTS: False () - - gRPC Options: - KEEP ALIVE INTERVAL: 10000 () - KEEP ALIVE TIMEOUT: 10000 () - - Interface Options: - ADVERTISE HOST TO CLIENT AS: () - ADVERTISE HTTP PORT TO CLIENT AS: 0 () - ADVERTISE NODE PORT TO CLIENT AS: 0 () - CONNECTION PENDING SEND BYTES THRESHOLD: 10485760 () - CONNECTION QUEUE SIZE THRESHOLD: 50000 () - DISABLE ADMIN UI: False () - DISABLE GOSSIP ON HTTP: False () - DISABLE INTERNAL TCP TLS: False () - DISABLE STATS ON HTTP: False () - ENABLE ATOM PUB OVER HTTP: False () - ENABLE TRUSTED AUTH: False () - ENABLE UNIX SOCKET: False () - EXT HOST ADVERTISE AS: () - EXT IP: 127.0.0.1 () - GOSSIP ON SINGLE NODE: () - HTTP PORT: 2113 () - HTTP PORT ADVERTISE AS: 0 () - INT HOST ADVERTISE AS: () - INT IP: 127.0.0.1 () - INT TCP HEARTBEAT INTERVAL: 700 () - INT TCP HEARTBEAT TIMEOUT: 700 () - INT TCP PORT: 1112 () - INT TCP PORT ADVERTISE AS: 0 () - NODE HEARTBEAT INTERVAL: 2000 () - NODE HEARTBEAT TIMEOUT: 1000 () - NODE HOST ADVERTISE AS: () - NODE IP: 127.0.0.1 () - NODE PORT: 2113 () - NODE PORT ADVERTISE AS: 0 () - REPLICATION HEARTBEAT INTERVAL: 700 () - REPLICATION HEARTBEAT TIMEOUT: 700 () - REPLICATION HOST ADVERTISE AS: () - REPLICATION IP: 127.0.0.1 () - REPLICATION PORT: 1112 () - REPLICATION TCP PORT ADVERTISE AS: 0 () - - Logging Options: - DISABLE LOG FILE: False () - LOG: /var/log/eventstore () - LOG CONFIG: logconfig.json () - LOG CONSOLE FORMAT: Plain () - LOG FILE INTERVAL: Day () - LOG FILE RETENTION COUNT: 31 () - LOG FILE SIZE: 1073741824 () - LOG LEVEL: Default () - - Projection Options: - FAULT OUT OF ORDER PROJECTIONS: False () - PROJECTION COMPILATION TIMEOUT: 500 () - PROJECTION EXECUTION TIMEOUT: 250 () - PROJECTION THREADS: 3 () - PROJECTIONS QUERY EXPIRY: 5 () - RUN PROJECTIONS: None () - START STANDARD PROJECTIONS: False () - -``` -::: - -::: note -Version 21.6 introduced a stricter configuration check: the server will _not start_ when an unknown -configuration options is passed in either the configuration file, environment variable or command line. - -E.g: the following will prevent the server from starting: - -* `--UnknownConfig` on the command line -* `EVENTSTORE_UnknownConfig` through environment variable -* `UnknownConfig: value` in the config file - -And will output on `stdout` -only: _Error while parsing options: The option UnknownConfig is not a known option. (Parameter 'UnknownConfig')_. 
-::: - -## Autoconfigured options - -Some options are configured at startup to make better use of the available resources on larger instances or -machines. - -These options are `StreamInfoCacheCapacity`, `ReaderThreadsCount`, and `WorkerThreads`. - -Autoconfiguration does not apply in containerized environments. - -### StreamInfoCacheCapacity - -This option sets the maximum number of entries to keep in the stream info cache. This is the lookup that -contains the information of any stream that has recently been read or written to. Having entries in this cache -significantly improves write and read performance to cached streams on larger databases. - -By default, the cache dynamically resizes according to the amount of free memory. The minimum that it can be set to is 100,000 entries. - -| Format | Syntax | -|:---------------------|:----------------------------------------| -| Command line | `--stream-info-cache-capacity` | -| YAML | `StreamInfoCacheCapacity` | -| Environment variable | `EVENTSTORE_STREAM_INFO_CACHE_CAPACITY` | - -The option is set to 0 by default, which enables dynamic resizing. The default on previous versions of -EventStoreDb was 100,000 entries. - -::: note -The default value of 0 for `StreamInfoCacheCapacity` might not always be the best value for optimal performance. Ideally, it should be set to double the number of streams in the anticipated working set. - -The total number of streams can be obtained by checking the event count in the `$streams` system stream. This stream is created by the [$streams system projection](projections.md#streams-projection). - -It should be noted that the total number of streams does not necessarily give you the anticipated working set. The working set of streams is the set of streams that you intend on actively reading, writing, and/or subscribing to. This can be much lower than the total number of streams in certain cases, especially in systems that have many short-lived streams. -::: - -### ReaderThreadsCount - -This option configures the number of reader threads available to EventStoreDb. Having more reader threads -allows more concurrent reads to be processed. - -The reader threads count will be set at startup to twice the number of available processors, with a minimum of -4 and a maximum of 16 threads. - -| Format | Syntax | -|:---------------------|:----------------------------------| -| Command line | `--reader-threads-count` | -| YAML | `ReaderThreadsCount` | -| Environment variable | `EVENTSTORE_READER_THREADS_COUNT` | - -The option is set to 0 by default, which enables autoconfiguration. The default on previous versions of -EventStoreDb was 4 threads. - -::: warning -Increasing the reader threads count too high can cause read timeouts if your disk cannot handle the -increased load. -::: - -### WorkerThreads - -The `WorkerThreads` option configures the number of threads available to the pool of worker services. - -At startup the number of worker threads will be set to 10 if there are more than 4 reader threads. Otherwise, -it will be set to have 5 threads available. - -| Format | Syntax | -|:---------------------|:----------------------------| -| Command line | `--worker-threads` | -| YAML | `WorkerThreads` | -| Environment variable | `EVENTSTORE_WORKER_THREADS` | - -The option is set to 0 by default, which enables autoconfiguration. The default on previous versions of -EventStoreDb was 5 threads. 
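If autoconfiguration is not desired, the three options above can be pinned to explicit values in the configuration file. A minimal sketch with illustrative numbers only; choose values that match your own workload:

```yaml:no-line-numbers
---
# Explicit values disable autoconfiguration for these options.
# The numbers below are illustrative, not recommendations.
StreamInfoCacheCapacity: 200000   # minimum allowed value is 100000
ReaderThreadsCount: 8
WorkerThreads: 10
```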
- -## Plugins configuration - -The commercial edition of EventStoreDB ships with several plugins that augment the behavior of the open source server. Each plugin is documented in relevant sections. For example, under Security, you will find the User Certificates plugin documentation. - -The plugins (apart from the `ldap` plugin) are configured separately to the main server configuration and can be configured via `json` files and `environment variables`. - -Environment variables take precedence. - -#### JSON files - -Each configurable plugin comes with a sample JSON file in the `/plugins/` directory. -Here is an example file called `user-certificates-plugin-example.json` to enable the user certificates plugin. - -```json -{ - "EventStore": { - "Plugins": { - "UserCertificates": { - "Enabled": true - } - } - } -} -``` - -The system looks for JSON configuration files in a `/config/` directory, and on Linux and OS X, the server additionally looks in `/etc/eventstore/config/`. To take effect, JSON configuration must be saved to one of these locations. - -The JSON configuration may be split across multiple files or combined into a single file. Apart from the `.json` extension, the names of the files is not important. - -#### Environment variables - -Any configuration can alternatively be provided via environment variables. - -Use `__` as the level delimiter. - -For example, the key configured above in json can also be set with the following environment variable: - -```:no-line-numbers -EventStore__Plugins__UserCertificates__Enabled -``` diff --git a/docs/db-config.md b/docs/db-config.md deleted file mode 100644 index 11ff3274dc3..00000000000 --- a/docs/db-config.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -title: Database ---- - -## Database settings - -On this page, you find settings that tune the database server behaviour. Only modify these settings if you know what you are doing or when requested by Event Store support personnel. - -### Database location - -EventStoreDB has a single database, which is spread across ever-growing number of physical files on the file system. Those files are called chunks and new data is always appended to the end of the latest chunk. When the chunk grows over 256 MiB, the server closes the chunk and opens a new one. - -Normally, you'd want to keep the database files separated from the OS and other application files. The `Db` setting tells EventStoreDB where to put those chunk files. If the database server doesn't find anything at the specified location, it will create a new database. - -| Format | Syntax | -|:---------------------|:----------------| -| Command line | `--db` | -| YAML | `Db` | -| Environment variable | `EVENTSTORE_DB` | - -**Default**: the default database location is platform specific. On Windows, the database will be stored in the `data` directory inside the EventStoreDB installation location. On Linux, it will be `/var/lib/eventstore`. - -### In-memory database - -When running EventStoreDB for educational purposes or in an automated test environment, you might want to prevent it from saving any data to the disk. EventStoreDB can keep the data in memory as soon as it has enough available RAM. When you shut down the instance that uses in-memory database, all the data will be lost. 
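For example, a disposable test node can be run in a container with the in-memory database enabled. This is only a sketch; the image name, tag, and port mapping are assumptions that you may need to adapt:

```bash:no-line-numbers
# All data is held in RAM and discarded as soon as the container stops.
docker run --rm -it -p 2113:2113 eventstore/eventstore:latest --mem-db --insecure
```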
- -| Format | Syntax | -|:---------------------|:--------------------| -| Command line | `--mem-db` | -| YAML | `MemDb` | -| Environment variable | `EVENTSTORE_MEM_DB` | - -**Default**: `false` - -### Skip database verification - -When the database node restarts, it checks the database files to ensure they aren't corrupted. It is a lengthy process and can take hours on a large database. EventStoreDB normally flushes every write to disk, so database files are unlikely to get corrupted. In an environment where nodes restart often for some reason, you might want to disable the database verification to allow faster startup of the node. - -| Format | Syntax | -|:---------------------|:----------------------------| -| Command line | `--skip-db-verify` | -| YAML | `SkipDbVerify` | -| Environment variable | `EVENTSTORE_SKIP_DB_VERIFY` | - -**Default**: `false` - -### Chunk cache - -You can increase the number of chunk files that are cached if you have free memory on the nodes. - -The [stats response](diagnostics/README.md#statistics) contains two fields: `es-readIndex-cachedRecord` and `es-readIndex-notCachedRecord`. Statistic values for those fields tell you how many times a chunk file was retrieved from the cache and from the disk. We expect the two most recent chunks (the current chunk and the previous one) to be most frequently accessed and therefore cached. - -If you observe that the `es-readIndex-notCachedRecord` stats value gets significantly higher than the `es-readIndex-cachedRecord`, you can try adjusting the chunk cache. - -The setting `ChunkCacheSize` tells the server how much memory it can use to cache chunks (in bytes). The chunk size is 256 MiB max, so the default cache size (two chunks) is 0.5 GiB. To increase the cache size to four chunks, you can set the value to 1 GiB (`1073741824` bytes). - -Remember that only the latest chunks get cached. Also consider that the OS has its own file cache and increasing the chunk cache might not bring the desired performance benefit. - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--chunks-cache-size` | -| YAML | `ChunksCacheSize ` | -| Environment variable | `EVENTSTORE_CHUNKS_CACHE_SIZE` | - -**Default**: `536871424` - -You would also need to adjust the `CachedChunks` setting to tell the server how many chunk files you want to keep in cache. For example, to double the number of cached chunks, set the `CachedChunks` setting to `4`. - -| Format | Syntax | -|:---------------------|:---------------------------| -| Command line | `--cached-chunks` | -| YAML | `CachedChunks ` | -| Environment variable | `EVENTSTORE_CACHED_CHUNKS` | - -**Default**: `-1` (all) - -### Prepare and Commit timeouts - -Having prepare and commit timeouts of 2000 ms (default) means that any write done on the cluster may take up to 2000 ms before the server replies to the client that this write has timed out. - -Depending on your client operation timeout settings (default is 7 seconds), increasing the timeout may block a client for up to the minimum of those two timeouts which may reduce the throughput if set to a large value. The server also needs to keep track of all these writes and retries in memory until the time out is over. If a large number of them accumulate, it may slow things down. 
- -| Format | Syntax | -|:---------------------|:--------------------------------| -| Command line | `--prepare-timeout-ms` | -| YAML | `PrepareTimeoutMs` | -| Environment variable | `EVENTSTORE_PREPARE_TIMEOUT_MS` | - -**Default**: `2000` (in milliseconds) - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--commit-timeout-ms` | -| YAML | `CommitTimeoutMs` | -| Environment variable | `EVENTSTORE_COMMIT_TIMEOUT_MS` | - -**Default**: `2000` (in milliseconds) - -### Disable flush to disk - -:::warning -Using this option might cause data loss. -::: - -This will prevent EventStoreDB from forcing the flush to disk after writes. Please note that this is unsafe in case of a power outage. - -With this option enabled, EventStoreDB will still write data to the disk at the application level but not necessarily at the OS level. Usually, the OS should flush its buffers at regular intervals or when a process exits but it is something that's opaque to EventStoreDB. - -| Format | Syntax | -|:---------------------|:------------------------------------------| -| Command line | `--unsafe-disable-flush-to-disk` | -| YAML | `UnsafeDisableFlushToDisk` | -| Environment variable | `EVENTSTORE_UNSAFE_DISABLE_FLUSH_TO_DISK` | - -**Default**: `false` - -### Minimum flush delay - -The minimum flush delay in milliseconds. - - - -| Format | Syntax | -|:---------------------|:---------------------------------| -| Command line | `--min-flush-delay-ms` | -| YAML | `MinFlushDelayMs` | -| Environment variable | `EVENTSTORE_MIN_FLUSH_DELAY_MS ` | - -**Default**: `2` (ms) - -## Threading - -### Worker threads - -A variety of undifferentiated work is carried out on general purpose worker threads, including sending over a network, authentication, and completion of HTTP requests. - -Increasing the number of threads beyond that necessary to satisfy the workload generated by other components in the system may result in additional unnecessary context switching overhead. - -| Format | Syntax | -|:---------------------|:----------------------------| -| Command line | `--worker-threads` | -| YAML | `WorkerThreads` | -| Environment variable | `EVENTSTORE_WORKER_THREADS` | - -**Default**: `5` - -### Reader threads count - -Reader threads are used for all read operations on data files - whether the requests originate from the client or internal requests to the database. There are a number of things that cause operations to be dispatched to reader threads, including: - -- All index reads, including those to service stream and event number-based reads and for idempotent handling of write operations when an expected version is specified. -- Data to service requests originating from either the HTTP or client APIs. -- Projections needing to read data for processing. -- Authorization needing to read access control lists from stream metadata. - -| Format | Syntax | -|:---------------------|:----------------------------------| -| Command line | `--reader-threads-count` | -| YAML | `ReaderThreadsCount` | -| Environment variable | `EVENTSTORE_READER_THREADS_COUNT` | - -**Default**: `4` - -Reads are queued until a reader thread becomes available to service them. If an operation doesn't complete within an internal deadline window, a disk operation is not dispatched by the worker thread which processes the operation. - -The size of read operations is dependent on the size of the events appended, not on the database chunk size, which has a fixed logical size. 
Larger reads (subject to the operating system page size) result in more time being spent in system calls and less availability of reader threads. - -A higher reader count can be useful, if disks are able to support more concurrent operations. Context switching incurs additional costs in terms of performance. If disks are already saturated, adding more reader threads can exacerbate that issue and lead to more failed requests. - -Increasing the count of reader threads can improve performance up to a point, but it is likely to rapidly tail off once that limit is reached. - diff --git a/docs/diagnostics/README.md b/docs/diagnostics/README.md deleted file mode 100644 index 55861dda6e1..00000000000 --- a/docs/diagnostics/README.md +++ /dev/null @@ -1,365 +0,0 @@ -# Diagnostics - -EventStoreDB provides several ways to diagnose and troubleshoot issues. - -- [Logging](logs.md): structured or plain-text logs on the console and in log files. -- [Metrics](metrics.md): collect standard metrics using Prometheus or OpenTelemetry. -- [Stats](#statistics): stats collection and HTTP endpoint. - -You can also use external tools to measure the performance of EventStoreDB and monitor the cluster health. Learn more on the [Integrations](./integrations.md) page. - -## Statistics - -EventStoreDB servers collect internal statistics and make it available via HTTP over -the `https://:2113/stats` in JSON format. Here, `2113` is the default HTTP port. Monitoring applications -and metric collectors can use this endpoint to gather the information about the cluster node. The `stats` -endpoint only exposes information about the node where you fetch it from and doesn't contain any cluster -information. - -What you see in the `stats` endpoint response is the last collected state of the server. The server collects -this information using events that are appended to the statistics stream. Each node has one. We use a reserved -name for the stats stream, `$stats-`. For example, for a single node running locally the stream -name would be `$stats-127.0.0.1:2113`. - -As all other events, stats events are also linked in the `$all` stream. These events have a reserved event -type `$statsCollected`. 
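To inspect the most recently collected values directly, query the endpoint with any HTTP client. A sketch, assuming default admin credentials and a single node on the default HTTP port:

```bash:no-line-numbers
# The credentials and the -k flag (self-signed certificate) are assumptions;
# adjust them to match your deployment.
curl -s -k -u admin:changeit https://127.0.0.1:2113/stats | jq .
```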
- -::: details Click here to see an example of a stats event - -```json -{ - "proc-startTime": "2020-06-25T10:13:26.8281750Z", - "proc-id": 5465, - "proc-mem": 118648832, - "proc-cpu": 2.44386363, - "proc-cpuScaled": 0.152741477, - "proc-threadsCount": 10, - "proc-contentionsRate": 0.9012223, - "proc-thrownExceptionsRate": 0.0, - "sys-cpu": 100.0, - "sys-freeMem": 25100288, - "proc-gc-allocationSpeed": 0.0, - "proc-gc-gen0ItemsCount": 8, - "proc-gc-gen0Size": 0, - "proc-gc-gen1ItemsCount": 2, - "proc-gc-gen1Size": 0, - "proc-gc-gen2ItemsCount": 0, - "proc-gc-gen2Size": 0, - "proc-gc-largeHeapSize": 0, - "proc-gc-timeInGc": 0.0, - "proc-gc-totalBytesInHeaps": 0, - "proc-tcp-connections": 0, - "proc-tcp-receivingSpeed": 0.0, - "proc-tcp-sendingSpeed": 0.0, - "proc-tcp-inSend": 0, - "proc-tcp-measureTime": "00:00:19.0534210", - "proc-tcp-pendingReceived": 0, - "proc-tcp-pendingSend": 0, - "proc-tcp-receivedBytesSinceLastRun": 0, - "proc-tcp-receivedBytesTotal": 0, - "proc-tcp-sentBytesSinceLastRun": 0, - "proc-tcp-sentBytesTotal": 0, - "es-checksum": 1613144, - "es-checksumNonFlushed": 1613144, - "sys-drive-/System/Volumes/Data-availableBytes": 545628151808, - "sys-drive-/System/Volumes/Data-totalBytes": 2000481927168, - "sys-drive-/System/Volumes/Data-usage": "72%", - "sys-drive-/System/Volumes/Data-usedBytes": 1454853775360, - "es-queue-Index Committer-queueName": "Index Committer", - "es-queue-Index Committer-groupName": "", - "es-queue-Index Committer-avgItemsPerSecond": 0, - "es-queue-Index Committer-avgProcessingTime": 0.0, - "es-queue-Index Committer-currentIdleTime": "0:00:00:29.9895180", - "es-queue-Index Committer-currentItemProcessingTime": null, - "es-queue-Index Committer-idleTimePercent": 100.0, - "es-queue-Index Committer-length": 0, - "es-queue-Index Committer-lengthCurrentTryPeak": 0, - "es-queue-Index Committer-lengthLifetimePeak": 0, - "es-queue-Index Committer-totalItemsProcessed": 0, - "es-queue-Index Committer-inProgressMessage": "", - "es-queue-Index Committer-lastProcessedMessage": "", - "es-queue-MainQueue-queueName": "MainQueue", - "es-queue-MainQueue-groupName": "", - "es-queue-MainQueue-avgItemsPerSecond": 14, - "es-queue-MainQueue-avgProcessingTime": 0.0093527972027972021, - "es-queue-MainQueue-currentIdleTime": "0:00:00:00.8050567", - "es-queue-MainQueue-currentItemProcessingTime": null, - "es-queue-MainQueue-idleTimePercent": 99.986616840364917, - "es-queue-MainQueue-length": 0, - "es-queue-MainQueue-lengthCurrentTryPeak": 3, - "es-queue-MainQueue-lengthLifetimePeak": 6, - "es-queue-MainQueue-totalItemsProcessed": 452, - "es-queue-MainQueue-inProgressMessage": "", - "es-queue-MainQueue-lastProcessedMessage": "Schedule", - "es-queue-MonitoringQueue-queueName": "MonitoringQueue", - "es-queue-MonitoringQueue-groupName": "", - "es-queue-MonitoringQueue-avgItemsPerSecond": 0, - "es-queue-MonitoringQueue-avgProcessingTime": 1.94455, - "es-queue-MonitoringQueue-currentIdleTime": "0:00:00:19.0601186", - "es-queue-MonitoringQueue-currentItemProcessingTime": null, - "es-queue-MonitoringQueue-idleTimePercent": 99.980537727681721, - "es-queue-MonitoringQueue-length": 0, - "es-queue-MonitoringQueue-lengthCurrentTryPeak": 0, - "es-queue-MonitoringQueue-lengthLifetimePeak": 0, - "es-queue-MonitoringQueue-totalItemsProcessed": 14, - "es-queue-MonitoringQueue-inProgressMessage": "", - "es-queue-MonitoringQueue-lastProcessedMessage": "GetFreshTcpConnectionStats", - "es-queue-PersistentSubscriptions-queueName": "PersistentSubscriptions", - 
"es-queue-PersistentSubscriptions-groupName": "", - "es-queue-PersistentSubscriptions-avgItemsPerSecond": 1, - "es-queue-PersistentSubscriptions-avgProcessingTime": 0.010400000000000001, - "es-queue-PersistentSubscriptions-currentIdleTime": "0:00:00:00.8052015", - "es-queue-PersistentSubscriptions-currentItemProcessingTime": null, - "es-queue-PersistentSubscriptions-idleTimePercent": 99.998954276430226, - "es-queue-PersistentSubscriptions-length": 0, - "es-queue-PersistentSubscriptions-lengthCurrentTryPeak": 0, - "es-queue-PersistentSubscriptions-lengthLifetimePeak": 0, - "es-queue-PersistentSubscriptions-totalItemsProcessed": 32, - "es-queue-PersistentSubscriptions-inProgressMessage": "", - "es-queue-PersistentSubscriptions-lastProcessedMessage": "PersistentSubscriptionTimerTick", - "es-queue-Projection Core #0-queueName": "Projection Core #0", - "es-queue-Projection Core #0-groupName": "Projection Core", - "es-queue-Projection Core #0-avgItemsPerSecond": 0, - "es-queue-Projection Core #0-avgProcessingTime": 0.0, - "es-queue-Projection Core #0-currentIdleTime": "0:00:00:29.9480513", - "es-queue-Projection Core #0-currentItemProcessingTime": null, - "es-queue-Projection Core #0-idleTimePercent": 100.0, - "es-queue-Projection Core #0-length": 0, - "es-queue-Projection Core #0-lengthCurrentTryPeak": 0, - "es-queue-Projection Core #0-lengthLifetimePeak": 0, - "es-queue-Projection Core #0-totalItemsProcessed": 2, - "es-queue-Projection Core #0-inProgressMessage": "", - "es-queue-Projection Core #0-lastProcessedMessage": "SubComponentStarted", - "es-queue-Projections Master-queueName": "Projections Master", - "es-queue-Projections Master-groupName": "", - "es-queue-Projections Master-avgItemsPerSecond": 0, - "es-queue-Projections Master-avgProcessingTime": 0.0, - "es-queue-Projections Master-currentIdleTime": "0:00:00:29.8467445", - "es-queue-Projections Master-currentItemProcessingTime": null, - "es-queue-Projections Master-idleTimePercent": 100.0, - "es-queue-Projections Master-length": 0, - "es-queue-Projections Master-lengthCurrentTryPeak": 0, - "es-queue-Projections Master-lengthLifetimePeak": 3, - "es-queue-Projections Master-totalItemsProcessed": 10, - "es-queue-Projections Master-inProgressMessage": "", - "es-queue-Projections Master-lastProcessedMessage": "RegularTimeout", - "es-queue-Storage Chaser-queueName": "Storage Chaser", - "es-queue-Storage Chaser-groupName": "", - "es-queue-Storage Chaser-avgItemsPerSecond": 94, - "es-queue-Storage Chaser-avgProcessingTime": 0.0043385023898035047, - "es-queue-Storage Chaser-currentIdleTime": "0:00:00:00.0002530", - "es-queue-Storage Chaser-currentItemProcessingTime": null, - "es-queue-Storage Chaser-idleTimePercent": 99.959003031702224, - "es-queue-Storage Chaser-length": 0, - "es-queue-Storage Chaser-lengthCurrentTryPeak": 0, - "es-queue-Storage Chaser-lengthLifetimePeak": 0, - "es-queue-Storage Chaser-totalItemsProcessed": 2835, - "es-queue-Storage Chaser-inProgressMessage": "", - "es-queue-Storage Chaser-lastProcessedMessage": "ChaserCheckpointFlush", - "es-queue-StorageReaderQueue #1-queueName": "StorageReaderQueue #1", - "es-queue-StorageReaderQueue #1-groupName": "StorageReaderQueue", - "es-queue-StorageReaderQueue #1-avgItemsPerSecond": 0, - "es-queue-StorageReaderQueue #1-avgProcessingTime": 0.22461000000000003, - "es-queue-StorageReaderQueue #1-currentIdleTime": "0:00:00:00.9863988", - "es-queue-StorageReaderQueue #1-currentItemProcessingTime": null, - "es-queue-StorageReaderQueue #1-idleTimePercent": 99.988756844383616, - 
"es-queue-StorageReaderQueue #1-length": 0, - "es-queue-StorageReaderQueue #1-lengthCurrentTryPeak": 0, - "es-queue-StorageReaderQueue #1-lengthLifetimePeak": 0, - "es-queue-StorageReaderQueue #1-totalItemsProcessed": 15, - "es-queue-StorageReaderQueue #1-inProgressMessage": "", - "es-queue-StorageReaderQueue #1-lastProcessedMessage": "ReadStreamEventsBackward", - "es-queue-StorageReaderQueue #2-queueName": "StorageReaderQueue #2", - "es-queue-StorageReaderQueue #2-groupName": "StorageReaderQueue", - "es-queue-StorageReaderQueue #2-avgItemsPerSecond": 0, - "es-queue-StorageReaderQueue #2-avgProcessingTime": 8.83216, - "es-queue-StorageReaderQueue #2-currentIdleTime": "0:00:00:00.8051068", - "es-queue-StorageReaderQueue #2-currentItemProcessingTime": null, - "es-queue-StorageReaderQueue #2-idleTimePercent": 99.557874170777851, - "es-queue-StorageReaderQueue #2-length": 0, - "es-queue-StorageReaderQueue #2-lengthCurrentTryPeak": 0, - "es-queue-StorageReaderQueue #2-lengthLifetimePeak": 0, - "es-queue-StorageReaderQueue #2-totalItemsProcessed": 16, - "es-queue-StorageReaderQueue #2-inProgressMessage": "", - "es-queue-StorageReaderQueue #2-lastProcessedMessage": "ReadStreamEventsForward", - "es-queue-StorageReaderQueue #3-queueName": "StorageReaderQueue #3", - "es-queue-StorageReaderQueue #3-groupName": "StorageReaderQueue", - "es-queue-StorageReaderQueue #3-avgItemsPerSecond": 0, - "es-queue-StorageReaderQueue #3-avgProcessingTime": 6.4189888888888893, - "es-queue-StorageReaderQueue #3-currentIdleTime": "0:00:00:02.8228372", - "es-queue-StorageReaderQueue #3-currentItemProcessingTime": null, - "es-queue-StorageReaderQueue #3-idleTimePercent": 99.710808119472517, - "es-queue-StorageReaderQueue #3-length": 0, - "es-queue-StorageReaderQueue #3-lengthCurrentTryPeak": 0, - "es-queue-StorageReaderQueue #3-lengthLifetimePeak": 0, - "es-queue-StorageReaderQueue #3-totalItemsProcessed": 14, - "es-queue-StorageReaderQueue #3-inProgressMessage": "", - "es-queue-StorageReaderQueue #3-lastProcessedMessage": "ReadStreamEventsForward", - "es-queue-StorageReaderQueue #4-queueName": "StorageReaderQueue #4", - "es-queue-StorageReaderQueue #4-groupName": "StorageReaderQueue", - "es-queue-StorageReaderQueue #4-avgItemsPerSecond": 0, - "es-queue-StorageReaderQueue #4-avgProcessingTime": 0.36447, - "es-queue-StorageReaderQueue #4-currentIdleTime": "0:00:00:01.8144419", - "es-queue-StorageReaderQueue #4-currentItemProcessingTime": null, - "es-queue-StorageReaderQueue #4-idleTimePercent": 99.981747643099709, - "es-queue-StorageReaderQueue #4-length": 0, - "es-queue-StorageReaderQueue #4-lengthCurrentTryPeak": 0, - "es-queue-StorageReaderQueue #4-lengthLifetimePeak": 0, - "es-queue-StorageReaderQueue #4-totalItemsProcessed": 14, - "es-queue-StorageReaderQueue #4-inProgressMessage": "", - "es-queue-StorageReaderQueue #4-lastProcessedMessage": "ReadStreamEventsForward", - "es-queue-StorageWriterQueue-queueName": "StorageWriterQueue", - "es-queue-StorageWriterQueue-groupName": "", - "es-queue-StorageWriterQueue-avgItemsPerSecond": 0, - "es-queue-StorageWriterQueue-avgProcessingTime": 0.0, - "es-queue-StorageWriterQueue-currentIdleTime": "0:00:00:29.9437790", - "es-queue-StorageWriterQueue-currentItemProcessingTime": null, - "es-queue-StorageWriterQueue-idleTimePercent": 100.0, - "es-queue-StorageWriterQueue-length": 0, - "es-queue-StorageWriterQueue-lengthCurrentTryPeak": 0, - "es-queue-StorageWriterQueue-lengthLifetimePeak": 0, - "es-queue-StorageWriterQueue-totalItemsProcessed": 6, - 
"es-queue-StorageWriterQueue-inProgressMessage": "", - "es-queue-StorageWriterQueue-lastProcessedMessage": "WritePrepares", - "es-queue-Subscriptions-queueName": "Subscriptions", - "es-queue-Subscriptions-groupName": "", - "es-queue-Subscriptions-avgItemsPerSecond": 1, - "es-queue-Subscriptions-avgProcessingTime": 0.057019047619047622, - "es-queue-Subscriptions-currentIdleTime": "0:00:00:00.8153708", - "es-queue-Subscriptions-currentItemProcessingTime": null, - "es-queue-Subscriptions-idleTimePercent": 99.993992971356, - "es-queue-Subscriptions-length": 0, - "es-queue-Subscriptions-lengthCurrentTryPeak": 0, - "es-queue-Subscriptions-lengthLifetimePeak": 0, - "es-queue-Subscriptions-totalItemsProcessed": 31, - "es-queue-Subscriptions-inProgressMessage": "", - "es-queue-Subscriptions-lastProcessedMessage": "CheckPollTimeout", - "es-queue-Timer-queueName": "Timer", - "es-queue-Timer-groupName": "", - "es-queue-Timer-avgItemsPerSecond": 14, - "es-queue-Timer-avgProcessingTime": 0.038568989547038329, - "es-queue-Timer-currentIdleTime": "0:00:00:00.0002752", - "es-queue-Timer-currentItemProcessingTime": null, - "es-queue-Timer-idleTimePercent": 99.94364205726194, - "es-queue-Timer-length": 17, - "es-queue-Timer-lengthCurrentTryPeak": 17, - "es-queue-Timer-lengthLifetimePeak": 17, - "es-queue-Timer-totalItemsProcessed": 419, - "es-queue-Timer-inProgressMessage": "", - "es-queue-Timer-lastProcessedMessage": "ExecuteScheduledTasks", - "es-queue-Worker #1-queueName": "Worker #1", - "es-queue-Worker #1-groupName": "Workers", - "es-queue-Worker #1-avgItemsPerSecond": 2, - "es-queue-Worker #1-avgProcessingTime": 0.076058695652173922, - "es-queue-Worker #1-currentIdleTime": "0:00:00:00.8050943", - "es-queue-Worker #1-currentItemProcessingTime": null, - "es-queue-Worker #1-idleTimePercent": 99.982484504768721, - "es-queue-Worker #1-length": 0, - "es-queue-Worker #1-lengthCurrentTryPeak": 0, - "es-queue-Worker #1-lengthLifetimePeak": 0, - "es-queue-Worker #1-totalItemsProcessed": 73, - "es-queue-Worker #1-inProgressMessage": "", - "es-queue-Worker #1-lastProcessedMessage": "ReadStreamEventsForwardCompleted", - "es-queue-Worker #2-queueName": "Worker #2", - "es-queue-Worker #2-groupName": "Workers", - "es-queue-Worker #2-avgItemsPerSecond": 2, - "es-queue-Worker #2-avgProcessingTime": 0.19399347826086957, - "es-queue-Worker #2-currentIdleTime": "0:00:00:00.8356863", - "es-queue-Worker #2-currentItemProcessingTime": null, - "es-queue-Worker #2-idleTimePercent": 99.955350254886739, - "es-queue-Worker #2-length": 0, - "es-queue-Worker #2-lengthCurrentTryPeak": 0, - "es-queue-Worker #2-lengthLifetimePeak": 0, - "es-queue-Worker #2-totalItemsProcessed": 69, - "es-queue-Worker #2-inProgressMessage": "", - "es-queue-Worker #2-lastProcessedMessage": "PurgeTimedOutRequests", - "es-queue-Worker #3-queueName": "Worker #3", - "es-queue-Worker #3-groupName": "Workers", - "es-queue-Worker #3-avgItemsPerSecond": 2, - "es-queue-Worker #3-avgProcessingTime": 0.068475555555555567, - "es-queue-Worker #3-currentIdleTime": "0:00:00:00.8356754", - "es-queue-Worker #3-currentItemProcessingTime": null, - "es-queue-Worker #3-idleTimePercent": 99.984583460721979, - "es-queue-Worker #3-length": 0, - "es-queue-Worker #3-lengthCurrentTryPeak": 0, - "es-queue-Worker #3-lengthLifetimePeak": 0, - "es-queue-Worker #3-totalItemsProcessed": 68, - "es-queue-Worker #3-inProgressMessage": "", - "es-queue-Worker #3-lastProcessedMessage": "PurgeTimedOutRequests", - "es-queue-Worker #4-queueName": "Worker #4", - "es-queue-Worker #4-groupName": 
"Workers", - "es-queue-Worker #4-avgItemsPerSecond": 2, - "es-queue-Worker #4-avgProcessingTime": 0.040221428571428575, - "es-queue-Worker #4-currentIdleTime": "0:00:00:00.8356870", - "es-queue-Worker #4-currentItemProcessingTime": null, - "es-queue-Worker #4-idleTimePercent": 99.99154911144629, - "es-queue-Worker #4-length": 0, - "es-queue-Worker #4-lengthCurrentTryPeak": 0, - "es-queue-Worker #4-lengthLifetimePeak": 0, - "es-queue-Worker #4-totalItemsProcessed": 65, - "es-queue-Worker #4-inProgressMessage": "", - "es-queue-Worker #4-lastProcessedMessage": "PurgeTimedOutRequests", - "es-queue-Worker #5-queueName": "Worker #5", - "es-queue-Worker #5-groupName": "Workers", - "es-queue-Worker #5-avgItemsPerSecond": 2, - "es-queue-Worker #5-avgProcessingTime": 0.17759268292682928, - "es-queue-Worker #5-currentIdleTime": "0:00:00:00.8052165", - "es-queue-Worker #5-currentItemProcessingTime": null, - "es-queue-Worker #5-idleTimePercent": 99.9635548548067, - "es-queue-Worker #5-length": 0, - "es-queue-Worker #5-lengthCurrentTryPeak": 0, - "es-queue-Worker #5-lengthLifetimePeak": 0, - "es-queue-Worker #5-totalItemsProcessed": 70, - "es-queue-Worker #5-inProgressMessage": "", - "es-queue-Worker #5-lastProcessedMessage": "IODispatcherDelayedMessage", - "es-writer-lastFlushSize": 0, - "es-writer-lastFlushDelayMs": 0.0134, - "es-writer-meanFlushSize": 0, - "es-writer-meanFlushDelayMs": 0.0134, - "es-writer-maxFlushSize": 0, - "es-writer-maxFlushDelayMs": 0.0134, - "es-writer-queuedFlushMessages": 0, - "es-readIndex-cachedRecord": 676, - "es-readIndex-notCachedRecord": 0, - "es-readIndex-cachedStreamInfo": 171, - "es-readIndex-notCachedStreamInfo": 32, - "es-readIndex-cachedTransInfo": 0, - "es-readIndex-notCachedTransInfo": 0 -} -``` - -::: - -Stats stream has the max time-to-live set to 24 hours, so all the events that are older than 24 hours will be -deleted. - -### Stats period - -Using this setting you can control how often stats events are generated. By default, the node will produce one -event in 30 seconds. If you want to decrease network pressure on subscribers to the `$all` stream, you can -tell EventStoreDB to produce stats less often. - -| Format | Syntax | -| :------------------- | :---------------------------- | -| Command line | `--stats-period-sec` | -| YAML | `StatsPeriodSec` | -| Environment variable | `EVENTSTORE_STATS_PERIOD_SEC` | - -**Default**: `30` - -### Write stats to database - -As mentioned before, stats events are quite large and whilst it is sometimes beneficial to keep the stats -history, it is most of the time not necessary. Therefore, we do not write stats events to the database by -default. When this option is set to `true`, all the stats events will be persisted. - -As mentioned before, stats events have a TTL of 24 hours and when writing stats to the database is enabled, -you'd need to scavenge more often to release the disk space. - -| Format | Syntax | -| :------------------- | :----------------------------- | -| Command line | `--write-stats-to-db` | -| YAML | `WriteStatsToDb` | -| Environment variable | `EVENTSTORE_WRITE_STATS_TO_DB` | - -**Default**: `false` diff --git a/docs/diagnostics/integrations.md b/docs/diagnostics/integrations.md deleted file mode 100644 index 2cc1a4d4fdc..00000000000 --- a/docs/diagnostics/integrations.md +++ /dev/null @@ -1,371 +0,0 @@ ---- -title: "Integrations" ---- - -# Monitoring integrations - -EventStoreDB supports several methods to integrate with external monitoring and observability tools. 
Those include: - -- [OpenTelemetry](#opentelemetry-exporter): export metrics to an OpenTelemetry-compatible endpoint -- [Prometheus](#prometheus): collect metrics in Prometheus -- [Datadog](#datadog): monitor and measure the cluster with Datadog -- [ElasticSearch](#elasticsearch): this section describes how to collect EventStoreDB logs in ElasticSearch -- [Vector](#vector): collect metrics and logs to your APM tool using Vector - -## Prometheus - -You can collect EventStoreDB metrics to Prometheus and configure Grafana dashboards to monitor your deployment. -Event Store provides Prometheus support out of the box since version 23.6. Refer to [metrics](metrics.md) documentation to learn more. - -Older versions can be monitored by Prometheus using the community-supported exporter available in the [GitHub repository](https://github.com/marcinbudny/eventstore_exporter). - -## OpenTelemetry Exporter - - - -EventStoreDB passively exposes metrics for scraping on the `/metrics` endpoint. If you would like EventStoreDB to actively export the metrics, the _OpenTelemetry Exporter Plugin_ can be used. - -The OpenTelemetry Exporter plugin allows you to export EventStoreDB metrics to a specified endpoint using the [OpenTelemetry Protocol](https://opentelemetry.io/docs/specs/otel/protocol/) (OTLP). The following instructions will help you set up the exporter and customize its configuration, so you can receive, process, export and monitor metrics as needed. - -A number of APM providers natively support ingesting metrics using the OTLP protocol, so you might be able to directly use the OpenTelemetry Exporter to send metrics to your APM provider. Alternatively, you can export metrics to the OpenTelemetry Collector, which can then be configured to send metrics to a variety of backends. You can find out more about the [OpenTelemetry collector](https://opentelemetry.io/docs/collector/). - -### Configuration - -Refer to the general [plugins configuration](../configuration.md#plugins-configuration) guide to see how to configure plugins with JSON files and environment variables. - -Sample JSON configuration: -```json -{ - "OpenTelemetry": { - "Otlp": { - "Endpoint": "http://localhost:4317", - "Headers": "" - } - } -} -``` - -The configuration can specify: - -| Name | Description | -|-------------------------------|--------------------------------------------------------| -| OpenTelemetry__Otlp__Endpoint | Destination where the OTLP exporter will send the data | -| OpenTelemetry__Otlp__Headers | Optional headers for the connection | - -Headers are key-value pairs separated by commas. 
For example: -```:no-line-numbers -"Headers": "api-key=value,other-config-value=value" -``` - -EventStoreDB will log a message on startup confirming the metrics export to your specified endpoint: -```:no-line-numbers -OtlpExporter: Exporting metrics to http://localhost:4317/ every 15.0 seconds -``` - -The interval is taken from the `ExpectedScrapeIntervalSeconds` value in `metricsconfig.json` in the server installation directory: - -```:no-line-numbers -"ExpectedScrapeIntervalSeconds": 15 -``` - -### Troubleshooting - -| Symptom | Solution | -|------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| The OpenTelemetry Exporter plugin is not loaded | The OpenTelemetry Exporter plugin is only available in commercial editions. Check that it is present in `/plugins`.

If it is present, on startup the server will log a message similar to: `Loaded SubsystemsPlugin plugin: "otlp-exporter" "24.6.0.0".` | -| EventStoreDB logs a message on startup that it cannot find the configuration | The server logs a message: `OtlpExporter: No OpenTelemetry:Otlp configuration found. Not exporting metrics.`.

Check the configuration steps above. | - -## Datadog - -The best way to integrate EventStoreDB metrics with Datadog today is by using the [OpenTelemetry exporter]() built-in to the commercial version of the database. We currently don't support exporting logs via the exporter. - -You can use the community-supported integration to collect EventStoreDB logs and metrics in Datadog. - -Find out more details about the integration -in [Datadog documentation](https://docs.datadoghq.com/integrations/eventstore/). - -## Vector - -> Vector is a lightweight and ultra-fast tool for building observability pipelines. -> (from Vector website) - -You can use [Vector] for extracting metrics or logs from your self-managed EventStore server. - -It's also possible to collect metrics from the Event Store Cloud managed cluster or instance, as long as the -Vector agent is running on a machine that has a direct connection to the EventStoreDB server. You cannot, -however, fetch logs from Event Store Cloud using your own Vector agent. - -### Installation - -Follow the [installation instructions](https://vector.dev/docs/setup/installation/) provided by Vector to -deploy the agent. You can deploy and run it on the same machine where you run EventStoreDB server. If you run -EventStoreDB in Kubernetes, you can run Vector as a sidecar for each of the EventStoreDB pods. - -### Configuration - -Each Vector instance needs to be configured with sources and sinks. When configured properly, it will collect -information from each source, apply the necessary transformation (if needed), and send the transformed -information to the configured sink. - -[Vector] provides [many different sinks], you most probably will find your preferred monitoring platform among -those sinks. - -#### Collecting metrics - -There is an official [EventStoreDB source] that you can use to pull relevant metrics from your database. - -Below you can find an example that you can use in your `vector.toml` configuration file: - -```toml -[sources.eventstoredb_metrics] -type = "eventstoredb_metrics" -endpoint = "https://{hostname}:{http_port}/stats" -scrape_interval_secs = 3 -``` - -Here `hostname` is the EventStoreDB node hostname or the cluster DNS name, and `http_port` is the configured -HTTP port, which is `2113` by default. - -#### Collecting logs - -To collect logs, you can use the [file source] and configure it to target EventStoreDB log file. For log -collection, Vector must run on the same machine as EventStoreDB server as it collects the logs from files on -the local file system. - -```toml -[sources.eventstoredb_logs] -type = "file" -# If you changed the default log location, please update the filepath accordingly. -include = ["/var/log/eventstore"] -read_from = "end" -``` - -#### Example - -In this example, Vector runs on the same machine as EventStoreDB, collects metrics and logs, and then sends them to Datadog. Notice that despite the EventStoreDB HTTP is, in theory, accessible via `localhost`, it won't work if the server SSL certificate doesn't have `localhost` in the certificate CN or SAN. 
- -```toml -[sources.eventstoredb_metrics] -type = "eventstoredb_metrics" -endpoint = "https://node1.esdb.acme.company:2113/stats" -scrape_interval_secs = 10 - -[sources.eventstoredb_logs] -type = "file" -include = ["/var/log/eventstore"] -read_from = "end" - -[sinks.dd_metrics] -type = "datadog_metrics" -inputs = ["eventstoredb_metrics"] -api_key = "${DD_API_KEY}" -default_namespace = "service" - -[sinks.dd_logs] -type = "datadog_logs" -inputs = ["eventstoredb_logs"] -default_api_key = "${DD_API_KEY}" -compression = "gzip" -``` - -## ElasticSearch - -Elastic Stack is one of the most popular tools for ingesting and analyzing logs and statistics: -- [Elasticsearch](https://www.elastic.co/guide/en/elasticsearch/reference/8.2/index.html) was built for advanced filtering and text analysis. -- [Filebeat](https://www.elastic.co/guide/en/beats/filebeat/8.2/index.html) allows tailing files efficiently. -- [Logstash](https://www.elastic.co/guide/en/logstash/current/getting-started-with-logstash.html) enables log transformations and processing pipelines. -- [Kibana](https://www.elastic.co/guide/en/kibana/8.2/index.html) is a dashboard and visualization UI for Elasticsearch data. - -EventStoreDB exposes structured information through its logs and statistics, allowing straightforward integration with the tooling mentioned above. - -### Logstash - -Logstash is the plugin-based data processing component of the Elastic Stack which sends incoming data to Elasticsearch. It's excellent for building a text-based processing pipeline. It can also gather logs from files (although Elastic now recommends Filebeat for that; see more in the following paragraphs). Logstash needs to either be installed on the EventStoreDB node or have access to the log storage. The processing pipeline can be configured through the configuration file (e.g. `logstash.conf`). This file contains the three essential building blocks: -- input - source of logs, e.g. log files, system output, Filebeat. -- filter - processing pipeline, e.g. to modify, enrich, tag log data. -- output - place where we'd like to put transformed logs. Typically that contains Elasticsearch configuration. - -See the sample Logstash 8.2 configuration file.
It shows how to take the EventStoreDB log files, split them based on the log type (regular and stats) and output them to separate indices to Elasticsearch: - -```ruby -####################################################### -# EventStoreDB logs file input -####################################################### -input { - file { - path => "/var/log/eventstore/*/log*.json" - start_position => "beginning" - codec => json - } -} - -####################################################### -# Filter out stats from regular logs -# add respecting field with log type -####################################################### -filter { - # check if log path includes "log-stats" - # so pattern for stats - if [log][file][path] =~ "log-stats" { - mutate { - add_field => { - "log_type" => "stats" - } - } - } - else { - mutate { - add_field => { - "log_type" => "logs" - } - } - } -} - -####################################################### -# Send logs to Elastic -# Create separate indexes for stats and regular logs -# using field defined in the filter transformation -####################################################### -output { - elasticsearch { - hosts => [ "elasticsearch:9200" ] - index => 'eventstoredb-%{[log_type]}' - } -} -``` - -You can play with such configuration through the [sample docker-compose](https://github.com/EventStore/samples/blob/2829b0a90a6488e1eee73fad0be33a3ded7d13d2/Logging/Elastic/Logstash/docker-compose.yml). - -### Filebeat - -Logstash was an initial attempt by Elastic to provide a log harvester tool. However, it appeared to have performance limitations. Elastic came up with the [Beats family](https://www.elastic.co/beats/), which allows gathering data from various specialized sources (files, metrics, network data, etc.). Elastic recommends Filebeat as the log collection and shipment tool off the host servers. Filebeat uses a backpressure-sensitive protocol when sending data to Logstash or Elasticsearch to account for higher volumes of data. - -Filebeat can pipe logs directly to Elasticsearch and set up a Kibana data view. - -Filebeat needs to either be installed on the EventStoreDB node or have access to logs storage. The processing pipeline can be configured through the configuration file (e.g. `filebeat.yml`). This file contains the three essential building blocks: -- input - configuration for file source, e.g. if stored in JSON format. -- output - place where we'd like to put transformed logs, e.g. Elasticsearch, Logstash, -- setup - additional setup and simple transformations (e.g. Elasticsearch indices template, Kibana data view). - -See the sample Filebeat 8.2 configuration file. 
It shows how to take the EventStoreDB log files, output them to Elasticsearch prefixing index with `eventstoredb` and create a Kibana data view: - -```yml -####################################################### -# EventStoreDB logs file input -####################################################### -filebeat.inputs: - - type: log - paths: - - /var/log/eventstore/*/log*.json - json.keys_under_root: true - json.add_error_key: true - -####################################################### -# ElasticSearch direct output -####################################################### -output.elasticsearch: - index: "eventstoredb-%{[agent.version]}" - hosts: ["elasticsearch:9200"] - -####################################################### -# ElasticSearch dashboard configuration -# (index pattern and data view) -####################################################### -setup.dashboards: - enabled: true - index: "eventstoredb-*" - -setup.template: - name: "eventstoredb" - pattern: "eventstoredb-%{[agent.version]}" - -####################################################### -# Kibana dashboard configuration -####################################################### -setup.kibana: - host: "kibana:5601" - -``` - -You can play with such configuration through the [sample docker-compose](https://github.com/EventStore/samples/blob/2829b0a90a6488e1eee73fad0be33a3ded7d13d2/Logging/Elastic/Filebeat/docker-compose.yml). - -### Filebeat with Logstash - -Even though Filebeat can pipe logs directly to Elasticsearch and do a basic Kibana setup, you'd like to have more control and expand the processing pipeline. That's why for production, it's recommended to use both. Multiple Filebeat instances (e.g. from different EventStoreDB clusters) can collect logs and pipe them to Logstash, which will play an aggregator role. Filebeat can output logs to Logstash, and Logstash can receive and process these logs with the Beats input. Logstash can transform and route logs to Elasticsearch instance(s). - -In that configuration, Filebeat should be installed on the EventStoreDB node (or have access to file logs) and define Logstash as output. See the sample Filebeat 8.2 configuration file. - -```yml -####################################################### -# EventStoreDB logs file input -####################################################### -filebeat.inputs: - - type: log - paths: - - /var/log/eventstore/*/log*.json - json.keys_under_root: true - json.add_error_key: true - -####################################################### -# Logstash output to transform and prepare logs -####################################################### -output.logstash: - hosts: ["logstash:5044"] -``` - -Then the sample Logstash 8.2 configuration file will look like the below. 
It shows how to take the EventStoreDB logs from Filebeat, split them based on the log type (regular and stats) and output them to separate indices to Elasticsearch: - -```ruby -####################################################### -# Filebeat input -####################################################### -input { - beats { - port => 5044 - } -} - -####################################################### -# Filter out stats from regular logs -# add respecting field with log type -####################################################### -filter { - # check if log path includes "log-stats" - # so pattern for stats - if [log][file][path] =~ "log-stats" { - mutate { - add_field => { - "log_type" => "stats" - } - } - } - else { - mutate { - add_field => { - "log_type" => "logs" - } - } - } -} - -####################################################### -# Send logs to Elastic -# Create separate indexes for stats and regular logs -# using field defined in the filter transformation -####################################################### -output { - elasticsearch { - hosts => [ "elasticsearch:9200" ] - index => 'eventstoredb-%{[log_type]}' - } -} -``` - -You can play with such configuration through the [sample docker-compose](https://github.com/EventStore/samples/blob/2829b0a90a6488e1eee73fad0be33a3ded7d13d2/Logging/Elastic/FilebeatWithLogstash/docker-compose.yml). - -[Vector]: https://vector.dev/docs/ -[EventStoreDB source]: https://vector.dev/docs/reference/configuration/sources/eventstoredb_metrics/ -[file source]: https://vector.dev/docs/reference/configuration/sources/file/ -[many different sinks]: https://vector.dev/docs/reference/configuration/sinks/ -[Console]: https://vector.dev/docs/reference/configuration/sinks/console/ diff --git a/docs/diagnostics/logs.md b/docs/diagnostics/logs.md deleted file mode 100644 index a063b857dd1..00000000000 --- a/docs/diagnostics/logs.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: "Logs" ---- - -# Database logs - -EventStoreDB logs its internal operations to the console (stdout) and to log files. The default location of -the log files and the way to change it is described [below](#logs-location). - -There are a few options to change the way how EventStoreDB produces logs and how detailed the logs should be. - -::: warning -The EventStoreDB logs may contain sensitive information such as stream names, usernames, and projection definitions. -::: - -## Log format - -EventStoreDB uses the structured logging in JSON format that is more machine-friendly and can be ingested by -vendor-specific tools like Logstash or Datadog agent. - -Here is how the structured log looks like: - -```json -{ - "PID": "6940", - "ThreadID": "23", - "Date": "2020-06-16T16:14:02.052976Z", - "Level": "Debug", - "Logger": "ProjectionManager", - "Message": "PROJECTIONS: Starting Projections Manager. 
(Node State : {state})", - "EventProperties": { - "state": "Master" - } -} -{ - "PID": "6940", - "ThreadID": "15", - "Date": "2020-06-16T16:14:02.052976Z", - "Level": "Info", - "Logger": "ClusterVNodeController", - "Message": "========== [{internalHttp}] Sub System '{subSystemName}' initialized.", - "EventProperties": { - "internalHttp": "127.0.0.1:2112", - "subSystemName": "Projections" - } -} -{ - "PID": "6940", - "ThreadID": "23", - "Date": "2020-06-16T16:14:02.052976Z", - "Level": "Debug", - "Logger": "MultiStreamMessageWriter", - "Message": "PROJECTIONS: Resetting Worker Writer", - "EventProperties": {} -} -{ - "PID": "6940", - "ThreadID": "23", - "Date": "2020-06-16T16:14:02.055000Z", - "Level": "Debug", - "Logger": "ProjectionCoreCoordinator", - "Message": "PROJECTIONS: SubComponent Started: {subComponent}", - "EventProperties": { - "subComponent": "EventReaderCoreService" - } -} -``` - -This format is aligned with [Serilog Compact JSON format](https://github.com/serilog/serilog-formatting-compact). - -## Logs location - -Log files are located in `/var/log/eventstore` for Linux and macOS, and in the `logs` subdirectory of the -EventStoreDB installation directory on Windows. You can change the log files location using the `Log` -configuration option. - -::: tip -Moving logs to a separate storage might improve the database performance if you keep the default -verbose log level. -::: - -| Format | Syntax | -|:---------------------|:-----------------| -| Command line | `--log` | -| YAML | `Log` | -| Environment variable | `EVENTSTORE_LOG` | - -For example, adding this line to the `eventstore.conf` file will force writing logs to -the `/tmp/eventstore/logs` directory: - -```text:no-line-numbers -Log: /tmp/eventstore/logs -``` - -## Log level - -You can change the level using the `LogLevel` setting: - -| Format | Syntax | -|:---------------------|:-----------------------| -| Command line | `--log-level` | -| YAML | `LogLevel` | -| Environment variable | `EVENTSTORE_LOG_LEVEL` | - -Acceptable values are: `Default`, `Verbose`, `Debug`, `Information`, `Warning`, `Error`, and `Fatal`. - -## Logging options - -You can tune the EventStoreDB logging further by using the logging options described below. - -### Log configuration file - -Specifies the location of the file which configures the logging levels of various components. - -| Format | Syntax | -|:---------------------|:------------------------| -| Command line | `--log-config` | -| YAML | `LogConfig` | -| Environment variable | `EVENTSTORE_LOG_CONFIG` | - -By default, the application directory (and `/etc/eventstore` on Linux and Mac) are checked. You may specify a -full path. - -### HTTP requests logging - -EventStoreDB can also log all the incoming HTTP requests, like many HTTP servers do. Requests are logged -before being processed, so unsuccessful requests are logged too. - -Use one of the following ways to enable the HTTP requests logging: - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--log-http-requests` | -| YAML | `LogHttpRequests` | -| Environment variable | `EVENTSTORE_LOG_HTTP_REQUESTS` | - -**Default**: `false`, logging HTTP requests is disabled by default. - -### Log failed authentication - -For security monitoring, you can enable logging failed authentication attempts by -setting `LogFailedAuthenticationAttempts` setting to true. 
- -| Format | Syntax | -|:---------------------|:------------------------------------------------| -| Command line | `--log-failed-authentication-attempts` | -| YAML | `LogFailedAuthenticationAttempts` | -| Environment variable | `EVENTSTORE_LOG_FAILED_AUTHENTICATION_ATTEMPTS` | - -**Default**: `false` - -### Log console format - -The format of the console logger. Use `Json` for structured log output. - -| Format | Syntax | -|:---------------------|:--------------------------------| -| Command line | `--log-console-format` | -| YAML | `LogConsoleFormat` | -| Environment variable | `EVENTSTORE_LOG_CONSOLE_FORMAT` | - -Acceptable values are: `Plain`, `Json` - -**Default**: `Plain` - -### Log file size - -The maximum size of each log file, in bytes. - -| Format | Syntax | -|:---------------------|:---------------------------| -| Command line | `--log-file-size` | -| YAML | `LogFileSize` | -| Environment variable | `EVENTSTORE_LOG_FILE_SIZE` | - -**Default**: `1GB` - -### Log file interval - -How often to rotate logs. - -| Format | Syntax | -|:---------------------|:-------------------------------| -| Command line | `--log-file-interval` | -| YAML | `LogFileInterval` | -| Environment variable | `EVENTSTORE_LOG_FILE_INTERVAL` | - -Acceptable values are: `Minute`, `Hour`, `Day`, `Week`, `Month`, `Year` - -**Default**: `Day` - -### Log file retention count - -Defines how many log files need to be kept on disk. By default, logs for the last month are available. Tune this setting if you need to have more history in the logs, or you need to save disk space. - -| Format | Syntax | -|:---------------------|:---------------------------------| -| Command line | `--log-file-retention-count` | -| YAML | `LogFileRetentionCount` | -| Environment variable | `EVENTSTORE_LOG_RETENTION_COUNT` | - -**Default**: `31` - -#### Disable log file - -You can completely disable logging to a file by changing the `DisableLogFile` option. - -| Format | Syntax | -|:---------------------|:------------------------------| -| Command line | `--disable-log-file` | -| YAML | `DisableLogFile` | -| Environment variable | `EVENTSTORE_DISABLE_LOG_FILE` | - -**Default**: `false` - -## Logs download - -The _Logs Download Plugin_ provides HTTP access to EventStoreDB logs so that they can be viewed without requiring file system access. - -::: tip -You can use this API to download log files from your managed EventStoreDB clusters in Event Store Cloud. -::: - -On startup the server will log a message similar to: -```:no-line-numbers -LogsEndpoint: Serving logs from "" at endpoint "/admin/logs" -``` - -Access the logs via the `/admin/logs` endpoint on your server. Construct the full URL as follows: - -```:no-line-numbers -http(s)://:/admin/logs -``` - -Example: -```:no-line-numbers -https://localhost:2113/admin/logs -``` - -Only authenticated users belonging to the `$admins` or `$ops` groups can use this endpoint. 
- -### Listing log files - -To list the current log files, issue a `GET` to the `/admin/logs` endpoint - -Example: -```bash:no-line-numbers -curl https://user:password@localhost:2113/admin/logs | jq -``` - -Sample response: -```json -[ - { - "name": "log-stats20240205.json", - "lastModified": "2024-02-05T13:14:14.2789475+00:00", - "size": 1058614 - }, - { - "name": "log20240205.json", - "lastModified": "2024-02-05T13:14:37.0781601+00:00", - "size": 158542 - } -] -``` - -The response is ordered from most recent change first, is limited to a maximum of 1000 items, and includes: - -| Name | Description | -|--------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| name | File name of the log file | -| createdAt | Timestamp of when the log file was created in [round-trip format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings#the-round-trip-o-o-format-specifier) (local time with time zone information) | -| lastModified | Timestamp of the last modification in [round-trip format](https://learn.microsoft.com/en-us/dotnet/standard/base-types/standard-date-and-time-format-strings#the-round-trip-o-o-format-specifier) (local time with time zone information) | -| size | Size of the log file in bytes | - -### Downloading log files - -To download a specific log file, append its name to the URL: - -Example: -```bash:no-line-numbers -curl https://user:password@localhost:2113/admin/logs/log20240205.json --output log20240205.json -``` - -### Troubleshooting - -- **404 Not Found:** The plugin is only available in commercial editions. Verify the plugin is loaded by checking the server startup logs. - -- **401 Unauthorized:** Confirm the credentials are correct and the user belongs to the `$ops` or `$admins` group. - -- **Log Files Directory Not Found:** Check the `NodeIp` and `NodePort` settings are current and not using the deprecated settings `HttpIp` or `HttpPort`. diff --git a/docs/diagnostics/metrics.md b/docs/diagnostics/metrics.md deleted file mode 100644 index 3b5482730ab..00000000000 --- a/docs/diagnostics/metrics.md +++ /dev/null @@ -1,586 +0,0 @@ -# Metrics - -EventStoreDB collects metrics in [Prometheus format](https://prometheus.io/docs/instrumenting/exposition_formats/#text-based-format), available on the `/metrics` endpoint. Prometheus can be configured to scrape this endpoint directly. The metrics are configured in `metricsconfig.json`. - -In addition, EventStoreDB can actively export metrics to a specified endpoint using the [OpenTelemetry Protocol](https://opentelemetry.io/docs/specs/otel/protocol/) (OTLP). - -## Metrics reference - -### Caches - -#### Cache hits and misses - -EventStoreDB tracks cache hits/misses metrics for `stream-info` and `chunk` caches. 
- -| Time series | Type | Description | -|:---------------------------------------------------------------------------|:-------------------------|:----------------------------------------| -| `eventstore_cache_hits_misses{cache=,kind=<"hits"\|"misses">}` | [Counter](#common-types) | Total hits/misses on _CACHE_NAME_ cache | - -Example configuration: -```json -"CacheHitsMisses": { - "StreamInfo": true, - "Chunk": false -} -``` - -Example output: -``` -# TYPE eventstore_cache_hits_misses counter -eventstore_cache_hits_misses{cache="stream-info",kind="hits"} 104329 1688157489545 -eventstore_cache_hits_misses{cache="stream-info",kind="misses"} 117 1688157489545 -``` - -#### Dynamic cache resources - -Certain caches that EventStoreDB uses are dynamic in nature i.e. their capacity scales up/down during their lifetime. EventStoreDB records metrics for resources being used by each such dynamic cache. - -| Time series | Type | Description | -|:---------------------------------------------------------------------------------|:-----------------------|:-----------------------------------------------------| -| `eventstore_cache_resources_bytes{cache=,kind=<"capacity"\|"size">}` | [Gauge](#common-types) | Current capacity/size of _CACHE_NAME_ cache in bytes | -| `eventstore_cache_resources_entries{cache=,kind="count"}` | [Gauge](#common-types) | Current number of entries in _CACHE_NAME_ cache | - -Example configuration: -```json -"CacheResources": true -``` - -Example output: -``` -# TYPE eventstore_cache_resources_bytes gauge -# UNIT eventstore_cache_resources_bytes bytes -eventstore_cache_resources_bytes{cache="LastEventNumber",kind="capacity"} 50000000 1688157491029 -eventstore_cache_resources_bytes{cache="LastEventNumber",kind="size"} 15804 1688157491029 - -# TYPE eventstore_cache_resources_entries gauge -# UNIT eventstore_cache_resources_entries entries -eventstore_cache_resources_entries{cache="LastEventNumber",kind="count"} 75 1688157491029 -``` - -### Checkpoints - -| Time series | Type | Description | -|:--------------------------------------------------------------------|:-----------------------|:---------------------------------------| -| `eventstore_checkpoints{name=,read="non-flushed"}` | [Gauge](#common-types) | Value for _CHECKPOINT_NAME_ checkpoint | - -Example configuration: -```json -"Checkpoints": { - "Replication": true, - "Chaser": false, - "Epoch": false, - "Index": false, - "Proposal": false, - "Truncate": false, - "Writer": false, - "StreamExistenceFilter": false -} -``` - -Example output: -``` -# TYPE eventstore_checkpoints gauge -eventstore_checkpoints{name="replication",read="non-flushed"} 613363 1688054162478 -``` - -### Elections Count - -This metric tracks the number of elections that have been completed. - -| Time series | Type | Description | -|:-----------------------------|:-------------------------|:-----------------------------| -| `eventstore_elections_count` | [Counter](#common-types) | Elections count in a cluster | - -Example configuration: -```json -"ElectionsCount": true -``` - -Example output: -``` -# TYPE eventstore_elections_count counter -eventstore_elections_count 0 1710188996949 -``` - -### Events - -These metrics track events written to and read from the server, including reads from caches. 
-
-## Metrics reference
-
-### Caches
-
-#### Cache hits and misses
-
-EventStoreDB tracks cache hits/misses metrics for `stream-info` and `chunk` caches.
-
-| Time series | Type | Description |
-|:------------|:-----|:------------|
-| `eventstore_cache_hits_misses{cache=<CACHE_NAME>,kind=<"hits"\|"misses">}` | [Counter](#common-types) | Total hits/misses on _CACHE_NAME_ cache |
-
-Example configuration:
-```json
-"CacheHitsMisses": {
-  "StreamInfo": true,
-  "Chunk": false
-}
-```
-
-Example output:
-```
-# TYPE eventstore_cache_hits_misses counter
-eventstore_cache_hits_misses{cache="stream-info",kind="hits"} 104329 1688157489545
-eventstore_cache_hits_misses{cache="stream-info",kind="misses"} 117 1688157489545
-```
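Once Prometheus scrapes these counters they can be turned into a hit ratio. A hedged example, assuming a Prometheus server at `localhost:9090` (an assumption, not part of this document) that already scrapes the node:

```bash
# Approximate stream-info cache hit ratio over the last 5 minutes:
# hits / (hits + misses).
curl -G http://localhost:9090/api/v1/query --data-urlencode \
  'query=sum(rate(eventstore_cache_hits_misses{cache="stream-info",kind="hits"}[5m])) / sum(rate(eventstore_cache_hits_misses{cache="stream-info"}[5m]))'
```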
`activity="push-from-peer",`
`status=<"successful"\|"failed">,`
-
-### Gossip
-
-Measures the round trip latency and processing time of gossip.
-Usually a node pushes new gossip to other nodes periodically or when its view of the cluster changes. Sometimes nodes pull gossip from each other if there is a suspected network problem.
-
-#### Gossip latency
-
-| Time series | Type | Description |
-|:------------|:-----|:------------|
-| `eventstore_gossip_latency_seconds_bucket{activity="pull-from-peer",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossips pulled from peers with latency less than or equal to _DURATION_ in seconds |
-| `eventstore_gossip_latency_seconds_bucket{activity="push-to-peer",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossips pushed to peers with latency less than or equal to _DURATION_ in seconds |
-
-#### Gossip processing
-
-| Time series | Type | Description |
-|:------------|:-----|:------------|
-| `eventstore_gossip_processing_duration_seconds_bucket{activity="push-from-peer",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossips pushed from peers that took less than or equal to _DURATION_ in seconds to process |
-| `eventstore_gossip_processing_duration_seconds_bucket{activity="request-from-peer",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossip requests from peers that took less than or equal to _DURATION_ in seconds to process |
-| `eventstore_gossip_processing_duration_seconds_bucket{activity="request-from-grpc-client",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossip requests from gRPC clients that took less than or equal to _DURATION_ in seconds to process |
-| `eventstore_gossip_processing_duration_seconds_bucket{activity="request-from-http-client",status=<"successful"\|"failed">,le=<DURATION>}` | [Histogram](#common-types) | Number of gossip requests from HTTP clients that took less than or equal to _DURATION_ in seconds to process |
-
-Example configuration:
-```json
-"Gossip": {
-  "PullFromPeer": false,
-  "PushToPeer": true,
-  "ProcessingPushFromPeer": false,
-  "ProcessingRequestFromPeer": false,
-  "ProcessingRequestFromGrpcClient": false,
-  "ProcessingRequestFromHttpClient": false
-}
-```
-
-Example output:
-```
-# TYPE eventstore_gossip_latency_seconds histogram
-# UNIT eventstore_gossip_latency_seconds seconds
-eventstore_gossip_latency_seconds_bucket{activity="push-to-peer",status="successful",le="0.005"} 8 1687972306948
-```
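Since these are histograms, latency percentiles can be derived from the `_bucket` series. A sketch, again assuming a Prometheus server at `localhost:9090` that scrapes the node:

```bash
# Approximate 95th percentile of successful gossip push latency.
curl -G http://localhost:9090/api/v1/query --data-urlencode \
  'query=histogram_quantile(0.95, sum by (le) (rate(eventstore_gossip_latency_seconds_bucket{activity="push-to-peer",status="successful"}[5m])))'
```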
`kind="unimplemented"}` | [Counter](#common-types) | Total gRPC requests made to unimplemented methods | -| `eventstore_incoming_grpc_calls{`
`kind="deadline-exceeded"}` | [Counter](#common-types) | Total gRPC requests for which deadline have exceeded | - -Example configuration: -```json -"IncomingGrpcCalls": { - "Current": true, - "Total": false, - "Failed": true, - "Unimplemented": false, - "DeadlineExceeded": false -} -``` - -Example output: -``` -# TYPE eventstore_current_incoming_grpc_calls gauge -eventstore_current_incoming_grpc_calls 1 1687963622074 - -# TYPE eventstore_incoming_grpc_calls counter -eventstore_incoming_grpc_calls{kind="failed"} 1 1687962877623 -``` - -#### Client protocol gRPC methods - -In addition, EventStoreDB also records metrics for each of client protocol gRPC methods: `StreamRead`, `StreamAppend`, `StreamBatchAppend`, `StreamDelete` and `StreamTombstone`. They are grouped together according to the mapping defined in the configuration. - -| Time series | Type | Description | -|:---------------------------------------------------------------------------------------------------------------|:---------------------------|:-------------------------------------------------------------------------------------------------| -| `eventstore_grpc_method_duration_seconds_bucket{`
-
-#### Client protocol gRPC methods
-
-In addition, EventStoreDB records metrics for each of the client protocol gRPC methods: `StreamRead`, `StreamAppend`, `StreamBatchAppend`, `StreamDelete` and `StreamTombstone`. They are grouped together according to the mapping defined in the configuration.
-
-| Time series | Type | Description |
-|:------------|:-----|:------------|
-| `eventstore_grpc_method_duration_seconds_bucket{activity=