diff --git a/.cargo/config.toml b/.cargo/config.toml index 5cafed729..0c2b119cb 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,13 +1,15 @@ [env] -COMPILE_ENV = { value = "compile-env", relative = true, force = false } -PATH = { value = "compile-env/bin", relative = true, force = true } -LIBCLANG_PATH = { value = "compile-env/lib", relative = true, force = true } -PKG_CONFIG_PATH = { value = "compile-env/sysroot/x86_64-unknown-linux-gnu/release/lib/pkgconfig", relative = true, force = true } +DATAPLANE_SYSROOT = { value = "sysroot", relative = true, force = false } +C_INCLUDE_PATH = { value = "sysroot/include", relative = true, force = false } +LIBRARY_PATH = { value = "sysroot/lib", relative = true, force = false } +GW_CRD_PATH = { value = "devroot/src/fabric/config/crd/bases", relative = true, force = false } +PKG_CONFIG_PATH = { value = "sysroot/lib/pkgconfig", relative = true, force = false } +LIBCLANG_PATH = { value = "devroot/lib", relative = true, force = false } +CLANG_PATH = { value = "devroot/bin/clang", relative = true, force = false } +LLVM_COV = { value = "devroot/bin/llvm-cov", relative = true, force = false } +LLVM_PROFDATA = { value = "devroot/bin/llvm-profdata", relative = true, force = false } +CARGO_LLVM_COV_TARGET_DIR = { value = "target/llvm-cov/build", relative = true, force = false } +CARGO_LLVM_COV_BUILD_DIR = { value = "target/llvm-cov/target", relative = true, force = false } [build] -target = "x86_64-unknown-linux-gnu" -rustc = "compile-env/bin/rustc" -rustflags = ["--cfg", "tokio_unstable"] - -[target.x86_64-unknown-linux-gnu] -runner = ["scripts/test-runner.sh"] +rustflags = ["--cfg=tokio_unstable"] diff --git a/.envrc b/.envrc index 41bfc8c5d..fc2c94cbd 100644 --- a/.envrc +++ b/.envrc @@ -1,46 +1,2 @@ -export PROJECT_DIR="$(pwd)" - -if [ -h "${PROJECT_DIR}/compile-env" ] || [ -d "${PROJECT_DIR}/compile-env" ]; then - export PATH="${PROJECT_DIR}/compile-env/bin:$PATH" - export 
LIBCLANG_PATH="${PROJECT_DIR}/compile-env/bin" - export COMPILE_ENV="${PROJECT_DIR}/compile-env" -else - >&2 echo "no compile environment found" - exit 0 -fi - -export NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1 - -CRT="-C target-feature=-crt-static" -DEBUG="-C debuginfo=full -C split-debuginfo=off -C dwarf-version=5" -LINKER="-C linker=${COMPILE_ENV}/bin/clang -C link-arg=--ld-path=${COMPILE_ENV}/bin/ld.lld" -RELRO="-C relro-level=full" -TARGET_CPU="-C target-cpu=x86-64-v3" - -RUSTFLAGS="${CRT} ${DEBUG} ${LINKER} ${RELRO} ${TARGET_CPU}" - -OPTIMIZE="-C opt-level=3 -C linker-plugin-lto -C lto=thin -C embed-bitcode=yes -C codegen-units=1" - -case ${PROFILE:-DEBUG} in - fuzz|FUZZ) - COVERAGE="-C instrument-coverage" - DEBUG_ASSERTIONS="-C debug-assertions=on" - OVERFLOW_CHECK="-C overflow-checks=on" - RUSTFLAGS="${RUSTFLAGS} ${COVERAGE} ${DEBUG_ASSERTIONS} ${OVERFLOW_CHECK}" - ;; - release|RELEASE) - RUSTFLAGS="${RUSTFLAGS} ${OPTIMIZE}" - ;; - debug|DEBUG) - DEBUG_ASSERTIONS="-C debug-assertions=on" - OPTIMIZE="-C opt-level=0" - OVERFLOW_CHECK="-C overflow-checks=on" - RUSTFLAGS="${RUSTFLAGS} ${OPTIMIZE} ${DEBUG_ASSERTIONS} ${OVERFLOW_CHECK}" - ;; - *) - >&2 echo "unknown profile" - exit 1 - ;; -esac - -export RUSTFLAGS +export RUSTC_BOOTSTRAP=1 +export PATH="$(pwd)/devroot/bin:$PATH" diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md index 60fe315b4..1d701ba8e 100644 --- a/.github/copilot-instructions.md +++ b/.github/copilot-instructions.md @@ -59,8 +59,7 @@ This comment should 2. an array which suggests links to any other open and relevant issues or pull requests you find - in this repository - in the [dpdk-sys repository][dpdk-sys] - - in the [gateway-proto repository][gateway-proto] - - in the [gateway repository][gateway] + - in the [fabric repository][fabric] - in the [testn repository][testn] - in the [dplane-rpc repository][dplane-rpc] 3. followed by a fenced code block in markdown format describing the suggested work to be done. 
@@ -99,7 +98,6 @@ The `other` tag should only be used if no other tag is appropriate. [dev-guide]: ../development/README.md [dpdk-sys]: https://github.com/githedgehog/dpdk-sys -[gateway-proto]: https://github.com/githedgehog/gateway-proto -[gateway]: https://github.com/githedgehog/gateway +[fabric]: https://github.com/githedgehog/fabric [testn]: https://github.com/githedgehog/testn [dplane-rpc]: https://github.com/githedgehog/dplane-rpc diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 838b23b22..afb498a7b 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -9,6 +9,7 @@ management, and validate changes before they are merged. - [Main Development Workflow](#main-development-workflow-devyml) - [Linting and Validation Workflows](#linting-and-validation-workflows) - [Dependency Management](#dependency-management) +- [Version Management](#version-management) - [License and Security Scanning](#license-and-security-scanning) - [Merge Control](#merge-control) @@ -18,44 +19,42 @@ management, and validate changes before they are merged. ### Purpose -Primary CI workflow that ensures developer experience is good by building and -testing the codebase in a vanilla Ubuntu environment using standard tooling. +Primary CI workflow that builds and tests the codebase using the nix-based +build system. All build steps run inside `nix-shell` to ensure a reproducible +toolchain matching what developers use locally. -The workflow runs several jobs. Some of them only run if users opt in, such as -the VLAB/HLAB tests. See the lists of dispatch options and Pull Requests labels -below for details. +Production artifacts are produced via nix builds in a separate CI workflow. ### Triggers - Pull Requests - Pushes to `main` branch +- Tag pushes (`v*`) - Merge group checks -- Manual dispatch (workflow\_dispatch) +- Manual dispatch (workflow_dispatch) ### Main steps 1. Check code changes to determine which tests are required -2. 
Build and test across multiple profiles and environments: - - Profiles: `debug`, `release`, `fuzz` - - Build modes: sterile (clean environment) and developer (local-like - environment) -3. Run cargo deny checks for license and security issues -4. Push container images (for sterile release/debug builds) -5. Execute tests: - - Regular tests using `cargo nextest` - - Shuttle tests (concurrent execution testing) - - Fuzz tests with coverage -6. Run `cargo clippy` for linting -7. Generate documentation with `rustdoc` -8. Upload test results and coverage to Codecov -9. Publish test reports with flaky test detection -10. Run VLAB/HLAB integration tests (virtual/hybrid lab environments) +2. Build and test across a matrix of nix targets and profiles: + - Nix targets: `tests.all`, `frr.dataplane`, `dataplane` + - Profiles: `debug`, `release` +3. Run `cargo deny` checks for license and security issues +4. Execute tests: + - Regular tests using `cargo nextest` (via `just test`) + - Shuttle tests (concurrent execution testing with `features=shuttle`) +5. Run `cargo clippy` for linting (via `just lint`) +6. Build documentation with `rustdoc` (via `just docs`) +7. Run doctests (via `just doctest`) +8. Push container images to GHCR (for non-test targets) +9. Run VLAB/HLAB integration tests (virtual/hybrid lab environments) +10. Publish release artifacts and bump fabricator on tag pushes ### Manual dispatch options - `debug_enabled` - Enable tmate session for debugging on failure - `debug_justfile` - Show debug statements from just recipes -- `run_vlab_tests` - Run VLAB (virtual lab) tests +- `skip_vlab_tests` - Skip VLAB (virtual lab) tests - `run_hlab_tests` - Run HLAB (hybrid lab) tests - `enable_release_tests` - Enable release tests for VLAB/HLAB @@ -64,19 +63,20 @@ below for details. 
- `ci:+vlab` - Run VLAB tests on this PR - `ci:+hlab` - Run HLAB tests on this PR - `ci:+release` - Enable release tests for VLAB/HLAB on this PR +- `ci:-upgrade` - Disable upgrade tests on this PR ### Job matrix -- Profiles: debug, release, fuzz -- Build modes: sterile and developer environments -- VLAB configurations: spine-leaf fabric mode, with/without gateway, - L2VNI/L3VNI VPC modes +- Nix targets: `tests.all` (runs tests, lints, docs), `frr.dataplane` + and `dataplane` (build and push containers) +- Profiles: `debug`, `release` +- VLAB configurations: spine-leaf fabric mode, L2VNI/L3VNI VPC modes, + with gateway enabled ### Artifacts -- Test results (JUnit XML) -- Coverage reports (Codecov JSON) -- Container images pushed to GitHub Container Registry +- Container images pushed to GitHub Container Registry (GHCR) +- Release containers published on tag pushes via `just push` --- @@ -84,7 +84,8 @@ below for details. ### Rust Code Formatting (`lint-cargo-fmt.yml`) -Ensure Rust code is consistently formatted using `rustfmt`. +Ensure Rust code is consistently formatted using `rustfmt`. Runs inside +`nix-shell` to use the same toolchain version that developers use locally. ### License Headers Check (`lint-license-headers.yml`) @@ -118,11 +119,12 @@ associated workflow file. Automatically check for and update Cargo dependencies, creating a Pull Request with the changes. Each package is upgraded in a separate commit to ease review. +Runs inside `nix-shell` for access to the nix-managed toolchain. #### Triggers - Weekly schedule: Mondays at 3:18 AM UTC -- Manual dispatch (workflow\_dispatch) +- Manual dispatch (workflow_dispatch) #### Manual dispatch options @@ -130,14 +132,34 @@ with the changes. Each package is upgraded in a separate commit to ease review. #### Main steps -1. Install required tools (`just`, `cargo-edit`, `cargo-deny`) -2. Set up build environment -3. Run `cargo deny check` (pre-upgrade, continue on error) -4. 
Run `cargo update` to update within version constraints -5. Run `cargo upgrade` to find and apply upgrades (including incompatible versions) -6. Create individual commits for each package upgrade -7. Run `cargo deny check` again (post-upgrade, must pass) -8. Create a Pull Request with all upgrade commits +1. Set up nix environment with cachix binary cache +2. Run `cargo deny check` (pre-upgrade, continue on error) +3. Run `cargo update` to update within version constraints +4. Run `cargo upgrade` to find and apply upgrades (including incompatible + versions) +5. Create individual commits for each package upgrade +6. Run `cargo deny check` again (post-upgrade, must pass) +7. Create a Pull Request with all upgrade commits + +--- + +## Version Management + +### Version Bump (`version-bump.yml`) + +#### Purpose + +Bump the dataplane version in `Cargo.toml` and create a Pull Request with the +change. Runs inside `nix-shell` for access to the nix-managed toolchain. + +#### Triggers + +- Manual dispatch only (workflow_dispatch) + +#### Manual dispatch options + +- `new_version` - Explicit version string (e.g. `0.15.0`). If not provided, + the minor version is bumped automatically. --- @@ -156,7 +178,7 @@ Reports are available on the [FOSSA Dashboard]. ### Mergeability Check (`mergeability.yml`) -Block Pull Request merges based if the `dont-merge` label is set. +Block Pull Request merges if the `dont-merge` label is set. Runs and checks for the presence of the label on various Pull Request events: `synchronize`, `opened`, `reopened`, `labeled`, `unlabeled`. 
diff --git a/.github/workflows/bump.yml b/.github/workflows/bump.yml index 5eb2e45ce..5be25e143 100644 --- a/.github/workflows/bump.yml +++ b/.github/workflows/bump.yml @@ -29,11 +29,9 @@ permissions: jobs: cargo-upgrades: runs-on: "lab" + env: + USER: "runner" steps: - - name: "login to image cache" - run: | - echo "$REGISTRY_PASSWORD" | docker login -u "$REGISTRY_USERNAME" --password-stdin "$REGISTRY_URL" - # Use a GitHub App token so that the generated PR can trigger CI - name: "Generate GitHub App token" id: "app-token" @@ -41,28 +39,25 @@ jobs: with: app-id: "${{ secrets.DP_APP_ID }}" private-key: "${{ secrets.DP_PRIVATE_KEY }}" - - name: "install rust" - uses: "dtolnay/rust-toolchain@stable" - - name: "install ansi2txt" - run: | - # this keeps our GH actions logs from getting messed up with color codes - echo 'deb [trusted=yes] https://apt.gabe565.com /' | sudo tee /etc/apt/sources.list.d/gabe565.list - sudo apt-get update - sudo apt-get install --yes --no-install-recommends ansi2txt - - name: "install binstall" - uses: "cargo-bins/cargo-binstall@main" - - name: "install upgrade tools" - run: | - cargo binstall -y cargo-edit # required to make `cargo upgrade` edit the Cargo.toml file - cargo binstall -y just - cargo binstall -y cargo-deny + - name: "Checkout" uses: "actions/checkout@v6" - - name: "refresh compile-env" - run: | - just --yes dpdp_sys_registry="$REGISTRY_URL" refresh-compile-env - just --yes fake-nix - - name: "deny check (pre)" + + - name: "Install nix" + uses: "cachix/install-nix-action@v31" + with: + github_access_token: "${{ secrets.GITHUB_TOKEN }}" + nix_path: "nixpkgs=channel:nixpkgs-unstable" + + - uses: "cachix/cachix-action@v14" + with: + name: "hedgehog" + # prettier-ignore + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + # prettier-ignore + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + + - name: "check-dependencies (pre)" # Confirm that upstream licenses have not changed in some way that prevents us from using them. 
# We want to do this both before and after we run cargo upgrade to make it easier to decide if # the problem existed before the upgrade ran, or if the license issue was introduced by the @@ -72,8 +67,9 @@ jobs: # We run our "pre" check with `continue-on-error` set to true because it is equally possible that the upgrade # _resolves_ the license / security issue we have had / would have had without the upgrade. run: | - just cargo deny check + nix-shell --run "just check-dependencies" continue-on-error: true + - name: "cargo upgrade" id: upgrade run: | @@ -83,7 +79,7 @@ jobs: # Run "cargo update" echo "::notice::Running cargo update" - just cargo update + nix-shell --run "cargo update" if ! git diff --quiet; then echo "Found changes after cargo update, creating commit" git add Cargo.lock @@ -92,8 +88,8 @@ jobs: # Check updates available with "cargo upgrade", # then bump each package individually through separate commits - echo "::notice::Looking for depencies to upgrade" - just cargo upgrade --incompatible=allow --dry-run | tee upgrade_output.txt + echo "::notice::Looking for dependencies to upgrade" + nix-shell --run "cargo upgrade --incompatible=allow --dry-run" | tee upgrade_output.txt sed '/^====/d; /^name .*old req .*new req/d; s/ .*//' upgrade_output.txt > list_packages.txt nb_upgrades=$(wc -l < list_packages.txt) @@ -104,7 +100,7 @@ jobs: while read -r package; do echo "bump(cargo)!: bump $package (cargo upgrade)" | tee commit_msg.txt echo '' | tee -a commit_msg.txt - just cargo upgrade --incompatible=allow --package "$package" | tee -a commit_msg.txt + nix-shell --run "cargo upgrade --incompatible=allow --package $package" | tee -a commit_msg.txt git add Cargo.lock Cargo.toml cli/Cargo.toml git commit -sF commit_msg.txt done < list_packages.txt @@ -137,9 +133,11 @@ jobs: } >> "${GITHUB_OUTPUT}" rm -f -- upgrade.log upgrade_output.txt list_packages.txt commit_msg.txt - - name: "deny check (post)" + + - name: "check-dependencies (post)" run: | - just cargo deny 
check + nix-shell --run "just check-dependencies" + - name: "Create Pull Request" uses: "peter-evans/create-pull-request@v8" with: diff --git a/.github/workflows/dev.yml b/.github/workflows/dev.yml index d13fba1bf..06e3c454a 100644 --- a/.github/workflows/dev.yml +++ b/.github/workflows/dev.yml @@ -5,7 +5,7 @@ # The artifacts produced by these builds are not intended to be used for anything other than # ensuring that the developer experience is good. -# Production artifacts are produced in a sterile environment (in another CI workflow). +# Production artifacts are produced via nix builds (in another CI workflow). name: "dev.yml" @@ -104,38 +104,45 @@ jobs: echo "version=v0-${commit_sha::9}" >> "$GITHUB_OUTPUT" echo "ref=${commit_sha}" >> "$GITHUB_OUTPUT" - check: + build: + if: >- + ${{ + needs.check_changes.outputs.devfiles == 'true' + || startsWith(github.event.ref, 'refs/tags/v') + || github.event_name == 'workflow_dispatch' + }} + name: "${{matrix.nix-target}}/${{matrix.build.name}}" + continue-on-error: ${{ matrix.build.optional || false }} + runs-on: lab needs: - check_changes - version - if: "${{ needs.check_changes.outputs.devfiles == 'true' || (startsWith(github.event.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/v')) && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}" permissions: checks: "write" pull-requests: "write" contents: "read" packages: "write" id-token: "write" + env: + USER: "runner" strategy: fail-fast: false matrix: - profile: - - name: "debug" - sterile: "" + nix-target: + - tests.all + - frr.dataplane + - dataplane + build: - name: "debug" - sterile: "sterile" + profile: "debug" + sanitize: "" + instrument: "none" - name: "release" - sterile: "sterile" - - name: "fuzz" - sterile: "sterile" - #- name: "release" - # sterile: "" - #- name: "fuzz" - # sterile: "" + profile: "release" + sanitize: "" # TODO: enable cfi and safe-stack when possible + instrument: "none" debug_justfile: - - "${{ 
inputs.debug_justfile || false }}" - name: "${{matrix.profile.name}} ${{matrix.profile.sterile}}" - runs-on: "lab" - timeout-minutes: 45 + - "${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_justfile || false }}" steps: - name: "login to ghcr.io" uses: "docker/login-action@v4" @@ -161,234 +168,56 @@ jobs: persist-credentials: "false" fetch-depth: "0" - - name: "install just" - run: | - # this keeps our GH actions logs from getting messed up with color codes - echo 'deb [trusted=yes] https://apt.gabe565.com /' | sudo tee /etc/apt/sources.list.d/gabe565.list - sudo apt-get update - sudo apt-get install --yes --no-install-recommends just - - - name: "set up build environment" - run: | - REQUIRED_HUGEPAGES=512 - HUGEPAGES_PATH=/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages - OVERCOMMIT_HUGEPAGES_PATH=/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages - docker run --privileged --rm busybox:latest sh -c "echo $((6 * REQUIRED_HUGEPAGES)) > $OVERCOMMIT_HUGEPAGES_PATH" - docker run --privileged --rm busybox:latest sh -c "echo $((2 * REQUIRED_HUGEPAGES)) > $HUGEPAGES_PATH" - docker pull "$REGISTRY_URL/githedgehog/testn/n-vm:v0.0.9" - just --yes \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - dpdp_sys_registry="$REGISTRY_URL" \ - refresh-compile-env - just --yes debug_justfile="${{matrix.debug_justfile}}" fake-nix - - - name: "cargo deny check" - run: | - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - dpdp_sys_registry="$REGISTRY_URL" \ - ${{matrix.profile.sterile}} cargo deny check - - - name: "push container" - if: ${{ matrix.profile.sterile == 'sterile' && (matrix.profile.name == 'release' || matrix.profile.name == 'debug') }} - run: | - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - dpdp_sys_registry="$REGISTRY_URL" \ - target=x86_64-unknown-linux-gnu \ - version=${{ 
needs.version.outputs.version }}-${{ matrix.profile.name }} \ - oci_repo="ghcr.io" \ - push - - - name: "print container image name" - if: ${{ matrix.profile.sterile == 'sterile' && (matrix.profile.name == 'release' || matrix.profile.name == 'debug') }} - run: | - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - version=${{ needs.version.outputs.version }}-${{ matrix.profile.name }} \ - oci_repo="ghcr.io" \ - print-container-tags - - - name: "Check for uncommitted changes" - run: | - git diff --exit-code - if [ $? -ne 0 ]; then - echo "::error::Uncommitted changes detected:" - git diff - exit 1 - fi - echo "No uncommitted changes found" - - - name: "Check for untracked files" - run: | - if [ -n "$(git ls-files --others --exclude-standard)" ]; then - echo "::error::Untracked files detected:" - git ls-files --others --exclude-standard - exit 1 - fi - echo "No untracked files found" - - - id: "test" - name: "test" - run: | - set -euo pipefail - mkdir --parent ./target/nextest - # Run tests. The resulting results.json is not a full JSON object but - # a list of JSON objects, one per line. 
- if [ ${{ matrix.profile.name }} = "fuzz" ]; then - echo "::notice::Running fuzz tests" - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - dpdp_sys_registry="$REGISTRY_URL" \ - ${{matrix.profile.sterile}} coverage \ - --status-level=none \ - --final-status-level=skip \ - --message-format=libtest-json-plus > ./results.json - else - echo "::notice::Running regular tests" - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - dpdp_sys_registry="$REGISTRY_URL" \ - ${{matrix.profile.sterile}} cargo nextest run \ - --cargo-profile=${{matrix.profile.name}} \ - --status-level=none \ - --final-status-level=skip \ - --message-format=libtest-json-plus \ - > ./results.json - echo "::notice::Running Shuttle tests" - # We need to rebuild using the shuttle feature. To avoid running - # all tests a second time, we filter to run only tests with pattern - # "shuttle" in their name (test function name, file name, or module - # name). - # - # IF YOUR SHUTTLE TESTS DO NOT HAVE "shuttle" IN THEIR NAME, THEY - # WILL NOT RUN. 
- just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - dpdp_sys_registry="$REGISTRY_URL" \ - ${{matrix.profile.sterile}} cargo nextest run \ - --cargo-profile=${{matrix.profile.name}} \ - --status-level=none \ - --final-status-level=none \ - --message-format=libtest-json-plus \ - --features shuttle \ - shuttle \ - >> ./results.json - fi - # look for any flakes (flakes have a #\\d+ match in their name field) - jq \ - --raw-output \ - --slurp '.[] | select(.type == "test" and (.name | test(".*#\\d+"))) | ( .name | split("#") ) | - [.[0], (.[1] | tonumber)] | @csv - ' ./results.json > ./target/nextest/flakes.csv - if [ -s ./target/nextest/flakes.csv ]; then - { - echo "FLAKY_TESTS<> "${GITHUB_ENV}" - fi - rm results.json - - - name: "upload test results to codecov" - if: ${{ always() }} - uses: "codecov/codecov-action@v5" + - name: "Install nix" + uses: cachix/install-nix-action@v31 with: - fail_ci_if_error: true - files: ./target/nextest/default/junit.xml - report_type: "test_results" - disable_search: "true" - use_oidc: "true" - verbose: true - flags: "${{matrix.profile.name}}-${{ matrix.profile.sterile || 'developer' }}" - - - name: "upload codecov analysis" - if: ${{ matrix.profile.name == 'fuzz' }} - uses: "codecov/codecov-action@v5" - with: - fail_ci_if_error: true - files: ./target/nextest/coverage/codecov.json - report_type: "coverage" - disable_search: "true" - use_oidc: "true" - verbose: true - flags: "${{matrix.profile.name}}-${{ matrix.profile.sterile || 'developer' }}" - - - name: "clean up coverage data" - run: | - rm -f codecov codecov.SHA256SUM codecov.SHA256SUM.sig + github_access_token: ${{ secrets.GITHUB_TOKEN }} + nix_path: nixpkgs=channel:nixpkgs-unstable - - uses: "marocchino/sticky-pull-request-comment@v3" - if: ${{ always() }} - with: - header: "flakes_${{matrix.profile.name}}_${{matrix.profile.sterile}}" - ignore_empty: "true" - message: | - ${{ env.FLAKY_TESTS }} - - 
- name: "publish test report" - uses: "mikepenz/action-junit-report@v6" - if: "${{ always() }}" + - uses: "cachix/cachix-action@v14" with: - annotate_notice: "false" - annotate_only: "false" - check_annotations: "true" - check_retries: "false" - comment: "false" - detailed_summary: "true" - fail_on_failure: "false" - fail_on_parse_error: "true" - flaky_summary: "true" - include_empty_in_summary: "true" - include_passed: "true" - include_time_in_summary: "true" - report_paths: "target/nextest/default/*junit.xml" - require_passed_tests: "true" - require_tests: "true" - simplified_summary: "true" - truncate_stack_traces: "false" - group_reports: "true" - check_name: "test-report-${{matrix.profile.name}}-sterile:${{matrix.profile.sterile == 'sterile'}}" - skip_success_summary: "false" - job_summary: "true" - verbose_summary: "false" - - - id: "clippy" - name: "run clippy" - run: | - just debug_justfile="${{matrix.debug_justfile}}" profile=${{matrix.profile.name}} \ - ${{matrix.profile.sterile}} cargo clippy --all-targets --all-features -- -D warnings - - - id: "docs" - name: "run rustdoc" + name: "hedgehog" + # prettier-ignore + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + # prettier-ignore + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + + - name: "run pre-flight checks" + if: ${{ matrix.nix-target == 'tests.all' }} run: | - RUSTDOCFLAGS="-D warnings" just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - ${{matrix.profile.sterile}} cargo doc --no-deps + nix-shell --run ' + for features in "" "shuttle"; do + just \ + docker_sock=/run/docker/docker.sock \ + debug_justfile=${{matrix.debug_justfile}} \ + profile="${{matrix.build.profile}}" \ + sanitize="${{matrix.build.sanitize}}" \ + instrument="${{matrix.build.instrument}}" \ + features="${features}" \ + oci_repo="ghcr.io" \ + pre-flight + done + ' - - name: "run doctests" + - name: "push container" + if: ${{ matrix.nix-target != 
'tests.all' }} run: | - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - target=x86_64-unknown-linux-gnu \ - ${{matrix.profile.sterile}} cargo test --doc + nix-shell --run "just \ + debug_justfile=${{matrix.debug_justfile}} \ + check-dependencies" + for v in "" "version=${{ needs.version.outputs.version }}-${{ matrix.build.profile }}"; do + nix-shell --run " + just \ + docker_sock=/run/docker/docker.sock \ + debug_justfile=${{ github.event_name == 'workflow_dispatch' && github.event.inputs.debug_justfile || false }} \ + profile=${{ matrix.build.profile }} \ + sanitize=${{ matrix.build.sanitize }} \ + instrument=${{ matrix.build.instrument }} \ + oci_repo=ghcr.io \ + $v \ + push-container ${{ matrix.nix-target }} + " + done - name: "Setup tmate session for debug" if: ${{ failure() && github.event_name == 'workflow_dispatch' && inputs.debug_enabled }} @@ -400,7 +229,8 @@ jobs: vlab: if: "${{ needs.check_changes.outputs.devfiles == 'true' || (startsWith(github.event.ref, 'refs/tags/v') || startsWith(github.ref, 'refs/tags/v')) && (github.event_name == 'push' || github.event_name == 'workflow_dispatch') }}" needs: - - check + - check_changes + - build - version name: "${{ matrix.hybrid && 'h' || 'v' }}-${{ matrix.upgradefrom && 'up' || '' }}${{ matrix.upgradefrom }}${{ matrix.upgradefrom && '-' || '' }}${{ matrix.mesh && 'mesh-' || '' }}${{ matrix.gateway && 'gw-' || '' }}${{ matrix.includeonie && 'onie-' || '' }}${{ matrix.buildmode }}-${{ matrix.vpcmode }}" @@ -430,7 +260,9 @@ jobs: && matrix.hybrid }} fabricatorref: master - prebuild: "just bump dataplane ${{ needs.version.outputs.version }}-release" + prebuild: | + just bump dataplane ${{ needs.version.outputs.version }}-release + just bump frr ${{ needs.version.outputs.version }}-release fabricmode: ${{ matrix.fabricmode }} gateway: ${{ matrix.gateway }} includeonie: ${{ matrix.includeonie }} @@ -455,7 +287,6 @@ jobs: - l2vni hybrid: - false - # Upgrade tests are 
disabled at the moment upgradefrom: - "" include: @@ -481,21 +312,21 @@ jobs: name: "Summary" runs-on: "ubuntu-latest" needs: - - check + - build - vlab - # Run always, except when the "check" job was skipped. + # Run always, except when the "build" job was skipped. # - # When the check job is skipped, summary will be marked as skipped, and + # When the build job is skipped, summary will be marked as skipped, and # it's OK for CI (it's not a failure). - # Why don't we do the same for check jobs? Because their names depend on + # Why don't we do the same for build jobs? Because their names depend on # matrix values, and if we skip them the names won't be generated and # GitHub won't be able to find skipped jobs for required status checks. if: ${{ always() }} steps: - - name: "Flag any check matrix failures" - if: ${{ needs.check.result != 'success' && needs.check.result != 'skipped' }} + - name: "Flag any build matrix failures" + if: ${{ needs.build.result != 'success' && needs.build.result != 'skipped' }} run: | - echo '::error:: Some check job(s) failed' + echo '::error:: Some build job(s) failed' exit 1 - name: "Flag any vlab matrix failures" if: ${{ needs.vlab.result != 'success' && needs.vlab.result != 'skipped' }} @@ -507,11 +338,15 @@ jobs: runs-on: lab if: startsWith(github.event.ref, 'refs/tags/v') && github.event_name == 'push' needs: - - check + - build - vlab permissions: packages: write + contents: read + + env: + USER: "runner" steps: - name: Checkout repository @@ -532,34 +367,29 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} - - name: "login to image cache" - run: | - echo "$REGISTRY_PASSWORD" | docker login -u "$REGISTRY_USERNAME" --password-stdin "$REGISTRY_URL" + - name: "Install nix" + uses: cachix/install-nix-action@v31 + with: + github_access_token: ${{ secrets.GITHUB_TOKEN }} + nix_path: nixpkgs=channel:nixpkgs-unstable - - name: "set up build environment" - run: | - REQUIRED_HUGEPAGES=512 - 
HUGEPAGES_PATH=/sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages - OVERCOMMIT_HUGEPAGES_PATH=/sys/kernel/mm/hugepages/hugepages-2048kB/nr_overcommit_hugepages - docker run --privileged --rm busybox:latest sh -c "echo $((6 * REQUIRED_HUGEPAGES)) > $OVERCOMMIT_HUGEPAGES_PATH" - docker run --privileged --rm busybox:latest sh -c "echo $((2 * REQUIRED_HUGEPAGES)) > $HUGEPAGES_PATH" - docker pull "$REGISTRY_URL/githedgehog/testn/n-vm:v0.0.9" - just --yes \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=${{matrix.profile.name}} \ - dpdp_sys_registry="$REGISTRY_URL" \ - refresh-compile-env - just --yes debug_justfile="${{matrix.debug_justfile}}" fake-nix + - uses: "cachix/cachix-action@v14" + with: + name: "hedgehog" + # prettier-ignore + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + # prettier-ignore + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - - name: "push container" + - name: "push containers" run: | - just \ - debug_justfile="${{matrix.debug_justfile}}" \ - profile=release \ - dpdp_sys_registry="$REGISTRY_URL" \ - target=x86_64-unknown-linux-gnu \ - oci_repo="ghcr.io" \ - push + nix-shell --run " + just \ + docker_sock=/run/docker/docker.sock \ + oci_repo=ghcr.io \ + version=${{ github.ref_name }} \ + push + " # Bump dataplane in the fabricator repository @@ -570,10 +400,11 @@ jobs: path: fab-repo persist-credentials: false - - name: Bump dataplane in fabricator + - name: Bump dataplane+frr in fabricator working-directory: fab-repo run: | sed -i "s/^\tDataplaneVersion.*/\tDataplaneVersion=meta.Version(\"${{ github.ref_name }}\")/" pkg/fab/versions.go + sed -i "s/^\tFRRVersion.*/\tFRRVersion=meta.Version(\"${{ github.ref_name }}\")/" pkg/fab/versions.go go fmt pkg/fab/versions.go - name: Generate token for the fabricator repository @@ -593,12 +424,12 @@ jobs: path: fab-repo branch: pr/auto/dataplane-bump commit-message: | - bump: dataplane to ${{ github.ref_name }} + bump: dataplane/frr to ${{ github.ref_name }} This is an automated commit 
created by GitHub Actions workflow, in the dataplane repository. signoff: true - title: "bump: dataplane to ${{ github.ref_name }}" + title: "bump: dataplane/frr to ${{ github.ref_name }}" body: | This is an automated Pull Request created by GitHub Actions workflow, in the dataplane repository. diff --git a/.github/workflows/lint-cargo-fmt.yml b/.github/workflows/lint-cargo-fmt.yml index 9364389ac..80a9b0959 100644 --- a/.github/workflows/lint-cargo-fmt.yml +++ b/.github/workflows/lint-cargo-fmt.yml @@ -1,4 +1,7 @@ # Make sure Rust source code is consistently formatted with rustfmt. +# +# Uses the nix-shell environment to ensure the same rustfmt version +# that developers use locally. name: "lint-cargo-fmt.yml" @@ -14,26 +17,36 @@ concurrency: jobs: format-check: name: "Check formatting for Rust code" - runs-on: "ubuntu-latest" + runs-on: "lab" # Skip this job in merge group checks; but we need the workflow to run, # given that the status check is required for merging. if: "${{ github.event.pull_request }}" + env: + USER: "runner" steps: - - name: "Install Rust toolchain" - uses: "dtolnay/rust-toolchain@master" - with: - toolchain: "nightly" - components: "rustfmt" - - name: "Checkout" uses: "actions/checkout@v6" with: fetch-depth: "1" persist-credentials: "false" + - name: "Install nix" + uses: "cachix/install-nix-action@v31" + with: + github_access_token: "${{ secrets.GITHUB_TOKEN }}" + nix_path: "nixpkgs=channel:nixpkgs-unstable" + + - uses: "cachix/cachix-action@v14" + with: + name: "hedgehog" + # prettier-ignore + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + # prettier-ignore + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' + - name: "Check formatting" run: | - cargo fmt --check + nix-shell --run "just fmt --check" - name: "How to fix" if: ${{ failure() }} diff --git a/.github/workflows/version-bump.yml b/.github/workflows/version-bump.yml index a8196d5be..bce668daa 100644 --- a/.github/workflows/version-bump.yml +++ b/.github/workflows/version-bump.yml @@ 
-25,6 +25,8 @@ jobs: bump_version: name: "Bump version in Cargo.toml and create Pull Request" runs-on: "lab" + env: + USER: "runner" steps: - name: "Validate new version input" if: ${{ inputs.new_version != '' }} @@ -37,12 +39,6 @@ jobs: exit 1 fi - - name: "Install rust" - uses: "dtolnay/rust-toolchain@stable" - - - name: "Install binstall" - uses: "cargo-bins/cargo-binstall@main" - # Use a GitHub App token so that the generated PR can trigger CI - name: "Generate GitHub App token" id: "app-token" @@ -51,43 +47,36 @@ jobs: app-id: "${{ secrets.DP_APP_ID }}" private-key: "${{ secrets.DP_PRIVATE_KEY }}" - - name: "Install whyq" - run: | - set -euxo pipefail - sudo apt-get update - sudo apt-get install --yes --no-install-recommends jq - cargo binstall --no-confirm whyq - - - name: "Install just" - run: | - # This keeps our GH actions logs from getting messed up with color codes - echo 'deb [trusted=yes] https://apt.gabe565.com /' | sudo tee /etc/apt/sources.list.d/gabe565.list - sudo apt-get update - sudo apt-get install --yes --no-install-recommends just - - name: "Checkout" uses: "actions/checkout@v6" with: persist-credentials: "false" fetch-depth: "0" - - name: "Login to image cache" - run: | - echo "${REGISTRY_PASSWORD}" | docker login -u "${REGISTRY_USERNAME}" --password-stdin "${REGISTRY_URL}" + - name: "Install nix" + uses: "cachix/install-nix-action@v31" + with: + github_access_token: "${{ secrets.GITHUB_TOKEN }}" + nix_path: "nixpkgs=channel:nixpkgs-unstable" - - name: "Install compile-env" - run: | - just --yes dpdp_sys_registry="${REGISTRY_URL}" refresh-compile-env - just --yes fake-nix + - uses: "cachix/cachix-action@v14" + with: + name: "hedgehog" + # prettier-ignore + signingKey: '${{ secrets.CACHIX_SIGNING_KEY }}' + # prettier-ignore + authToken: '${{ secrets.CACHIX_AUTH_TOKEN }}' - name: "Bump version" run: | - if [ -n "${{ inputs.new_version }}" ]; then - just bump_version "${{ inputs.new_version }}" - else - just bump_minor_version --input=toml - fi 
- new_version="$(yq -r --input=toml '.workspace.package.version' Cargo.toml)" + nix-shell --run " + if [ -n '${{ inputs.new_version }}' ]; then + just bump_version '${{ inputs.new_version }}' + else + just bump_minor_version --input=toml + fi + " + new_version="$(nix-shell --run "tomlq --raw-output '.workspace.package.version' Cargo.toml")" echo "new_version=${new_version}" >> "${GITHUB_ENV}" - name: "Commit changes" diff --git a/Cargo.lock b/Cargo.lock index 3cba13dad..5152edb94 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -154,9 +154,9 @@ checksum = "c3d036a3c4ab069c7b410a2ce876bd74808d2d0888a82667669f8e783a898bf1" [[package]] name = "arc-swap" -version = "1.8.2" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +checksum = "a07d1f37ff60921c83bdfc7407723bdefe89b44b98a9b772f225c8f9d67141a6" dependencies = [ "rustversion", ] @@ -241,9 +241,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.16.1" +version = "1.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94bffc006df10ac2a68c83692d734a465f8ee6c5b384d8545a636f81d858f4bf" +checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc" dependencies = [ "aws-lc-sys", "zeroize", @@ -251,9 +251,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.38.0" +version = "0.39.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4321e568ed89bb5a7d291a7f37997c2c0df89809d7b6d12062c81ddb54aa782e" +checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a" dependencies = [ "cc", "cmake", @@ -401,7 +401,7 @@ dependencies = [ "bitflags 2.11.0", "cexpr", "clang-sys", - "itertools", + "itertools 0.13.0", "proc-macro2", "quote", "regex", @@ -1259,7 +1259,6 @@ dependencies = [ "bincode2", "clap", "colored", - "dataplane-dpdk-sysroot-helper", 
"log", "rustyline", "serde", @@ -1468,11 +1467,11 @@ name = "dataplane-k8s-intf" version = "0.14.0" dependencies = [ "bolero", + "dataplane-dpdk-sysroot-helper", "dataplane-hardware", "dataplane-lpm", "dataplane-net", "dataplane-tracectl", - "dotenvy", "futures", "k8s-openapi", "kube", @@ -1486,7 +1485,6 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tracing", - "ureq", ] [[package]] @@ -1558,6 +1556,7 @@ dependencies = [ "ipnet", "linkme", "multi_index_map", + "n-vm", "netdev", "pretty_assertions", "rtnetlink", @@ -1565,6 +1564,7 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tracing", + "tracing-subscriber", "tracing-test", ] @@ -1711,7 +1711,6 @@ dependencies = [ name = "dataplane-sysfs" version = "0.14.0" dependencies = [ - "dataplane-dpdk-sysroot-helper", "n-vm", "nix 0.31.2", "procfs", @@ -1951,12 +1950,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "dotenvy" -version = "0.15.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" - [[package]] name = "downcast-rs" version = "2.0.2" @@ -2993,11 +2986,20 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "jiff" @@ -3103,9 +3105,9 @@ dependencies = [ [[package]] name = "kube" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f96b537b4c4f61fc183594edbecbbefa3037e403feac0701bb24e6eff78e0034" +checksum = 
"acc5a6a69da2975ed9925d56b5dcfc9cc739b66f37add06785b7c9f6d1e88741" dependencies = [ "k8s-openapi", "kube-client", @@ -3116,9 +3118,9 @@ dependencies = [ [[package]] name = "kube-client" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af97b8b696eb737e5694f087c498ca725b172c2a5bc3a6916328d160225537ee" +checksum = "0fcaf2d1f1a91e1805d4cd82e8333c022767ae8ffd65909bbef6802733a7dd40" dependencies = [ "base64 0.22.1", "bytes", @@ -3151,9 +3153,9 @@ dependencies = [ [[package]] name = "kube-core" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7aeade7d2e9f165f96b3c1749ff01a8e2dc7ea954bd333bcfcecc37d5226bdd" +checksum = "f126d2db7a8b532ec1d839ece2a71e2485dc3bbca6cc3c3f929becaa810e719e" dependencies = [ "derive_more", "form_urlencoded", @@ -3170,9 +3172,9 @@ dependencies = [ [[package]] name = "kube-derive" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c98f59f4e68864624a0b993a1cc2424439ab7238eaede5c299e89943e2a093ff" +checksum = "d6b9b97e121fce957f9cafc6da534abc4276983ab03190b76c09361e2df849fa" dependencies = [ "darling 0.23.0", "proc-macro2", @@ -3184,9 +3186,9 @@ dependencies = [ [[package]] name = "kube-runtime" -version = "3.0.1" +version = "3.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc158473d6d86ec22692874bd5ddccf07474eab5c6bb41f226c522e945da5244" +checksum = "c072737075826ee74d3e615e80334e41e617ca3d14fb46ef7cdfda822d6f15f2" dependencies = [ "ahash", "async-broadcast", @@ -4444,7 +4446,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.117", @@ -4919,7 +4921,6 @@ dependencies = [ "aws-lc-rs", "log", "once_cell", - "ring", 
"rustls-pki-types", "rustls-webpki", "subtle", @@ -4958,9 +4959,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.9" +version = "0.103.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" +checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef" dependencies = [ "aws-lc-rs", "ring", @@ -6122,35 +6123,6 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" -[[package]] -name = "ureq" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdc97a28575b85cfedf2a7e7d3cc64b3e11bd8ac766666318003abbacc7a21fc" -dependencies = [ - "base64 0.22.1", - "flate2", - "log", - "percent-encoding", - "rustls", - "rustls-pki-types", - "ureq-proto", - "utf-8", - "webpki-roots", -] - -[[package]] -name = "ureq-proto" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d81f9efa9df032be5934a46a068815a10a042b494b6a58cb0a1a97bb5467ed6f" -dependencies = [ - "base64 0.22.1", - "http 1.4.0", - "httparse", - "log", -] - [[package]] name = "url" version = "2.5.8" @@ -6164,12 +6136,6 @@ dependencies = [ "serde_derive", ] -[[package]] -name = "utf-8" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -6380,15 +6346,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfaf3c063993ff62e73cb4311efde4db1efb31ab78a3e5c457939ad5cc0bed" -dependencies = [ - "rustls-pki-types", -] - [[package]] name = "winapi" version = "0.3.9" @@ -6784,18 +6741,18 @@ dependencies = [ [[package]] name = 
"zerocopy" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2578b716f8a7a858b7f02d5bd870c14bf4ddbbcf3a4c05414ba6503640505e3" +checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.42" +version = "0.8.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e6cc098ea4d3bd6246687de65af3f920c430e236bee1e3bf2e441463f08a02f" +checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" dependencies = [ "proc-macro2", "quote", diff --git a/README.md b/README.md index 342ee63db..1d6acda44 100644 --- a/README.md +++ b/README.md @@ -13,25 +13,12 @@ of the Fabric. ### Prerequisites - A recent `x86_64` linux machine is required for development -- Bash (you very likely have this) -- [Docker][docker] (install through your package manager) -- Cargo / Rust (install via [`rustup`][rustup]) - - :warning: You need a recent version of rust (1.86.0 or better) to build the project. +- [Nix][nix] (the nix-shell provides the full toolchain, including Rust, Cargo, and all required libraries). + The single-user installation is recommended unless you are familiar with nix and prefer the multi-user installation; + both will work. +- [just][just] (task runner — install through your package manager or `nix-env -i just`) - ```bash - rustup update - ``` - - - :warning: You need to install (at least) the glibc target to use the default builds. - - ```bash - rustup target add x86_64-unknown-linux-gnu - ``` - -- [just][just] (install through your package manager or cargo) - -[docker]: https://www.docker.com/ -[rustup]: https://rustup.rs/ +[nix]: https://nixos.org/download/#nix-install-linux [just]: https://github.com/casey/just ### Step 0. Clone the repository @@ -41,141 +28,205 @@ git clone git@github.com:githedgehog/dataplane.git cd dataplane ``` -### Step 1. 
Get the sysroot +### Step 1. Enter the nix-shell -In the source directory, run +From the source directory, enter the development shell: ```bash -just refresh-compile-env +just shell ``` -You should now have a directory called `compile-env` which contains the tools needed to build `dpdk-sys` such as -`clang` and `lld` . -You should also have `./compile-env/sysroot` which contains the libraries that `dpdk-sys` needs to link against. -Only the `x86_64-unknown-linux-gnu` target is currently supported. +This provides the full development toolchain, including Rust, Cargo, Clippy, `cargo-nextest`, and all required +libraries and system dependencies. + +### Step 2. Build the project -### Step 2. Fake nix +To build the dataplane with default settings -The sysroot is currently built using nix, but you don't need nix to build the project. -The idea is to symlink `/nix` to `./compile-env/nix` so that the build scripts can find the libraries they need. -This is a compromise between requiring the developer to understand nix (which can be non-trivial) and requiring the -developer to have a bunch of libraries installed on their system. +```bash +just build +``` + +is sufficient. -> [!WARNING] -> This is a hack! -> It works fine but the plan won't work if you already have /nix. -> If you already have /nix talk to me, and we will make it work. -> It should be pretty easy (we will just need to export some stuff from `dpdk-sys`) +If you wish to build a specific package from this workspace, such as the init system or the cli ```bash -just fake-nix +just build workspace.init +just build workspace.cli ``` -> [!NOTE] -> If you move your project directory, you will need to run `just fake-nix refake` to update the symlinks. +Most just recipes are impacted by the `profile` argument which selects the cargo profile to use. +For instance, to build in release mode + +```bash +just profile=release build +``` + +You can also select a target platform via the `platform` argument. 
+The default is `x86-64-v3`. + +```bash +just platform=zen4 build +``` -### Step 3. Build the project +### Step 3. Run the tests -At this point you should be able to run +To run the full test suite ```bash -cargo build +just test ``` -to build default workspace members (dpdk-sysroot-helper, errno, and net), or +To run tests in release mode ```bash -just cargo build --package="$package" +just profile=release test ``` -to build workspace members which are not compiled by default (dataplane, dpdk, dpdk-sys). +You can enable a comma separated list of sanitizers via the `sanitize` argument. +You don't strictly need to use the fuzz profile with the sanitizers, but it is recommended. -These members are not enabled by default to help developers which develop on ARM machines, and which can't run (or even -compile) packages reliant on the sysroot. +```bash +just sanitize=address,leak profile=fuzz test +just sanitize=safe-stack profile=fuzz test +just sanitize=thread profile=fuzz test +``` -After running +You can also build and run the tests for a specific package from within this workspace. +For example, to run the `dataplane-net` package's tests ```bash -just cargo build --package=dataplane +just test net ``` -You should now have an ELF executable in `target/x86_64-unknown-linux-gnu/debug/dataplane`. +This covers basic testing and building of dataplane, but [there is more to testing dataplane](./testing.md). + +### Step 4. Build container images -You can build in release mode with +Note that running `just build dataplane` only builds the binary, not the container. +To build the dataplane container ```bash -just cargo build --package=dataplane --profile=release +just build-container dataplane ``` -at which point you should have an executable in `target/x86_64-unknown-linux-gnu/release/dataplane`. +Or, if you wish to build in release mode -### Step 4. 
Run the tests (debug mode) +```bash +just profile=release build-container dataplane +``` -To run the test suite, you can run +You can build the FRR container as well ```bash -just cargo test +just build-container frr.dataplane ``` -To run the test suite under release mode +Sanitizers work with the container builds too ```bash -just cargo test --profile=release +just sanitize=address,leak profile=fuzz build-container dataplane +just sanitize=address,leak profile=fuzz build-container frr.dataplane +just sanitize=thread profile=fuzz build-container dataplane +just sanitize=thread profile=fuzz build-container frr.dataplane ``` -> [!NOTE] -> Why the `just` in `just cargo build ...`? -> -> `just` is computing the correct `RUSTFLAGS` for us depending on the profile. -> After that it simply calls `cargo build`. -> Normally we would include those kinds of setting in `Cargo.toml` but `cargo` can not currently express all the -> `RUSTFLAGS` we are using (thus the `just` wrapper). +### Step 5. Push container images -This covers basic testing and building of dataplane, but [there is more to testing dataplane](./testing.md). +To build and push a container image to the configured OCI registry -## IDE Setup +```bash +just push-container dataplane +just push-container frr.dataplane +``` -Because this repository uses a custom sysroot with custom libraries and binaries, you need to set up your environment -accordingly. -Here are the suggested configurations for various IDEs: +By default, images are pushed to `127.0.0.1:30000`. 
+You can override this with the `oci_repo` argument -### VSCode Setup +```bash +just oci_repo=my-registry.example.com:5000 push-container dataplane +``` -Add the following to your `.vscode/settings.json` file: +## Common build arguments -```json -{ - "rust-analyzer.server.path": "./compile-env/bin/rust-analyzer", - "rust-analyzer.cargo.sysroot": "./compile-env", - "rust-analyzer.server.extraEnv": { - "RUSTC_BOOTSTRAP": "1", - "RUSTC": "/compile-env/bin/rustc", - "CARGO": "/compile-env/bin/cargo" - } -} +Most just recipes accept the following arguments, which can be combined freely: + +| Argument | Default | Description | +| ------------ | ----------- | ------------------------------------------------------------------------------------- | +| `profile` | `debug` | Cargo build profile (`debug`, `release`, or `fuzz`) | +| `sanitize` | (none) | Comma-separated list of sanitizers (`address`, `leak`, `thread`, `safe-stack`, `cfi`) | +| `instrument` | `none` | Instrumentation mode (`none` or `coverage`) | +| `platform` | `x86-64-v3` | Target platform (`x86-64-v3` or `zen3`, `zen4`, `zen5`, `bluefield2`, `bluefield3`) | +| `jobs` | `1` | Number of nix jobs to run in parallel | + +## Additional recipes + +### Run linters + +```bash +just lint ``` -You'll also want to run `cargo clippy` on save. -To do this, add the following to your `.vscode/settings.json` file: +### Build documentation -```json -"rust-analyzer.check.command": "clippy" +```bash +just docs +``` + +To build docs for a specific package + +```bash +just docs net +``` + +### Set up local development roots + +Create the `devroot` and `sysroot` symlinks needed for local IDE integration and development + +```bash +just setup-roots +``` + +## Updating the Gateway API version + +The fabric pin in `npins/sources.json` is frozen to prevent accidental updates. 
+To update it to a specific version: + +```bash +npins unfreeze fabric +npins add github githedgehog fabric --at +npins freeze fabric +``` + +After updating, exit and restart `nix-shell` for the changes to take effect. + +## IDE Setup + +The nix-shell provides the full toolchain, so IDE setup is straightforward. +Here are the suggested configurations for various IDEs: + +### VSCode Setup + +Launch VSCode from within the nix-shell so that rust-analyzer and other tools can find the correct toolchain: + +```bash +nix-shell --run "code ." ``` > [!NOTE] -> Please submit a PR if you have a way to avoid the absolute path. -> `${workspaceRoot}` and `${workspaceFolder}` won't work since rust-analyzer has a custom function that implements env -> var substitution in `extraEnv`. -> `${env:xxx}` susbstitutions only work if the variable is set in `extraEnv` itself. +> VSCode must be started from within the nix-shell, otherwise the correct rust-analyzer will not be found. -Finally, you want to format code using rust analyzer, and to format on save to make sure your code is always formatted. 
-To do this, add the following to your `.vscode/settings.json` file: +Add the following to your `.vscode/settings.json` file: ```json -"[rust]": { +{ + "rust-analyzer.check.command": "clippy", + "[rust]": { "editor.defaultFormatter": "rust-lang.rust-analyzer", "editor.formatOnSave": true -}, + } +} ``` ### Zed Setup @@ -193,11 +244,8 @@ Save the following to the `.zed/settings.json` file: "lsp": { "rust-analyzer": { "binary": { - "path": "/compile-env/bin/rust-analyzer", - "env": { - "RUSTC_BOOTSTRAP": "1", - "PATH": "/compile-env/bin" - } + "path": "nix-shell", + "arguments": ["--run", "rust-analyzer"] }, "initialization_options": { "check": { @@ -205,10 +253,24 @@ Save the following to the `.zed/settings.json` file: } } } + }, + "dap": { + "CodeLLDB": { + "binary": "nix-shell", + "args": ["--run", "lldb-dap"] + } + }, + "terminal": { + "shell": { + "program": "nix-shell" + } } } ``` +Zed wraps rust-analyzer and the debugger with `nix-shell --run`, so it does not need to be launched from the +nix-shell. + ## Code organization The dataplane code is organized in a set of crates. 
diff --git a/cli/Cargo.toml b/cli/Cargo.toml index 533308327..06df70ee1 100644 --- a/cli/Cargo.toml +++ b/cli/Cargo.toml @@ -22,6 +22,5 @@ thiserror = { workspace = true } [build-dependencies] # internal -dpdk-sysroot-helper = { workspace = true } # external diff --git a/cli/build.rs b/cli/build.rs deleted file mode 100644 index 52f5b0197..000000000 --- a/cli/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Open Network Fabric Authors - -fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); -} diff --git a/dataplane/Cargo.toml b/dataplane/Cargo.toml index fc2dff5b0..794ef3c4e 100644 --- a/dataplane/Cargo.toml +++ b/dataplane/Cargo.toml @@ -5,6 +5,10 @@ license.workspace = true publish.workspace = true version.workspace = true +[features] +default = ["dpdk"] +dpdk = ["dep:dpdk", "dep:dpdk-sysroot-helper"] + [dependencies] afpacket = { workspace = true, features = ["async-tokio"] } args = { workspace = true } @@ -14,7 +18,7 @@ axum-server = { workspace = true } concurrency = { workspace = true } config = { workspace = true } ctrlc = { workspace = true, features = ["termination"] } -dpdk = { workspace = true } +dpdk = { workspace = true, optional = true } dyn-iter = { workspace = true } flow-entry = { workspace = true } flow-filter = { workspace = true } @@ -60,6 +64,6 @@ tracing-subscriber = { workspace = true } [build-dependencies] # internal -dpdk-sysroot-helper = { workspace = true } +dpdk-sysroot-helper = { workspace = true, optional = true } # external diff --git a/dataplane/build.rs b/dataplane/build.rs index 52f5b0197..78e28dd9f 100644 --- a/dataplane/build.rs +++ b/dataplane/build.rs @@ -2,7 +2,6 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - 
println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + #[cfg(feature = "dpdk")] + dpdk_sysroot_helper::use_sysroot(); } diff --git a/dataplane/src/drivers/dpdk.rs b/dataplane/src/drivers/dpdk.rs index f5f9b2a57..0f1903220 100644 --- a/dataplane/src/drivers/dpdk.rs +++ b/dataplane/src/drivers/dpdk.rs @@ -3,6 +3,7 @@ //! DPDK dataplane driver +#![cfg(feature = "dpdk")] #![allow(unused)] use dpdk::dev::{Dev, TxOffloadConfig}; diff --git a/default.nix b/default.nix index a0c5dcd84..94f4eb516 100644 --- a/default.nix +++ b/default.nix @@ -6,6 +6,9 @@ profile ? "debug", instrumentation ? "none", sanitize ? "", + features ? "", + default-features ? "true", + tag ? "dev", }: let sources = import ./npins; @@ -21,6 +24,7 @@ let inherit lib platform libc; }; sanitizers = split-str ",+" sanitize; + cargo-features = split-str ",+" features; profile' = import ./nix/profiles.nix { inherit sanitizers instrumentation profile; inherit (platform') arch; @@ -29,6 +33,7 @@ let { "debug" = "dev"; "release" = "release"; + "fuzz" = "fuzz"; } .${profile}; overlays = import ./nix/overlays { @@ -54,11 +59,20 @@ let overlays.dataplane ]; }).pkgsCross.${platform'.info.nixarch}; + frr-pkgs = + (import sources.nixpkgs { + overlays = [ + overlays.rust + overlays.llvm + overlays.dataplane + overlays.frr + ]; + }).pkgsCross.${platform'.info.nixarch}; sysroot = pkgs.pkgsHostHost.symlinkJoin { name = "sysroot"; paths = with pkgs.pkgsHostHost; [ - pkgs.pkgsHostHost.libc.dev - pkgs.pkgsHostHost.libc.out + pkgs.pkgsHostHost.libc.dev # fully qualified: bare `libc` resolves to the "gnu" function argument, not pkgs.pkgsHostHost.libc + pkgs.pkgsHostHost.libc.out # (same as above) fancy.dpdk-wrapper.dev fancy.dpdk-wrapper.out fancy.dpdk.dev @@ -108,6 +122,7 @@ let cargo-bolero cargo-deny cargo-depgraph + cargo-edit cargo-llvm-cov cargo-nextest direnv @@ -115,26 +130,39 @@ let just kopium llvmPackages'.clang # you need the host compiler in order to link proc macros + llvmPackages'.llvm # needed for coverage 
npins pkg-config rust-toolchain + skopeo + yq ]); }; devenv = pkgs.mkShell { name = "dataplane-dev-shell"; packages = [ devroot ]; inputsFrom = [ sysroot ]; - shellHook = '' - export RUSTC_BOOTSTRAP=1 - ''; + env = { + RUSTC_BOOTSTRAP = "1"; + DATAPLANE_SYSROOT = "${sysroot}"; + C_INCLUDE_PATH = "${sysroot}/include"; + LIBRARY_PATH = "${sysroot}/lib"; + PKG_CONFIG_PATH = "${sysroot}/lib/pkgconfig"; + LIBCLANG_PATH = "${devroot}/lib"; + GW_CRD_PATH = "${dev-pkgs.gateway-crd}/src/fabric/config/crd/bases"; + }; }; + justfileFilter = p: _type: builtins.match ".*\.justfile$" p != null; markdownFilter = p: _type: builtins.match ".*\.md$" p != null; + jsonFilter = p: _type: builtins.match ".*\.json$" p != null; cHeaderFilter = p: _type: builtins.match ".*\.h$" p != null; outputsFilter = p: _type: (p != "target") && (p != "sysroot") && (p != "devroot") && (p != ".git"); src = pkgs.lib.cleanSourceWith { filter = p: t: - (markdownFilter p t) + (justfileFilter p t) + || (markdownFilter p t) + || (jsonFilter p t) || (cHeaderFilter p t) || ((outputsFilter p t) && (craneLib.filterCargoSources p t)); src = ./.; @@ -147,7 +175,7 @@ let }; target = pkgs.stdenv'.targetPlatform.rust.rustcTarget; is-cross-compile = dev-pkgs.stdenv.hostPlatform.rust.rustcTarget != target; - cc = if is-cross-compile then "${target}-clang" else "clang"; + cxx = if is-cross-compile then "${target}-clang++" else "clang++"; strip = if is-cross-compile then "${target}-strip" else "strip"; objcopy = if is-cross-compile then "${target}-objcopy" else "objcopy"; package-list = builtins.fromJSON ( @@ -168,17 +196,15 @@ let cargo-cmd-prefix = [ "-Zunstable-options" "-Zbuild-std=compiler_builtins,core,alloc,std,panic_unwind,panic_abort,sysroot,unwind" + "-Zbuild-std-features=backtrace,panic-unwind,mem,compiler-builtins-mem" "--target=${target}" ] + ++ (if default-features == "false" then [ "--no-default-features" ] else [ ]) ++ ( - if builtins.elem "thread" sanitizers then - [ - 
"-Zbuild-std-features=backtrace,panic-unwind,mem,compiler-builtins-mem" - ] + if cargo-features != [ ] then + [ "--features=${builtins.concatStringsSep "," cargo-features}" ] else - [ - "-Zbuild-std-features=backtrace,panic-unwind,mem,compiler-builtins-mem,llvm-libunwind" - ] + [ ] ); invoke = { @@ -204,9 +230,8 @@ let strictDeps = true; dontStrip = true; doRemapPathPrefix = false; # TODO: this setting may be wrong, test with debugger - doNotRemoveReferencesToRustToolchain = true; - doNotRemoveReferencesToVendorDir = true; - separateDebugInfo = true; + removeReferencesToRustToolchain = true; + removeReferencesToVendorDir = true; nativeBuildInputs = [ (dev-pkgs.kopium) @@ -221,18 +246,19 @@ let ]; env = { + VERSION = tag; CARGO_PROFILE = cargo-profile; DATAPLANE_SYSROOT = "${sysroot}"; LIBCLANG_PATH = "${pkgs.pkgsBuildHost.llvmPackages'.libclang.lib}/lib"; C_INCLUDE_PATH = "${sysroot}/include"; LIBRARY_PATH = "${sysroot}/lib"; PKG_CONFIG_PATH = "${sysroot}/lib/pkgconfig"; - GW_CRD_PATH = "${dev-pkgs.gateway-crd}/src/gateway/config/crd/bases"; + GW_CRD_PATH = "${dev-pkgs.gateway-crd}/src/fabric/config/crd/bases"; RUSTC_BOOTSTRAP = "1"; RUSTFLAGS = builtins.concatStringsSep " " ( profile'.RUSTFLAGS ++ [ - "-Clinker=${pkgs.pkgsBuildHost.llvmPackages'.clang}/bin/${cc}" + "-Clinker=${pkgs.pkgsBuildHost.llvmPackages'.clang}/bin/${cxx}" "-Clink-arg=--ld-path=${pkgs.pkgsBuildHost.llvmPackages'.lld}/bin/ld.lld" "-Clink-arg=-L${sysroot}/lib" # NOTE: this is basically a trick to make our source code available to debuggers. @@ -248,15 +274,6 @@ let # gdb/lldbserver container should allow us to actually debug binaries deployed to test machines. "--remap-path-prefix==${src}" ] - ++ ( - if ((builtins.elem "thread" sanitizers) || (builtins.elem "safe-stack" sanitizers)) then - [ - # "-Zexternal-clangrt" - # "-Clink-arg=--rtlib=compiler-rt" - ] - else - [ ] - ) ); }; } @@ -286,7 +303,7 @@ let rm -f $out/target.tar.zst ''; }); - package-builder = + workspace-builder = { pname ? 
null, cargoArtifacts ? null, @@ -313,16 +330,19 @@ let workspace = builtins.mapAttrs ( dir: pname: - package-builder { + workspace-builder { inherit pname; } ) package-list; test-builder = { - pname ? null, + package ? null, cargoArtifacts ? null, }: + let + pname = if package != null then package else "all"; + in pkgs.callPackage invoke { builder = craneLib.mkCargoDerivation; args = { @@ -336,19 +356,22 @@ let "--archive-file" "$out/${pname}.tar.zst" "--cargo-profile=${cargo-profile}" - "--package=${pname}" ] + ++ (if package != null then [ "--package=${pname}" ] else [ ]) ++ cargo-cmd-prefix ); }; }; - tests = builtins.mapAttrs ( - dir: pname: - test-builder { - inherit pname; - } - ) package-list; + tests = { + all = test-builder { }; + pkg = builtins.mapAttrs ( + dir: package: + test-builder { + inherit package; + } + ) package-list; + }; clippy-builder = { @@ -382,27 +405,68 @@ let } ) package-list; - dataplane-tar = pkgs.stdenv'.mkDerivation { - pname = "dataplane-tar"; + docs-builder = + { + package ? 
null, + }: + let + pname = if package != null then package else "all"; + in + pkgs.callPackage invoke { + builder = craneLib.mkCargoDerivation; + args = { + inherit pname; + cargoArtifacts = null; + RUSTDOCFLAGS = "-D warnings"; + buildPhaseCargoCommand = builtins.concatStringsSep " " ( + [ + "cargo" + "doc" + "--profile=${cargo-profile}" + "--no-deps" + ] + ++ (if package != null then [ "--package=${pname}" ] else [ ]) + ++ cargo-cmd-prefix + ); + }; + }; + + docs = { + all = docs-builder { }; + pkg = builtins.mapAttrs ( + dir: package: + docs-builder { + inherit package; + } + ) package-list; + }; + + dataplane.tar = pkgs.stdenv'.mkDerivation { + pname = "dataplane.tar"; inherit version; dontUnpack = true; src = null; + dontPatchShebangs = true; + dontFixup = true; + dontPatchElf = true; buildPhase = let libc = pkgs.pkgsHostHost.libc; in '' tmp="$(mktemp -d)" - mkdir -p "$tmp/"{bin,lib,var,etc,run/dataplane,run/frr/hh,run/netns} + mkdir -p "$tmp/"{bin,lib,var,etc,run/dataplane,run/frr/hh,run/netns,home,tmp} ln -s /run "$tmp/var/run" - cp --dereference "${workspace.dataplane}/bin/dataplane" "$tmp/bin" - cp --dereference "${workspace.cli}/bin/cli" "$tmp/bin" - cp --dereference "${workspace.init}/bin/dataplane-init" "$tmp/bin" - ln -s cli "$tmp/bin/sh" for f in "${pkgs.pkgsHostHost.dockerTools.fakeNss}/etc/"* ; do cp --archive "$(readlink -e "$f")" "$tmp/etc/$(basename "$f")" done cd "$tmp" + ln -s "${workspace.dataplane}/bin/dataplane" "$tmp/bin/dataplane" + ln -s "${workspace.cli}/bin/cli" "$tmp/bin/cli" + ln -s "${workspace.init}/bin/dataplane-init" "$tmp/bin/dataplane-init" + for i in "${pkgs.pkgsHostHost.busybox}/bin/"*; do + ln -s "${pkgs.pkgsHostHost.busybox}/bin/busybox" "$tmp/bin/$(basename "$i")" + done # we take some care to make the tar file reproducible here tar \ --create \ @@ -418,8 +482,8 @@ let --group=0 \ \ `# anybody editing the files shipped in the container image is up to no good, block all of that.` \ - `# More, we expressly forbid setuid / 
setgid anything. May as well toss in the sticky bit as well.` \ - --mode='u-sw,go=' \ + `# More, we expressly forbid setuid / setgid anything.` \ + --mode='ugo-sw' \ \ `# acls / setcap / selinux isn't going to be reliably copied into the image; skip to make more reproducible` \ --no-acls \ @@ -434,7 +498,7 @@ let `# None of this applies to musl (if we ever decide to ship with musl). That said, these filters will` \ `# just not do anything in that case. ` \ \ - `# First up, anybody even trying to access the glibc audit functionality in our container environment is ` \ + `# Anybody even trying to access the glibc audit functionality in our container environment is ` \ `# 100% up to no good.` \ `# Intercepting and messing with dynamic library loading is _absolutely_ not on our todo list, and this ` \ `# stuff has a history of causing security issues (arbitrary code execution). Just disarm this.` \ @@ -462,20 +526,169 @@ let --file "$out" \ \ . \ - ${pkgs.pkgsHostHost.libc.out} \ - ${if builtins.elem "thread" sanitizers then pkgs.pkgsHostHost.glibc.libgcc or "" else ""} \ + ${libc.out} \ + ${pkgs.pkgsHostHost.glibc.libgcc} \ + ${workspace.dataplane} \ + ${workspace.init} \ + ${workspace.cli} \ + ${pkgs.pkgsHostHost.busybox} ''; + }; + + containers.dataplane = pkgs.dockerTools.buildLayeredImage { + name = "ghcr.io/githedgehog/dataplane"; + inherit tag; + contents = pkgs.buildEnv { + name = "dataplane-env"; + pathsToLink = [ + "/bin" + "/etc" + "/var" + "/lib" + ]; + paths = [ + pkgs.pkgsHostHost.dockerTools.fakeNss + pkgs.pkgsHostHost.busybox + pkgs.pkgsHostHost.dockerTools.usrBinEnv + workspace.cli + workspace.dataplane + workspace.init + ]; + }; + config.Entrypoint = [ "/bin/dataplane" ]; + }; + containers.dataplane-debugger = pkgs.dockerTools.buildLayeredImage { + name = "ghcr.io/githedgehog/dataplane/debugger"; + inherit tag; + contents = pkgs.buildEnv { + name = "dataplane-debugger-env"; + pathsToLink = [ + "/bin" + "/etc" + "/var" + "/lib" + ]; + paths = [ + 
pkgs.pkgsBuildHost.gdb + pkgs.pkgsBuildHost.rr + pkgs.pkgsBuildHost.coreutils + pkgs.pkgsBuildHost.bashInteractive + pkgs.pkgsBuildHost.iproute2 + pkgs.pkgsBuildHost.ethtool + pkgs.pkgsHostHost.dockerTools.usrBinEnv + + pkgs.pkgsHostHost.libc.debug + workspace.cli.debug + workspace.dataplane.debug + workspace.init.debug + ]; + }; + }; + + containers.frr.dataplane = pkgs.dockerTools.buildLayeredImage { + name = "ghcr.io/githedgehog/dpdk-sys/frr"; + inherit tag; + contents = pkgs.buildEnv { + name = "dataplane-frr-env"; + pathsToLink = [ "/" ]; + paths = with frr-pkgs; [ + bash + coreutils + dockerTools.usrBinEnv + fancy.dplane-plugin + fancy.dplane-rpc + fancy.frr-agent + fancy.frr-config + fancy.frr.dataplane + findutils + gnugrep + iproute2 + jq + prometheus-frr-exporter + python3Minimal + tini + ]; + }; + + fakeRootCommands = '' + #!${frr-pkgs.bash}/bin/bash + set -euxo pipefail + mkdir /tmp + mkdir -p /run/frr/hh + chown -R frr:frr /run/frr + mkdir -p /var + ln -s /run /var/run + chown -R frr:frr /var/run/frr + ''; + + enableFakechroot = true; + + config.Entrypoint = [ + "/bin/tini" + "--" + ]; + config.Cmd = [ "/libexec/frr/docker-start" ]; + }; + + containers.frr.host = pkgs.dockerTools.buildLayeredImage { + name = "ghcr.io/githedgehog/dpdk-sys/frr-host"; + inherit tag; + contents = pkgs.buildEnv { + name = "dataplane-frr-host-env"; + pathsToLink = [ + "/" + ]; + paths = with frr-pkgs; [ + bash + coreutils + dockerTools.fakeNss + dockerTools.usrBinEnv + # TODO: frr-config's docker-start launches /bin/frr-agent which is not + # present in the host container. A host-specific entrypoint script may + # be needed once this container is actively deployed. 
+ fancy.frr-config + fancy.frr.host + findutils + gnugrep + iproute2 + jq + prometheus-frr-exporter + python3Minimal + tini + ]; + }; + fakeRootCommands = '' + #!${frr-pkgs.bash}/bin/bash + set -euxo pipefail + mkdir /tmp + mkdir -p /run/frr/hh + chown -R frr:frr /run/frr + mkdir -p /var + ln -s /run /var/run + chown -R frr:frr /var/run/frr + ''; + + enableFakechroot = true; + + config.Entrypoint = [ + "/bin/tini" + "--" + ]; + config.Cmd = [ "/libexec/frr/docker-start" ]; }; in { inherit clippy - dataplane-tar + containers dev-pkgs - devroot devenv + devroot + docs + frr-pkgs + dataplane package-list pkgs sources diff --git a/development/code/running-tests.md b/development/code/running-tests.md index f0972dff7..697aa1e09 100644 --- a/development/code/running-tests.md +++ b/development/code/running-tests.md @@ -4,26 +4,26 @@ The default test runner works fine, but it is notably slower and less fully featured than [nextest]. -Fortunately, [nextest] ships with recent versions of the compile-env, so assuming you have already followed the +Fortunately, [nextest] ships with the nix-shell, so assuming you have already followed the instructions in the [README.md], you should be able to run ```shell -just cargo nextest run +cargo nextest run ``` -even if you have not installed [nextest]. +even if you have not installed [nextest] on your system. > [!WARNING] > [nextest profiles] are not the same thing as [cargo profiles]. > If you want to select a cargo profile when running [nextest], use, for example ```shell -just cargo nextest run --cargo-profile=release +cargo nextest run --cargo-profile=release ``` ## Code Coverage (llvm-cov) -The compile-env also ships with [cargo llvm-cov] for collecting +The nix-shell also ships with [cargo llvm-cov] for collecting [code coverage](https://en.wikipedia.org/wiki/Code_coverage) information. 
Assuming you have followed the [README.md], you should be able to run @@ -50,7 +50,7 @@ And then open a web-browser to [http://localhost:8000](http://localhost:8000) to The dataplane project makes fairly extensive use of [fuzz testing](https://en.wikipedia.org/wiki/Fuzzing). We use the [bolero] crate for our fuzz tests. -Running the test suite via `just cargo test` or `just cargo nextest run` will run the fuzz tests. +Running the test suite via `cargo test` or `cargo nextest run` will run the fuzz tests. - The tests (even the fuzz tests) are only run briefly. - Coverage information and sanitizers are not enabled. @@ -62,29 +62,11 @@ change this. The major downside is that these processes are very computationally intensive and can take a long time to run. In fact, the [afl] fuzzer runs until you terminate it. -To run a full fuzz test, start by listing the available fuzz targets: - -```shell -just list-fuzz-tests -``` - -Then pick a target, e.g. `vxlan::test::mutation_of_header_preserves_contract`, and run `libfuzzer` like so - -```shell -just _test_type=FUZZ fuzz vxlan::test::mutation_of_header_preserves_contract -``` - -The test will run for 1 minute by default, but you can change to, e.g., 15 minutes via - -```shell -just _test_type=FUZZ fuzz vxlan::test::mutation_of_header_preserves_contract -T 15min -``` - > [!NOTE] -> The fuzz tests are run with full optimizations and extensive debugging information, so expect a fairly long compile -> time. +> Dedicated `just` recipes for running full fuzz campaigns (with libfuzzer/afl) are planned for a future PR. 
[README.md]: ../../README.md +[afl]: https://github.com/AFLplusplus/AFLplusplus [bolero]: https://github.com/camshaft/bolero [cargo llvm-cov]: https://github.com/taiki-e/cargo-llvm-cov?tab=readme-ov-file#cargo-llvm-cov [cargo profiles]: https://doc.rust-lang.org/cargo/reference/profiles.html diff --git a/dpdk-sys/build.rs b/dpdk-sys/build.rs index 556af520a..e0c5a219c 100644 --- a/dpdk-sys/build.rs +++ b/dpdk-sys/build.rs @@ -20,7 +20,8 @@ impl ParseCallbacks for Cb { } } -fn bind(path: &Path, sysroot: &str) { +fn bind(path: &Path) { + let sysroot = dpdk_sysroot_helper::get_sysroot(); let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); let static_fn_path = out_path.join("generated.h"); bindgen::Builder::default() @@ -47,7 +48,6 @@ fn bind(path: &Path, sysroot: &str) { .default_enum_style(bindgen::EnumVariation::ModuleConsts) .blocklist_item("rte_atomic.*") .allowlist_item("rte.*") - .allowlist_item("wrte_.*") .allowlist_item("RTE.*") .blocklist_item("__*") .clang_macro_fallback() @@ -68,15 +68,9 @@ fn bind(path: &Path, sysroot: &str) { } fn main() { + dpdk_sysroot_helper::use_sysroot(); let out_path = PathBuf::from(env::var("OUT_DIR").unwrap()); - let sysroot = dpdk_sysroot_helper::get_sysroot(); - - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - // NOTE: DPDK absolutely requires whole-archive in the linking command. - // While I find this very questionable, it is what it is. - // It is just more work for the LTO later on I suppose ¯\_(ツ)_/¯ let depends = [ "dpdk_wrapper", "rte_net_virtio", @@ -100,6 +94,7 @@ fn main() { "rte_rcu", "rte_ring", "rte_eal", + "rte_argparse", "rte_kvargs", "rte_telemetry", "rte_log", @@ -109,6 +104,7 @@ fn main() { "efa", "hns", "mana", + "ionic", "bnxt_re-rdmav59", "cxgb4-rdmav59", "erdma-rdmav59", @@ -126,12 +122,11 @@ fn main() { "numa", ]; - for dep in &depends { + // NOTE: DPDK absolutely requires whole-archive in the linking command. 
+ // While I find this very questionable, it is what it is. + // It is just more work for the LTO later on I suppose ¯\_(ツ)_/¯ + for dep in depends { println!("cargo:rustc-link-lib=static:+whole-archive,+bundle={dep}"); } - let rerun_if_changed = ["build.rs", "../scripts/dpdk-sys.env"]; - for file in &rerun_if_changed { - println!("cargo:rerun-if-changed={file}"); - } - bind(&out_path, sysroot.as_str()); + bind(&out_path); } diff --git a/dpdk-sysroot-helper/src/lib.rs b/dpdk-sysroot-helper/src/lib.rs index 8c5b81f37..6d23c47b3 100644 --- a/dpdk-sysroot-helper/src/lib.rs +++ b/dpdk-sysroot-helper/src/lib.rs @@ -29,27 +29,23 @@ pub fn get_target_name() -> String { .to_string() } -#[must_use] -pub fn get_project_root() -> String { - env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set") -} - -#[must_use] -pub fn get_compile_env() -> String { - env::var("COMPILE_ENV").expect("COMPILE_ENV not set") -} - #[must_use] pub fn get_sysroot() -> String { - let compile_env = env::var("COMPILE_ENV").expect("COMPILE_ENV not set"); - let sysroot_env = format!("{compile_env}/sysroot"); - let target = get_target_name(); - let profile = get_profile_name(); - let expected_sysroot = format!("{sysroot_env}/{target}/{profile}"); - let expected_sysroot_path = Path::new(&expected_sysroot); - if expected_sysroot_path.exists() { - expected_sysroot + let sysroot_env = env::var("DATAPLANE_SYSROOT").expect("DATAPLANE_SYSROOT not set"); + let sysroot_path = Path::new(&sysroot_env); + if sysroot_path.exists() { + sysroot_env } else { - panic!("sysroot not found at {expected_sysroot}") + panic!("sysroot not found at {sysroot_env}") + } +} + +pub fn use_sysroot() { + let sysroot = get_sysroot(); + println!("cargo:rerun-if-env-changed=DATAPLANE_SYSROOT"); + println!("cargo:rustc-link-search=all={sysroot}/lib"); + let rerun_if_changed = ["build.rs"]; + for file in rerun_if_changed { + println!("cargo:rerun-if-changed={file}"); } } diff --git a/dpdk/build.rs b/dpdk/build.rs index 
52f5b0197..236576084 100644 --- a/dpdk/build.rs +++ b/dpdk/build.rs @@ -2,7 +2,5 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + dpdk_sysroot_helper::use_sysroot(); } diff --git a/hardware/build.rs b/hardware/build.rs index 52f5b0197..236576084 100644 --- a/hardware/build.rs +++ b/hardware/build.rs @@ -2,7 +2,5 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + dpdk_sysroot_helper::use_sysroot(); } diff --git a/init/Cargo.toml b/init/Cargo.toml index 0b8f2a8ac..cfea1672a 100644 --- a/init/Cargo.toml +++ b/init/Cargo.toml @@ -5,6 +5,10 @@ license.workspace = true publish.workspace = true version.workspace = true +[features] +default = ["sysroot"] +sysroot = ["dep:dpdk-sysroot-helper"] + [dependencies] # internal hardware = { workspace = true, features = ["serde", "scan"] } @@ -27,6 +31,6 @@ tracing-subscriber = { workspace = true, features = ["fmt"] } [build-dependencies] # internal -dpdk-sysroot-helper = { workspace = true } +dpdk-sysroot-helper = { workspace = true, optional = true } # external diff --git a/init/build.rs b/init/build.rs index 52f5b0197..1fc109eb8 100644 --- a/init/build.rs +++ b/init/build.rs @@ -2,7 +2,6 @@ // Copyright Open Network Fabric Authors fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); + #[cfg(feature = "sysroot")] + dpdk_sysroot_helper::use_sysroot(); } diff --git a/justfile b/justfile index bdb849477..835ff2275 100644 --- a/justfile +++ b/justfile @@ -2,43 +2,61 @@ # Copyright Open Network Fabric Authors set unstable := true -set shell := [x"${SHELL:-bash}", "-euo", 
"pipefail", "-c"] -set script-interpreter := [x"${SHELL:-bash}", "-euo", "pipefail"] -set dotenv-load := true -set dotenv-required := true -set dotenv-path := "." -set dotenv-filename := "./scripts/rust.env" +set shell := ["/usr/bin/env", "bash", "-euo", "pipefail", "-c"] +set script-interpreter := ["/usr/bin/env", "bash", "-euo", "pipefail"] # enable to debug just recipes - debug_justfile := "false" -[private] -dpdk_sys_commit := shell("source ./scripts/dpdk-sys.env && echo $DPDK_SYS_COMMIT") + [private] _just_debuggable_ := if debug_justfile == "true" { "set -x" } else { "" } +# number of nix jobs to run in parallel +jobs := "1" + # List out the available commands [private] [default] @default: just --list --justfile {{ justfile() }} -# Set to FUZZ to run the full fuzzer in the fuzz recipe -_test_type := "DEFAULT" +# cargo build profile (debug/release/fuzz) +profile := "debug" + +# sanitizer to use (address/thread/safe-stack/cfi/"") +sanitize := "" + +# comma-separated list of cargo features to enable (e.g. "shuttle") +features := "" + +# whether to include default cargo features for this workspace (set to "false" to disable) +default_features := "true" + +# Private computed cargo flag groups for consistent invocations. +# Recipes should compose these as needed (not all cargo subcommands accept all flags). 
+[private] +_cargo_feature_flags := \ + (if default_features == "false" { "--no-default-features " } else { "" }) \ + + (if features != "" { "--features " + features } else { "" }) + +[private] +_cargo_profile_flag := if profile == "debug" { "" } else { "--profile " + profile } -# comma delimited list of sanitizers to use with bolero -sanitizers := "address,leak" +# filters for nextest +filter := if features == "shuttle" { "shuttle" } else { "" } -# the tripple to compile for -target := "x86_64-unknown-linux-gnu" +# instrumentation mode (none/coverage) +instrument := "none" -# cargo build profile to use -profile := "release" +# target platform (x86-64-v3/bluefield2) +platform := "x86-64-v3" version_extra := "" -version_target := if target == "x86_64-unknown-linux-gnu" { "" } else { "-" + target } +version_platform := if platform == "x86-64-v3" { "" } else { "-" + platform } version_profile := if profile == "release" { "" } else { "-" + profile } -version := env("VERSION", "") || `git describe --tags --dirty --always` + version_target + version_profile + version_extra +version_san := if sanitize == "" { "" } else { "-san." + replace(sanitize, ",", ".") } +version_feat := if features == "" { "" } else { "-feat." 
+ replace(features, ",", ".") } +version := env("VERSION", "") || `git describe --tags --dirty --always` + version_platform + version_profile + version_san + version_feat + version_extra # Print version that will be used in the build version: @@ -49,507 +67,202 @@ version: oci_repo := "127.0.0.1:30000" oci_insecure := "" oci_name := "githedgehog/dataplane" -oci_image_full := oci_repo + "/" + oci_name + ":" + version - -# Docker images -# The respository to push images to or pull them from -dpdp_sys_registry := "${REGISTRY_URL:-ghcr.io}" -[private] -_image_profile := if profile == "debug" { "debug" } else { "release" } -[private] -_dpdk_sys_container_repo := dpdp_sys_registry + "/githedgehog/dpdk-sys" -[private] -_dpdk_sys_container_tag := dpdk_sys_commit - -[private] -_libc_container := _dpdk_sys_container_repo + "/libc-env:" + _dpdk_sys_container_tag + "." + _image_profile - -[private] -_debug_env_container := _dpdk_sys_container_repo + "/debug-env:" + _dpdk_sys_container_tag + "." + _image_profile -[private] -_compile_env_image_name := _dpdk_sys_container_repo + "/compile-env" -[private] -_compile_env_container := _compile_env_image_name + ":" + _dpdk_sys_container_tag + "." 
+ _image_profile - -# Base container for the dataplane build -[private] -_dataplane_base_container := if _image_profile == "release" { _libc_container } else { _debug_env_container } - -# Warn if the compile-env image is deprecated (or missing) - -[private] -_compile_env_check := if shell('docker image list --format "{{.Repository}}:{{.Tag}}" | grep -x "' + _compile_env_container + '" || true') == '' { shell('printf "\n/!\\ Latest compile-env not found, try \"just refresh-compile-env\"\n\n" >&2') } else { '' } - -# Docker settings - -[private] -_network := "host" -[private] -_docker_sock_cmd := replace_regex(_just_debuggable_, ".+", "$0;") + ''' - declare -r DOCKER_HOST="${DOCKER_HOST:-unix:///var/run/docker.sock}" - declare -r without_unix="${DOCKER_HOST##unix://}" - if [ -S "${without_unix}" ]; then - printf -- '%s' "${without_unix}" - elif [ -S "/run/docker/docker.sock" ]; then - printf -- '%s' "/run/docker/docker.sock" - elif [ -S /var/run/docker.sock ]; then - printf -- '%s' "/var/run/docker.sock" - fi -''' -export DOCKER_HOST := x"${DOCKER_HOST:-unix:///var/run/docker.sock}" -export DOCKER_SOCK := shell(_docker_sock_cmd) - -# The git commit hash of the last commit to HEAD -# We allow this command to fail in the sterile environment because git is not available there - -[private] -_commit := `git rev-parse HEAD 2>/dev/null || echo "sterile"` - -# The git branch we are currnetly on -# We allow this command to fail in the sterile environment because git is not available there - -[private] -_branch := `(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo "sterile") | tr -c '[:alnum:]\n' '-'` - -# The git tree state (clean or dirty) -# We allow this command to fail in the sterile environment because git is not available there - -[private] -_clean := ``` - set -euo pipefail - ( - git diff-index --quiet HEAD -- 2>/dev/null && \ - test -z "$(git ls-files --exclude-standard --others)" && \ - echo clean \ - ) || echo dirty -``` - -# The slug is the branch name 
(sanitized) with a marker if the tree is dirty - -[private] -_slug := (if _clean == "clean" { "" } else { "dirty." }) + _branch - -# Some branch names could be too long for docker tags, e.g. merge queue one - -[private] -_dirty_prefix := (if _clean == "clean" { "" } else { "dirty" }) - -# Define a function to truncate long lines to the limit for containers tags +oci_frr_prefix := "githedgehog/dpdk-sys/frr" +oci_image_dataplane := oci_repo + "/" + oci_name + ":" + version +oci_image_dataplane_debugger := oci_repo + "/" + oci_name + "/debugger:" + version +oci_image_frr_dataplane := oci_repo + "/" + oci_frr_prefix + ":" + version +oci_image_frr_host := oci_repo + "/" + oci_frr_prefix + "-host:" + version [private] -_define_truncate128 := 'truncate128() { printf -- "%s" "${1::128}" ; }' - -# The time of the build (in iso8601 utc) +_skopeo_dest_insecure := if oci_insecure == "true" { "--dest-tls-verify=false" } else { "" } [private] -_build_time := datetime_utc("%+") +docker_sock := "/var/run/docker.sock" - -# Run cargo with RUSTFLAGS computed based on profile +# Build a nix derivation with standard build arguments [script] -cargo *args: - # Ideally this would be done via Cargo.toml and .cargo/config.toml, - # unfortunately passing RUSTFLAGS based on profile (rather than target or cfg) - # is currently unstable (nightly builds only). 
+build target="dataplane.tar" *args: {{ _just_debuggable_ }} - export PATH="$(pwd)/compile-env/bin:${PATH}" - declare -a args=({{ args }}) - declare -a extra_args=() - for arg in "${args[@]}"; do - case "$arg" in - --debug|--profile=debug|--cargo-profile=debug) - declare -rx RUSTFLAGS="${RUSTFLAGS_DEBUG}" - declare -rx LIBC_ENV_PROFILE="debug" - ;; - --release|--profile=release|--cargo-profile=release) - declare -rx RUSTFLAGS="${RUSTFLAGS_RELEASE}" - extra_args+=("$arg") - ;; - --profile=fuzz|--cargo-profile=fuzz) - declare -rx RUSTFLAGS="${RUSTFLAGS_FUZZ}" - export RUSTC_BOOTSTRAP=1 - extra_args+=("$arg") - ;; - *) - extra_args+=("$arg") - ;; - esac - done - if [ -z "${RUSTFLAGS:-}" ]; then - declare -rx RUSTFLAGS="${RUSTFLAGS_DEBUG}" - fi - - export RUSTDOCFLAGS="${RUSTDOCFLAGS:-} ${RUSTFLAGS} --html-in-header $(pwd)/scripts/doc/custom-header.html" - ./compile-env/bin/cargo "${extra_args[@]}" - -# Run the (very minimal) compile environment -[script] -compile-env *args: - {{ _just_debuggable_ }} - mkdir -p dev-env-template/etc - if [ -z "${UID:-}" ]; then - >&2 echo "ERROR: environment variable UID not set" - fi - declare -rxi UID - GID="$(id -g)" - declare -rxi GID - declare -rx USER="${USER:-runner}" - declare DOCKER_GID - DOCKER_GID="$(getent group docker | cut -d: -f3)" - declare -rxi DOCKER_GID - envsubst < dev-env-template/etc.template/group.template > dev-env-template/etc/group - envsubst < dev-env-template/etc.template/passwd.template > dev-env-template/etc/passwd - mkdir -p "$(pwd)/sterile" - declare CARGO_TARGET_DIR - CARGO_TARGET_DIR="$(pwd)/target" - TMPDIR="${CARGO_TARGET_DIR}/tmp" # needed for doctests, as /tmp is "noexec" - mkdir -p "${CARGO_TARGET_DIR}/tmp" - sudo -E docker run \ - --rm \ - --interactive \ - --network="{{ _network }}" \ - --env DOCKER_HOST="${DOCKER_HOST}" \ - --env CARGO_TARGET_DIR="${CARGO_TARGET_DIR}" \ - --env TMPDIR="${TMPDIR}" \ - --env DOCKER_HOST="${DOCKER_HOST:-unix:///var/run/docker.sock}" \ - --env TEST_TYPE="{{ 
_test_type }}" \ - --env VERSION="{{ version }}" \ - --tmpfs "/tmp:uid=$(id -u),gid=$(id -g),nodev,noexec,nosuid" \ - --mount "type=tmpfs,destination=/home/${USER:-runner},tmpfs-mode=1777" \ - --mount "type=bind,source=$(pwd),destination=$(pwd),bind-propagation=rprivate" \ - --mount "type=bind,source=$(pwd)/dev-env-template/etc/passwd,destination=/etc/passwd,readonly" \ - --mount "type=bind,source=$(pwd)/dev-env-template/etc/group,destination=/etc/group,readonly" \ - --mount "type=bind,source=${CARGO_TARGET_DIR},destination=${CARGO_TARGET_DIR}" \ - --mount "type=bind,source={{ DOCKER_SOCK }},destination={{ DOCKER_SOCK }}" \ - --user "$(id -u):$(id -g)" \ - --device "/dev/kvm" \ - --device "/dev/vhost-net" \ - --device "/dev/vhost-vsock" \ - --cap-drop ALL \ - --cap-add SETUID `# needed for sudo in test-runner` \ - --cap-add SETGID `# needed for sudo in test-runner` \ - --cap-add SETFCAP `# needed by test-runner to grant/limit caps of tests` \ - --read-only \ - --group-add="$(getent group docker | cut -d: -f3)" \ - --workdir "$(pwd)" \ - "{{ _compile_env_container }}" \ + mkdir -p results + declare -r target="{{target}}" + nix build -f default.nix "${target}" \ + --argstr profile '{{ profile }}' \ + --argstr sanitize '{{ sanitize }}' \ + --argstr features '{{ features }}' \ + --argstr default-features '{{ default_features }}' \ + --argstr instrumentation '{{ instrument }}' \ + --argstr platform '{{ platform }}' \ + --argstr tag '{{version}}' \ + --print-build-logs \ + --show-trace \ + --out-link "results/${target}" \ + --max-jobs {{jobs}} \ + --keep-failed \ {{ args }} -# Pull the latest versions of the containers +# run formatters for the code used in this project [script] -pull: +fmt *args: {{ _just_debuggable_ }} - sudo -E docker pull "{{ _compile_env_container }}" + nix-shell --run "cargo fmt {{args}}" -# Allocate 2M hugepages (if needed) -[private] -[script] -allocate-2M-hugepages hugepages_2m="1024": - {{ _just_debuggable_ }} - pages=$(< 
/sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages) - if [ "$pages" -gt {{ hugepages_2m }} ]; then - >&2 echo "INFO: ${pages} 2M hugepages already allocated" - exit 0 - fi - printf -- "%s" {{ hugepages_2m }} | sudo tee /sys/devices/system/node/node0/hugepages/hugepages-2048kB/nr_hugepages >/dev/null - -# Allocate 1G hugepages (if needed) -[private] -[script] -allocate-1G-hugepages hugepages_1g="8": - {{ _just_debuggable_ }} - pages=$(< /sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages) - if [ "$pages" -gt {{ hugepages_1g }} ]; then - >&2 echo "INFO: ${pages} 1G hugepages already allocated" - exit 0 - fi - printf -- "%s" {{ hugepages_1g }} | sudo tee /sys/devices/system/node/node0/hugepages/hugepages-1048576kB/nr_hugepages >/dev/null - -# umount hugepage mounts created by dataplane -[private] -[script] -umount-hugepages: - {{ _just_debuggable_ }} - declare hugemnt2M - hugemnt2M="/run/user/$(id -u)/hedgehog/dataplane/hugepages/2M" - declare -r hugemnt2M - declare hugemnt1G - hugemnt1G="/run/user/$(id -u)/hedgehog/dataplane/hugepages/1G" - declare -r hugemnt1G - if [ "$(findmnt -rno FSTYPE "${hugemnt2M}")" = "hugetlbfs" ]; then - sudo umount --lazy "${hugemnt2M}" - fi - if [ "$(findmnt -rno FSTYPE "${hugemnt1G}")" = "hugetlbfs" ]; then - sudo umount --lazy "${hugemnt1G}" - fi - sync - -# mount hugetlbfs -[private] -[script] -mount-hugepages: - {{ _just_debuggable_ }} - declare hugemnt2M - hugemnt2M="/run/user/$(id -u)/hedgehog/dataplane/hugepages/2M" - declare -r hugemnt2M - declare hugemnt1G - hugemnt1G="/run/user/$(id -u)/hedgehog/dataplane/hugepages/1G" - declare -r hugemnt1G - [ ! -d "$hugemnt2M" ] && mkdir --parent "$hugemnt2M" - [ ! -d "$hugemnt1G" ] && mkdir --parent "$hugemnt1G" - if [ ! "$(findmnt -rno FSTYPE "${hugemnt2M}")" = "hugetlbfs" ]; then - sudo mount -t hugetlbfs -o pagesize=2M,noatime hugetlbfs "$hugemnt2M" - fi - if [ ! 
"$(findmnt -rno FSTYPE "${hugemnt1G}")" = "hugetlbfs" ]; then - sudo mount -t hugetlbfs -o pagesize=1G,noatime hugetlbfs "$hugemnt1G" - fi - sync - -# Set up the environment for testing locally -setup-test-env: allocate-2M-hugepages allocate-1G-hugepages mount-hugepages - -# Tear down environment for testing locally -teardown-test-env: umount-hugepages - -# Dump the compile-env container into a sysroot for use by the build +# run a series of pre-flight checks to catch most problems you might find in CI early [script] -create-compile-env: +pre-flight: (check-dependencies) (fmt "--check") (test) (lint) (doctest) {{ _just_debuggable_ }} - mkdir compile-env - sudo -E docker create --name dpdk-sys-compile-env-{{ _slug }} "{{ _compile_env_container }}" - fake - sudo -E docker export dpdk-sys-compile-env-{{ _slug }} \ - | tar --no-same-owner --no-same-permissions -xf - -C compile-env - sudo -E docker rm dpdk-sys-compile-env-{{ _slug }} - -# remove the compile-env directory -[confirm("Remove old compile environment? (yes/no)\n(you can recreate it with `just create-compile-env`)")] -[script] -remove-compile-env: - {{ _just_debuggable_ }} - if [ -d compile-env ]; then sudo rm -rf compile-env; fi + echo "pre flight checks pass" -# refresh the compile-env (clear and restore) [script] -refresh-compile-env: pull remove-compile-env create-compile-env +test package="tests.all" *args: (build (if package == "tests.all" { "tests.all" } else { "tests.pkg." + package }) args) + {{ _just_debuggable_ }} + declare -r target="{{ if package == "tests.all" { "tests.all" } else { "tests.pkg." + package } }}" + nix-shell --run "cargo nextest run --archive-file results/${target}/*.tar.zst --workspace-remap $(pwd) {{ filter }}" -# clean up (delete) old compile-env images from system [script] -prune-old-compile-env: +docs package="" *args: (build (if package == "" { "docs.all" } else { "docs.pkg." 
+ package }) args) {{ _just_debuggable_ }} - docker image list "{{ _compile_env_image_name }}" --format "{{{{.Repository}}:{{{{.Tag}}" | \ - grep -v "{{ _dpdk_sys_container_tag }}" | \ - xargs -r docker image rm -# Install "fake-nix" (required for local builds to function) -[confirm("Fake a nix install (yes/no)")] +# Create devroot and sysroot symlinks for local development [script] -fake-nix refake="": +setup-roots *args: {{ _just_debuggable_ }} - if [ -h /nix ]; then - if [ "$(readlink -e /nix)" = "$(readlink -e "$(pwd)/compile-env/nix")" ]; then - >&2 echo "Nix already faked!" - exit 0 - else - if [ "{{ refake }}" = "refake" ]; then - sudo rm /nix - else - >&2 echo "Nix already faked elsewhere!" - >&2 echo "Run \`just fake-nix refake\` to re-fake to this location" - exit 1 - fi - fi - elif [ -d /nix ]; then - >&2 echo "Nix already installed, can't fake it!" - exit 1 - fi - if [ ! -d ./compile-env/nix ]; then - just refresh-compile-env - fi - if [ ! -d ./compile-env/nix ]; then - >&2 echo "Failed to create nix environment" - exit 1 - fi - sudo ln -rs ./compile-env/nix /nix - -# Run a "sterile" command -sterile *args: \ - (cargo "clean") \ - (compile-env "just" \ - ("debug_justfile=" + debug_justfile) \ - ("target=" + target) \ - ("profile=" + profile) \ - ("_test_type=" + _test_type) \ - ("sanitizers=" + sanitizers) \ - args \ - ) - -# Run the full fuzzer / property-checker on a bolero test. Args are forwarded to bolero -[script] -list-fuzz-tests *args: (cargo "bolero" "list" ("--sanitizer=" + sanitizers) "--build-std" "--profile=fuzz" args) - -# Run the full fuzzer / property-checker on a bolero test. 
Args are forwarded to bolero -fuzz test timeout="-T 60sec" *args="--engine=libfuzzer --engine-args=-max_len=65536": ( \ - compile-env \ - "just" \ - "_test_type=FUZZ" \ - "cargo" \ - "bolero" \ - "test" \ - test \ - "--build-std" \ - "--profile=fuzz" \ - ("--sanitizer=" + sanitizers) \ - timeout \ - args \ - ) - -# Run the full fuzzer / property-checker on a bolero test with the AFL fuzzer -[script] -fuzz-afl test: (fuzz test "" "--engine=afl" "--engine-args=-mnone") + for root in devroot sysroot; do + nix build -f default.nix "${root}" \ + --argstr profile '{{ profile }}' \ + --argstr sanitize '{{ sanitize }}' \ + --argstr instrumentation '{{ instrument }}' \ + --argstr platform '{{ platform }}' \ + --argstr tag '{{version}}' \ + --out-link "${root}" \ + {{ args }} + done +# Build the dataplane container image [script] -sh *args: - /bin/sh -i -c "{{ args }}" +build-container target="dataplane" *args: (build (if target == "dataplane" { "dataplane.tar" } else { "containers." + target }) args) + {{ _just_debuggable_ }} + declare -xr DOCKER_HOST="${DOCKER_HOST:-unix://{{docker_sock}}}" + case "{{target}}" in + "dataplane") + declare img + img="$(docker import --change 'ENTRYPOINT ["/bin/dataplane"]' ./results/dataplane.tar)" + declare -r img + docker tag "${img}" "{{oci_image_dataplane}}" + echo "imported {{ oci_image_dataplane }}" + ;; + "dataplane-debugger") + docker load < ./results/containers.dataplane-debugger + docker tag "ghcr.io/githedgehog/dataplane/debugger:{{version}}" "{{oci_image_dataplane_debugger}}" + echo "imported {{ oci_image_dataplane_debugger }}" + ;; + "frr.dataplane") + docker load < ./results/containers.frr.dataplane + docker tag "ghcr.io/githedgehog/dpdk-sys/frr:{{version}}" "{{oci_image_frr_dataplane}}" + echo "imported {{oci_image_frr_dataplane}}" + ;; + "frr.host") + docker load < ./results/containers.frr.host + docker tag "ghcr.io/githedgehog/dpdk-sys/frr-host:{{version}}" "{{oci_image_frr_host}}" + echo "imported {{oci_image_frr_host}}" + 
;; + *) + >&2 echo "{{target}}" not a valid container + exit 99 + esac -# Build containers in a sterile environment +# Build and push the dataplane container [script] -build-container: (sterile "_network=none" "cargo" "--locked" "build" ("--profile=" + profile) ("--target=" + target) "--package=dataplane" "--package=dataplane-cli") && version +push-container target="dataplane" *args: (build-container target args) && version {{ _just_debuggable_ }} - mkdir -p "artifact/{{ target }}/{{ profile }}" - cp -r "${CARGO_TARGET_DIR:-target}/{{ target }}/{{ profile }}/dataplane" "artifact/{{ target }}/{{ profile }}/dataplane" - cp -r "${CARGO_TARGET_DIR:-target}/{{ target }}/{{ profile }}/cli" "artifact/{{ target }}/{{ profile }}/dataplane-cli" - declare build_date - build_date="$(date --utc --iso-8601=date --date="{{ _build_time }}")" - declare -r build_date - declare build_time_epoch - build_time_epoch="$(date --utc '+%s' --date="{{ _build_time }}")" - declare -r build_time_epoch - sudo -E docker build \ - --label "git.commit={{ _commit }}" \ - --label "git.branch={{ _branch }}" \ - --label "git.tree-state={{ _clean }}" \ - --label "build.date=${build_date}" \ - --label "build.timestamp={{ _build_time }}" \ - --label "build.time_epoch=${build_time_epoch}" \ - --tag "{{ oci_image_full }}" \ - --build-arg ARTIFACT="artifact/{{ target }}/{{ profile }}/dataplane" \ - --build-arg ARTIFACT_CLI="artifact/{{ target }}/{{ profile }}/dataplane-cli" \ - --build-arg BASE="{{ _dataplane_base_container }}" \ - . 
- -# Build a container for local testing, without cache and extended base + declare -xr DOCKER_HOST="${DOCKER_HOST:-unix://{{docker_sock}}}" + case "{{target}}" in + "dataplane") + skopeo copy --src-daemon-host="${DOCKER_HOST}" {{ _skopeo_dest_insecure }} docker-daemon:{{ oci_image_dataplane }} docker://{{ oci_image_dataplane }} + echo "Pushed {{ oci_image_dataplane }}" + ;; + "dataplane-debugger") + skopeo copy --src-daemon-host="${DOCKER_HOST}" {{ _skopeo_dest_insecure }} docker-daemon:{{ oci_image_dataplane_debugger }} docker://{{ oci_image_dataplane_debugger }} + echo "Pushed {{ oci_image_dataplane_debugger }}" + ;; + "frr.dataplane") + skopeo copy --src-daemon-host="${DOCKER_HOST}" {{ _skopeo_dest_insecure }} docker-daemon:{{oci_image_frr_dataplane}} docker://{{oci_image_frr_dataplane}} + echo "Pushed {{ oci_image_frr_dataplane }}" + ;; + "frr.host") + skopeo copy --src-daemon-host="${DOCKER_HOST}" {{ _skopeo_dest_insecure }} docker-daemon:{{oci_image_frr_host}} docker://{{oci_image_frr_host}} + echo "Pushed {{ oci_image_frr_host }}" + ;; + *) + >&2 echo "{{target}}" not a valid container + exit 99 + esac + +# Pushes all release container images. +# Note: deliberately ignores all recipe parameters save version and debug_justfile. 
[script] -build-container-quick: (compile-env "cargo" "--locked" "build" ("--target=" + target) "--package=dataplane" "--package=dataplane-cli") +push: {{ _just_debuggable_ }} - mkdir -p "artifact/{{ target }}/{{ profile }}" - cp -r "${CARGO_TARGET_DIR:-target}/{{ target }}/{{ profile }}/dataplane" "artifact/{{ target }}/{{ profile }}/dataplane" - cp -r "${CARGO_TARGET_DIR:-target}/{{ target }}/{{ profile }}/cli" "artifact/{{ target }}/{{ profile }}/dataplane-cli" - declare build_date - build_date="$(date --utc --iso-8601=date --date="{{ _build_time }}")" - declare -r build_date - sudo -E docker build \ - --label "git.commit={{ _commit }}" \ - --label "git.branch={{ _branch }}" \ - --label "git.tree-state={{ _clean }}" \ - --label "build.date=${build_date}" \ - --label "build.timestamp={{ _build_time }}" \ - --tag "{{ oci_image_full }}" \ - --build-arg ARTIFACT="artifact/{{ target }}/{{ profile }}/dataplane" \ - --build-arg ARTIFACT_CLI="artifact/{{ target }}/{{ profile }}/dataplane-cli" \ - --build-arg BASE="{{ _debug_env_container }}" \ - . - - sudo -E docker tag "{{ oci_image_full }}" "dataplane:local-testing-latest" - -# Temporary tools to get a proper skopeo version -localbin := "bin" -localpath := `pwd` -localbinpath := `pwd`/localbin - -_localbin: - @mkdir -p {{localbin}} - -# go install helper -_goinstall PACKAGE VERSION BINNAME TARGET FLAGS="": _localbin - #!/usr/bin/env bash - set -euo pipefail - - echo "Installing go package: {{PACKAGE}}@{{VERSION}}..." 
- GOBIN=`pwd`/{{localbin}} go install {{FLAGS}} {{PACKAGE}}@{{VERSION}} - mv {{localbin}}/{{BINNAME}} {{TARGET}} - -skopeo_version := "v1.21.0" -skopeo := localbin / "skopeo" + "-" + skopeo_version -@_skopeo: _localbin - [ -f {{skopeo}} ] || just _goinstall "github.com/containers/skopeo/cmd/skopeo" {{skopeo_version}} "skopeo" {{skopeo}} "--tags containers_image_openpgp,exclude_graphdriver_btrfs" - -skopeo_dest_insecure := if oci_insecure == "true" { "--dest-tls-verify=false" } else { "" } -skopeo_copy_flags := if env("DOCKER_HOST", "") != "" { "--src-daemon-host " + env_var("DOCKER_HOST") } else { "" } - -# Build and push containers -[script] -push: _skopeo build-container && version - {{ skopeo }} copy {{skopeo_copy_flags}} {{skopeo_dest_insecure}} --all docker-daemon:{{ oci_image_full }} docker://{{ oci_image_full }} - echo "Pushed {{ oci_image_full }}" + for container in dataplane frr.dataplane; do + nix-shell --run "just debug_justfile={{debug_justfile}} oci_repo={{oci_repo}} version={{version}} profile=release platform=x86-64-v3 sanitize= instrument=none push-container ${container}" + done # Print names of container images to build or push [script] print-container-tags: - echo "{{ oci_image_full }}" + echo "{{ oci_image_dataplane }}" -# Run Clippy like you're in CI +# Check dependency licenses and security advisories [script] -clippy *args: (cargo "clippy" "--all-targets" "--all-features" args "--" "-D" "warnings") +check-dependencies *args: + {{ _just_debuggable_ }} + nix-shell --run "cargo deny {{ _cargo_feature_flags }} check {{ args }}" -# Serve rustdoc output locally (using port 8000) +# Run linters [script] -rustdoc-serve: - echo "Launching web server, hit Ctrl-C to stop." 
- python -m http.server -d "target/{{ target }}/doc" +lint *args: + {{ _just_debuggable_ }} + nix-shell --run "cargo clippy --all-targets {{ _cargo_feature_flags }} {{ _cargo_profile_flag }} {{ args }} -- -D warnings" -# Build for each separate commit (for "pull_request") or for the HEAD of the branch (other events) +# Run doctests [script] -build-sweep start="main": +doctest *args: {{ _just_debuggable_ }} - set -euo pipefail - if [ {{ _clean }} != "clean" ]; then - >&2 echo "can not build-sweep with dirty branch (would risk data loss)" - >&2 git status - exit 1 - fi - INIT_HEAD=$(git rev-parse --abbrev-ref HEAD) - # Get all commits since {{ start }}, in chronological order - while read -r commit; do - git -c advice.detachedHead=false checkout "${commit}" || exit 1 - { just debug_justfile={{ debug_justfile }} cargo build --locked --profile=dev --target=x86_64-unknown-linux-gnu; } || exit 1 - done < <(git rev-list --reverse "{{ start }}".."$(git rev-parse HEAD)") - # Return to the initial branch if any (exit "detached HEAD" state) - git checkout "${INIT_HEAD}" - -# Run tests with code coverage. Args will be forwarded to nextest -[script] -coverage *args: \ - (cargo "llvm-cov" "clean" "--workspace") \ - (cargo "llvm-cov" "--no-report" "--branch" "--remap-path-prefix" "nextest" "--cargo-profile=fuzz" args) \ - (cargo "llvm-cov" "report" "--html" "--output-dir=./target/nextest/coverage" "--profile=fuzz") \ - (cargo "llvm-cov" "report" "--json" "--output-path=./target/nextest/coverage/report.json" "--profile=fuzz") \ - (cargo "llvm-cov" "report" "--codecov" "--output-path=./target/nextest/coverage/codecov.json" "--profile=fuzz") - + nix-shell --run "cargo test --doc {{ _cargo_feature_flags }} {{ _cargo_profile_flag }} {{ args }}" -# regenerate the dependency graph for the project +# Run tests with code coverage. Args will be forwarded to nextest +[script] +coverage target="tests.all" *args: (build (if target == "tests.all" { "tests.all" } else { "tests.pkg." 
+ target }) args) + {{ _just_debuggable_ }} + declare -r target="{{ if target == "tests.all" { "tests.all" } else { "tests.pkg." + target } }}" + export LLVM_COV="$(pwd)/devroot/bin/llvm-cov" + export LLVM_PROFDATA="$(pwd)/devroot/bin/llvm-profdata" + export CARGO_LLVM_COV_TARGET_DIR="$(pwd)/target/llvm-cov" + export CARGO_LLVM_COV_BUILD_DIR="$(pwd)" + cargo llvm-cov clean + cargo llvm-cov show-env + cargo llvm-cov --no-report --branch nextest --archive-file "./results/${target}/"*.tar.zst --workspace-remap . {{ args }} + # NOTE: --profile="" is intentional. When collecting coverage from a nextest archive, the + # profile path component that cargo-llvm-cov normally expects in the profdata directory is + # absent. Passing an empty profile string removes that component from the lookup path so + # the tool can find the profdata generated by the archive run above. + cargo llvm-cov report --html --profile="" --output-dir=./target/nextest/coverage + cargo llvm-cov --branch report --codecov --profile="" --output-path=./target/nextest/coverage/codecov.json + +# Regenerate the dependency graph for the project [script] depgraph: - just cargo depgraph --exclude dataplane-test-utils,dataplane-dpdk-sysroot-helper --workspace-only \ - | sed 's/dataplane-//g' \ - | dot -Grankdir=TD -Gsplines=polyline -Granksep=1.5 -Tsvg > workspace-deps.svg + {{ _just_debuggable_ }} + cargo depgraph --exclude dataplane-test-utils,dataplane-dpdk-sysroot-helper --workspace-only \ + | sed 's/dataplane-//g' \ + | dot -Grankdir=TD -Gsplines=polyline -Granksep=1.5 -Tsvg > workspace-deps.svg # Bump the minor version in Cargo.toml and reset patch version to 0 [script] bump_minor_version yq_flags="": - CURRENT_VERSION=$(yq -r {{ yq_flags }} '.workspace.package.version' Cargo.toml) + CURRENT_VERSION="$(yq -r {{ yq_flags }} '.workspace.package.version' Cargo.toml)" echo "Current version: ${CURRENT_VERSION}" - MAJOR_VNUM=$(echo ${CURRENT_VERSION} | cut -d. 
-f1) - MINOR_VNUM=$(echo ${CURRENT_VERSION} | cut -d. -f2) + MAJOR_VNUM="$(cut -d. -f1 <<<"${CURRENT_VERSION}")" + MINOR_VNUM="$(cut -d. -f2 <<<"${CURRENT_VERSION}")" NEW_VERSION="${MAJOR_VNUM}.$((MINOR_VNUM + 1)).0" just bump_version "${NEW_VERSION}" @@ -558,4 +271,9 @@ bump_minor_version yq_flags="": bump_version version: echo "New version: {{ version }}" sed -i "s/^version = \".*\"/version = \"{{ version }}\"/" Cargo.toml - just cargo update -w + cargo update --workspace + +# Enter nix-shell +[script] +shell: + nix-shell diff --git a/k8s-intf/Cargo.toml b/k8s-intf/Cargo.toml index 87325a162..51836cddb 100644 --- a/k8s-intf/Cargo.toml +++ b/k8s-intf/Cargo.toml @@ -38,5 +38,4 @@ lpm = { workspace = true, features = [] } net = { workspace = true, features = ["bolero", "test_buffer"] } [build-dependencies] -dotenvy = { workspace = true, features = [] } -ureq = { workspace = true, features = ["rustls", "gzip"] } +dpdk-sysroot-helper = { workspace = true } diff --git a/k8s-intf/build.rs b/k8s-intf/build.rs index 039653f21..3b3acdddc 100644 --- a/k8s-intf/build.rs +++ b/k8s-intf/build.rs @@ -1,87 +1,10 @@ // SPDX-License-Identifier: Apache-2.0 // Copyright Open Network Fabric Authors -use std::env; use std::fs; +use std::io::Read; use std::path::PathBuf; -fn workspace_root() -> PathBuf { - PathBuf::from(env::var("CARGO_MANIFEST_DIR").expect("CARGO_MANIFEST_DIR not set")) - .ancestors() - .nth(1) - .expect("Workspace root not found") - .to_path_buf() -} - -fn env_file_name() -> PathBuf { - workspace_root().join("scripts").join("k8s-crd.env") -} - -#[derive(Default)] -struct EnvConfig { - version: Option, - url: Option, - local_path: Option, -} - -fn read_env_config() -> EnvConfig { - let env_file_path = env_file_name(); - let env_file = - dotenvy::from_path_iter(env_file_path).expect("Failed to read scripts/k8s-crd.env"); - - let mut config = EnvConfig::default(); - env_file.filter_map(Result::ok).for_each(|(key, value)| { - match key.as_str() { - 
"K8S_GATEWAY_AGENT_REF" => { - if !value.is_empty() { - config.version = Some(value); - } - } - "K8S_GATEWAY_AGENT_CRD_URL" => { - if !value.is_empty() { - config.url = Some(value); - } - } - "K8S_GATEWAY_AGENT_CRD_PATH" => { - if !value.is_empty() { - config.local_path = Some(value); - } - } - _ => { /* ignore undeclared variables */ } - } - }); - - // don't set version if we'll build from local crd spec - if config.local_path.is_some() { - config.version.take(); - } - - config -} - -fn fetch_crd(url: &str) -> String { - println!("cargo:note=Fetching CRD from: {url}"); - ureq::get(url) - .call() - .expect("Failed to fetch agent CRD from url") - .body_mut() - .read_to_string() - .expect("Failed to read response body") -} - -fn fetch_crd_from_file(path: &str) -> String { - println!("cargo:note=Fetching CRD from file at {path}"); - match fs::read_to_string(path) { - Ok(crd) => crd, - Err(e) => panic!("Failed to read CRD from {path}: {e}"), - } -} - -const LICENSE_PREAMBLE: &str = "// SPDX-License-Identifier: Apache-2.0 -// Copyright Open Network Fabric Authors - -"; - fn fixup_signed_types(raw: String) -> String { raw.replace("i64", "u64").replace("i32", "u32") } @@ -106,19 +29,13 @@ fn fixup_types(raw: String) -> String { "last_applied_gen: Option", "last_applied_gen: Option", ) - // fixme: we should consider to use u64 for generation Ids? 
} -fn gen_version_const(version: &Option) -> String { - let version = version - .as_ref() - .map(|v| format!("Some(\"{v}\")")) - .unwrap_or("None".to_string()); - - format!("pub const GW_API_VERSION: Option<&str> = {version};\n\n") +fn gen_version_const(version: String) -> String { + format!("pub const GW_API_VERSION: Option<&str> = Some(\"{version}\");\n\n") } -fn generate_rust_for_crd(crd_content: &str, version: &Option) -> String { +fn generate_rust_for_crd(crd_content: &str, version: String) -> String { // Run kopium with stdin input let mut child = std::process::Command::new("kopium") .args(["-D", "PartialEq", "-Af", "-"]) @@ -147,14 +64,18 @@ fn generate_rust_for_crd(crd_content: &str, version: &Option) -> String let raw = String::from_utf8(output.stdout).expect("Failed to convert kopium output to string"); - LICENSE_PREAMBLE.to_string() + gen_version_const(version).as_str() + &fixup_types(raw) + gen_version_const(version) + &fixup_types(raw) +} + +fn get_gateway_version() -> String { + println!("cargo:rerun-if-env-changed=VERSION"); + std::env::var("VERSION").unwrap_or("dev".into()) } -const GENERATED_OUTPUT_DIR: &str = "src/generated"; const KOPIUM_OUTPUT_FILE: &str = "gateway_agent_crd.rs"; fn kopium_output_path() -> PathBuf { - PathBuf::from(GENERATED_OUTPUT_DIR).join(KOPIUM_OUTPUT_FILE) + PathBuf::from(std::env::var("OUT_DIR").unwrap()).join(KOPIUM_OUTPUT_FILE) } fn code_needs_regen(new_code: &str) -> bool { @@ -171,42 +92,61 @@ fn code_needs_regen(new_code: &str) -> bool { true } -fn rerun() { - println!("cargo:rerun-if-changed={}", env_file_name().display()); -} - fn main() { - rerun(); - - // get config from env file - let config = read_env_config(); - - // get CRD spec from local path or URL - let crd_spec = if let Some(agent_crd_file) = config.local_path { - fetch_crd_from_file(&agent_crd_file) - } else if let Some(agent_crd_url) = config.url { - fetch_crd(&agent_crd_url) - } else { - panic!("No CRD path or URL is set in env file"); - }; + let 
version = get_gateway_version(); + + let agent_crd_path = + PathBuf::from(std::env::var("GW_CRD_PATH").expect("GW_CRD_PATH var unset")) + .join("gwint.githedgehog.com_gatewayagents.yaml"); - // CRD spec can't be empty - if crd_spec.is_empty() { - panic!("Empty CRD specification"); + let sysroot = dpdk_sysroot_helper::get_sysroot(); + let output_file = kopium_output_path(); + + // Emit all rerun directives unconditionally so Cargo always knows what to + // watch, even when we take the early-return (no-regen-needed) path below. + println!("cargo:rerun-if-env-changed=GW_CRD_PATH"); + println!("cargo:rerun-if-env-changed=DATAPLANE_SYSROOT"); + println!( + "cargo:rerun-if-changed={}", + agent_crd_path.to_str().expect("non unicode crd path") + ); + for file in [ + "build.rs", + sysroot.as_str(), + output_file.to_str().expect("non unicode output path"), + ] { + println!("cargo:rerun-if-changed={file}"); } - // generate rust types from the read crd_spec - let agent_generated_code = generate_rust_for_crd(&crd_spec, &config.version); + let agent_crd_contents = { + let mut agent_crd_file = std::fs::OpenOptions::new() + .read(true) + .write(false) + .open(&agent_crd_path) + .unwrap_or_else(|e| { + panic!( + "failed to open {path}: {e}", + path = agent_crd_path.to_str().expect("non unicode crd path") + ) + }); + let mut contents = String::with_capacity( + agent_crd_file + .metadata() + .expect("unable to get crd metadata") + .len() as usize, + ); + agent_crd_file + .read_to_string(&mut contents) + .unwrap_or_else(|e| panic!("unable to read crd data into string: {e}")); + contents + }; + let agent_generated_code = generate_rust_for_crd(&agent_crd_contents, version); + if !code_needs_regen(&agent_generated_code) { println!("cargo:note=No changes to code generated from CRD"); return; } - // Write the generated code - let output_dir = PathBuf::from(GENERATED_OUTPUT_DIR); - fs::create_dir_all(&output_dir).expect("Failed to create output directory"); - - let output_file = 
kopium_output_path(); fs::write(&output_file, agent_generated_code) .expect("Failed to write generated agent CRD code"); diff --git a/k8s-intf/src/generated/gateway_agent_crd.rs b/k8s-intf/src/generated/gateway_agent_crd.rs deleted file mode 100644 index 2e3941854..000000000 --- a/k8s-intf/src/generated/gateway_agent_crd.rs +++ /dev/null @@ -1,506 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Open Network Fabric Authors - -pub const GW_API_VERSION: Option<&str> = Some("v0.42.0"); - -// WARNING: generated by kopium - manual changes will be overwritten -// kopium command: kopium -D PartialEq -Af - -// kopium version: 0.22.5 - -#[allow(unused_imports)] -mod prelude { - pub use kube::CustomResource; - pub use schemars::JsonSchema; - pub use serde::{Serialize, Deserialize}; - pub use std::collections::BTreeMap; -} -use self::prelude::*; - -/// GatewayAgentSpec defines the desired state of GatewayAgent. -#[derive(CustomResource, Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -#[kube(group = "gwint.githedgehog.com", version = "v1alpha1", kind = "GatewayAgent", plural = "gatewayagents")] -#[kube(namespaced)] -#[kube(status = "GatewayAgentStatus")] -#[kube(derive="PartialEq")] -pub struct GatewayAgentSpec { - /// AgentVersion is the desired version of the gateway agent to trigger generation changes on controller upgrades - #[serde(default, skip_serializing_if = "Option::is_none", rename = "agentVersion")] - pub agent_version: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub communities: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub config: Option, - /// GatewaySpec defines the desired state of Gateway. 
- #[serde(default, skip_serializing_if = "Option::is_none")] - pub gateway: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub groups: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub peerings: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub vpcs: Option>, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentConfig { - /// FabricBFD defines if fabric-facing links should be configured with BFD - #[serde(default, skip_serializing_if = "Option::is_none", rename = "fabricBFD")] - pub fabric_bfd: Option, -} - -/// GatewaySpec defines the desired state of Gateway. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGateway { - /// ASN is the ASN of the gateway - #[serde(default, skip_serializing_if = "Option::is_none")] - pub asn: Option, - /// Groups is a list of group memberships for the gateway - #[serde(default, skip_serializing_if = "Option::is_none")] - pub groups: Option>, - /// Interfaces is a map of interface names to their configurations - #[serde(default, skip_serializing_if = "Option::is_none")] - pub interfaces: Option>, - /// Logs defines the configuration for logging levels - #[serde(default, skip_serializing_if = "Option::is_none")] - pub logs: Option, - /// Neighbors is a list of BGP neighbors - #[serde(default, skip_serializing_if = "Option::is_none")] - pub neighbors: Option>, - /// Profiling defines the configuration for profiling - #[serde(default, skip_serializing_if = "Option::is_none")] - pub profiling: Option, - /// ProtocolIP is used as a loopback IP and BGP Router ID - #[serde(default, skip_serializing_if = "Option::is_none", rename = "protocolIP")] - pub protocol_ip: Option, - /// VTEP IP to be used by the gateway - #[serde(default, skip_serializing_if = "Option::is_none", rename = "vtepIP")] - pub vtep_ip: Option, - /// VTEP MAC address to be used by the gateway - 
#[serde(default, skip_serializing_if = "Option::is_none", rename = "vtepMAC")] - pub vtep_mac: Option, - /// VTEPMTU is the MTU for the VTEP interface - #[serde(default, skip_serializing_if = "Option::is_none", rename = "vtepMTU")] - pub vtep_mtu: Option, - /// Workers defines the number of worker threads to use for dataplane - #[serde(default, skip_serializing_if = "Option::is_none")] - pub workers: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGatewayGroups { - /// Name is the name of the group to which the gateway belongs - #[serde(default, skip_serializing_if = "Option::is_none")] - pub name: Option, - /// Priority is the priority of the gateway within the group - #[serde(default, skip_serializing_if = "Option::is_none")] - pub priority: Option, -} - -/// Interfaces is a map of interface names to their configurations -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGatewayInterfaces { - /// IPs is the list of IP address to assign to the interface - #[serde(default, skip_serializing_if = "Option::is_none")] - pub ips: Option>, - /// Kernel is the kernel name of the interface to use (required for kernel driver), e.g. enp2s1 - #[serde(default, skip_serializing_if = "Option::is_none")] - pub kernel: Option, - /// MTU for the interface - #[serde(default, skip_serializing_if = "Option::is_none")] - pub mtu: Option, - /// PCI address of the interface (required for DPDK driver), e.g. 
0000:00:01.0 - #[serde(default, skip_serializing_if = "Option::is_none")] - pub pci: Option, -} - -/// Logs defines the configuration for logging levels -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGatewayLogs { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub default: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub tags: Option>, -} - -/// GatewayBGPNeighbor defines the configuration for a BGP neighbor -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGatewayNeighbors { - /// ASN is the remote ASN of the BGP neighbor - #[serde(default, skip_serializing_if = "Option::is_none")] - pub asn: Option, - /// IP is the IP address of the BGP neighbor - #[serde(default, skip_serializing_if = "Option::is_none")] - pub ip: Option, - /// Source is the source interface for the BGP neighbor configuration - #[serde(default, skip_serializing_if = "Option::is_none")] - pub source: Option, -} - -/// Profiling defines the configuration for profiling -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGatewayProfiling { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub enabled: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGroups { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub members: Option>, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentGroupsMembers { - pub name: String, - pub priority: u32, - #[serde(rename = "vtepIP")] - pub vtep_ip: String, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeerings { - /// GatewayGroup is the name of the gateway group that should process the peering - #[serde(default, skip_serializing_if = "Option::is_none", rename = "gatewayGroup")] - pub gateway_group: 
Option, - /// Peerings is a map of peering entries for each VPC participating in the peering (keyed by VPC name) - #[serde(default, skip_serializing_if = "Option::is_none")] - pub peering: Option>, -} - -/// Peerings is a map of peering entries for each VPC participating in the peering (keyed by VPC name) -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeering { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub expose: Option>, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExpose { - #[serde(default, skip_serializing_if = "Option::is_none", rename = "as")] - pub r#as: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub default: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub ips: Option>, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub nat: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeAs { - /// CIDR to include, only one of cidr, not can be set - #[serde(default, skip_serializing_if = "Option::is_none")] - pub cidr: Option, - /// CIDR to exclude, only one of cidr, not can be set - #[serde(default, skip_serializing_if = "Option::is_none")] - pub not: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeIps { - /// CIDR to include, only one of cidr, not, vpcSubnet can be set - #[serde(default, skip_serializing_if = "Option::is_none")] - pub cidr: Option, - /// CIDR to exclude, only one of cidr, not, vpcSubnet can be set - #[serde(default, skip_serializing_if = "Option::is_none")] - pub not: Option, - /// CIDR by VPC subnet name to include, only one of cidr, not, vpcSubnet can be set - #[serde(default, skip_serializing_if = "Option::is_none", rename = "vpcSubnet")] - pub vpc_subnet: Option, -} - 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeNat { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub masquerade: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "portForward")] - pub port_forward: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "static")] - pub r#static: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeNatMasquerade { - /// Time since the last packet after which flows are removed from the connection state table - #[serde(default, skip_serializing_if = "Option::is_none", rename = "idleTimeout")] - pub idle_timeout: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeNatPortForward { - /// Time since the last packet after which flows are removed from the connection state table - #[serde(default, skip_serializing_if = "Option::is_none", rename = "idleTimeout")] - pub idle_timeout: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub ports: Option>, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeNatPortForwardPorts { - #[serde(default, skip_serializing_if = "Option::is_none", rename = "as")] - pub r#as: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub port: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub proto: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub enum GatewayAgentPeeringsPeeringExposeNatPortForwardPortsProto { - #[serde(rename = "tcp")] - Tcp, - #[serde(rename = "udp")] - Udp, - #[serde(rename = "")] - KopiumEmpty, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentPeeringsPeeringExposeNatStatic { -} - 
-#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentVpcs { - #[serde(default, skip_serializing_if = "Option::is_none", rename = "internalID")] - pub internal_id: Option, - /// Subnets is a map of all subnets in the VPC (incl. CIDRs, VNIs, etc) keyed by the subnet name - #[serde(default, skip_serializing_if = "Option::is_none")] - pub subnets: Option>, - /// VNI is the VNI for the VPC - #[serde(default, skip_serializing_if = "Option::is_none")] - pub vni: Option, -} - -/// Subnets is a map of all subnets in the VPC (incl. CIDRs, VNIs, etc) keyed by the subnet name -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentVpcsSubnets { - /// CIDR is the subnet CIDR block, such as "10.0.0.0/24" - #[serde(default, skip_serializing_if = "Option::is_none")] - pub cidr: Option, -} - -/// GatewayAgentStatus defines the observed state of GatewayAgent. -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatus { - /// AgentVersion is the version of the gateway agent - #[serde(default, skip_serializing_if = "Option::is_none", rename = "agentVersion")] - pub agent_version: Option, - /// Generation of the last successful configuration application - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastAppliedGen")] - pub last_applied_gen: Option, - /// Time of the last successful configuration application - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastAppliedTime")] - pub last_applied_time: Option, - /// Time of the last heartbeat from the agent - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastHeartbeat")] - pub last_heartbeat: Option, - /// State represents collected data from the dataplane API that includes FRR as well - #[serde(default, skip_serializing_if = "Option::is_none")] - pub state: Option, -} - -/// State represents collected data from the dataplane API that includes FRR as 
well -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusState { - /// BGP is BGP status - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bgp: Option, - /// Dataplane is the status of the dataplane - #[serde(default, skip_serializing_if = "Option::is_none")] - pub dataplane: Option, - /// FRR is the status of the FRR daemon - #[serde(default, skip_serializing_if = "Option::is_none")] - pub frr: Option, - /// LastCollectedTime is the time of the last successful collection of data from the dataplane API - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastCollectedTime")] - pub last_collected_time: Option, - /// Peerings is the status of the VPCs peerings where key is VPC1->VPC2 and data is for one direction only - #[serde(default, skip_serializing_if = "Option::is_none")] - pub peerings: Option>, - /// VPCs is the status of the VPCs where key is the vpc (vpcinfo) name - #[serde(default, skip_serializing_if = "Option::is_none")] - pub vpcs: Option>, -} - -/// BGP is BGP status -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgp { - /// VRFs keyed by VRF name (e.g. "default", "vrfVvpc-1") - #[serde(default, skip_serializing_if = "Option::is_none")] - pub vrfs: Option>, -} - -/// VRFs keyed by VRF name (e.g. 
"default", "vrfVvpc-1") -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfs { - /// Neighbors keyed by an ip address string - #[serde(default, skip_serializing_if = "Option::is_none")] - pub neighbors: Option>, -} - -/// Neighbors keyed by an ip address string -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighbors { - #[serde(default, skip_serializing_if = "Option::is_none", rename = "connectionsDropped")] - pub connections_dropped: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub enabled: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "establishedTransitions")] - pub established_transitions: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv4UnicastPrefixes")] - pub ipv4_unicast_prefixes: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "ipv6UnicastPrefixes")] - pub ipv6_unicast_prefixes: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "l2VPNEVPNPrefixes")] - pub l2_vpnevpn_prefixes: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastResetReason")] - pub last_reset_reason: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "localAS")] - pub local_as: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub messages: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "peerAS")] - pub peer_as: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "remoteRouterID")] - pub remote_router_id: Option, - /// BGPNeighborSessionState represents the BGP FSM state for a neighbor. 
- #[serde(default, skip_serializing_if = "Option::is_none", rename = "sessionState")] - pub session_state: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsIpv4UnicastPrefixes { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub received: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "receivedPrePolicy")] - pub received_pre_policy: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub sent: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsIpv6UnicastPrefixes { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub received: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "receivedPrePolicy")] - pub received_pre_policy: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub sent: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsL2VpnevpnPrefixes { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub received: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "receivedPrePolicy")] - pub received_pre_policy: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub sent: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsMessages { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub received: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub sent: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsMessagesReceived { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub capability: Option, - #[serde(default, 
skip_serializing_if = "Option::is_none")] - pub keepalive: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub notification: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub open: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeRefresh")] - pub route_refresh: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub update: Option, -} - -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateBgpVrfsNeighborsMessagesSent { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub capability: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub keepalive: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub notification: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub open: Option, - #[serde(default, skip_serializing_if = "Option::is_none", rename = "routeRefresh")] - pub route_refresh: Option, - #[serde(default, skip_serializing_if = "Option::is_none")] - pub update: Option, -} - -/// Neighbors keyed by an ip address string -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub enum GatewayAgentStatusStateBgpVrfsNeighborsSessionState { - #[serde(rename = "unset")] - Unset, - #[serde(rename = "idle")] - Idle, - #[serde(rename = "connect")] - Connect, - #[serde(rename = "active")] - Active, - #[serde(rename = "open")] - Open, - #[serde(rename = "established")] - Established, -} - -/// Dataplane is the status of the dataplane -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateDataplane { - #[serde(default, skip_serializing_if = "Option::is_none")] - pub version: Option, -} - -/// FRR is the status of the FRR daemon -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateFrr { - /// LastAppliedGen is the generation 
of the last successful application of a configuration to the FRR - #[serde(default, skip_serializing_if = "Option::is_none", rename = "lastAppliedGen")] - pub last_applied_gen: Option, -} - -/// Peerings is the status of the VPCs peerings where key is VPC1->VPC2 and data is for one direction only -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStatePeerings { - /// Bytes is the number of bytes sent on the peering - #[serde(default, skip_serializing_if = "Option::is_none")] - pub b: Option, - /// BytesPerSecond is the number of bytes sent per second on the peering - #[serde(default, skip_serializing_if = "Option::is_none")] - pub bps: Option, - /// Drops is the number of packets dropped on the peering - #[serde(default, skip_serializing_if = "Option::is_none")] - pub d: Option, - /// Packets is the number of packets sent on the peering - #[serde(default, skip_serializing_if = "Option::is_none")] - pub p: Option, - /// PktsPerSecond is the number of packets sent per second on the peering - #[serde(default, skip_serializing_if = "Option::is_none")] - pub pps: Option, -} - -/// VPCs is the status of the VPCs where key is the vpc (vpcinfo) name -#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, JsonSchema)] -pub struct GatewayAgentStatusStateVpcs { - /// Bytes is the number of bytes sent on the vpc - #[serde(default, skip_serializing_if = "Option::is_none")] - pub b: Option, - /// Drops is the number of packets dropped on the vpc - #[serde(default, skip_serializing_if = "Option::is_none")] - pub d: Option, - /// Packets is the number of packets sent on the vpc - #[serde(default, skip_serializing_if = "Option::is_none")] - pub p: Option, -} - diff --git a/k8s-intf/src/generated/mod.rs b/k8s-intf/src/generated/mod.rs deleted file mode 100644 index 7c48aa95a..000000000 --- a/k8s-intf/src/generated/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Open Network Fabric 
Authors - -// Don't complain about generated code -#[allow(clippy::all, clippy::pedantic)] -#[rustfmt::skip] -pub mod gateway_agent_crd; diff --git a/k8s-intf/src/lib.rs b/k8s-intf/src/lib.rs index d7d8dc0b2..0d5bad7a1 100644 --- a/k8s-intf/src/lib.rs +++ b/k8s-intf/src/lib.rs @@ -8,11 +8,12 @@ #[cfg(any(test, feature = "bolero"))] pub mod bolero; pub mod client; -pub mod generated; pub mod utils; +#[allow(clippy::all, clippy::pedantic)] +#[rustfmt::skip] pub mod gateway_agent_crd { - pub use crate::generated::gateway_agent_crd::*; + include!(concat!(env!("OUT_DIR"), "/gateway_agent_crd.rs")); } pub use client::watch_gateway_agent_crd; diff --git a/mgmt/Cargo.toml b/mgmt/Cargo.toml index f39f381c6..aed33e2e5 100644 --- a/mgmt/Cargo.toml +++ b/mgmt/Cargo.toml @@ -58,6 +58,7 @@ fixin = { workspace = true } id = { workspace = true, features = ["bolero"] } interface-manager = { workspace = true, features = ["bolero"] } lpm = { workspace = true, features = ["testing"] } +n-vm = { workspace = true } net = { workspace = true, features = ["bolero"] } pipeline = { workspace = true } routing = { workspace = true, features = ["testing"] } @@ -67,3 +68,4 @@ test-utils = { workspace = true } bolero = { workspace = true, default-features = false, features = ["alloc"] } ipnet = { workspace = true } pretty_assertions = { workspace = true, features = ["std"] } +tracing-subscriber = { workspace = true } diff --git a/mgmt/src/tests/mgmt.rs b/mgmt/src/tests/mgmt.rs index 44b48c835..d94db1fa8 100644 --- a/mgmt/src/tests/mgmt.rs +++ b/mgmt/src/tests/mgmt.rs @@ -4,26 +4,18 @@ #[cfg(test)] #[allow(dead_code)] pub mod test { - use caps::Capability::CAP_NET_ADMIN; use config::external::communities::PriorityCommunityTable; use config::external::gwgroup::GwGroup; use config::external::gwgroup::GwGroupMember; use config::external::gwgroup::GwGroupTable; - use flow_filter::FlowFilterTableWriter; use lpm::prefix::Prefix; - use nat::portfw::PortFwTableWriter; - use 
nat::stateful::NatAllocatorWriter; - use nat::stateless::NatTablesWriter; use net::eth::mac::Mac; use net::interface::Mtu; use pipeline::PipelineData; use std::net::IpAddr; use std::net::Ipv4Addr; use std::str::FromStr; - use test_utils::with_caps; - use tracectl::get_trace_ctl; - use tracing::error; use tracing_test::traced_test; use config::external::ExternalConfigBuilder; @@ -48,16 +40,19 @@ pub mod test { use crate::processor::confbuild::internal::build_internal_config; use crate::processor::proc::{ConfigProcessor, ConfigProcessorParams}; + use concurrency::sync::Arc; + use config::internal::status::DataplaneStatus; + use flow_filter::FlowFilterTableWriter; + use nat::portfw::PortFwTableWriter; + use nat::stateful::NatAllocatorWriter; + use nat::stateless::NatTablesWriter; use routing::{Router, RouterParamsBuilder}; - use tracing::debug; - use stats::VpcMapName; use stats::VpcStatsStore; - use vpcmap::map::VpcMapWriter; - - use concurrency::sync::Arc; - use config::internal::status::DataplaneStatus; use tokio::sync::RwLock; + use tracectl::get_trace_ctl; + use tracing::{debug, error}; + use vpcmap::map::VpcMapWriter; /* OVERLAY config sample builders */ fn sample_vpc_table() -> VpcTable { @@ -410,8 +405,9 @@ pub mod test { println!("{rendered}"); } + #[ignore = "temporarily disabled during vm test runner refactor"] + #[n_vm::in_vm] #[tokio::test] - #[fixin::wrap(with_caps([CAP_NET_ADMIN]))] async fn test_sample_config() { get_trace_ctl() .setup_from_string("cpi=debug,mgmt=debug,routing=debug") diff --git a/mgmt/tests/reconcile.rs b/mgmt/tests/reconcile.rs index 24c5f1d72..ed9bd6ced 100644 --- a/mgmt/tests/reconcile.rs +++ b/mgmt/tests/reconcile.rs @@ -26,6 +26,7 @@ use tracing::info; use tracing_test::traced_test; #[test] +#[n_vm::in_vm] #[wrap(with_caps([Capability::CAP_NET_ADMIN]))] #[traced_test] fn reconcile_fuzz() { diff --git a/nix/overlays/dataplane-dev.nix b/nix/overlays/dataplane-dev.nix index e548ef9e8..9f694ab75 100644 --- 
a/nix/overlays/dataplane-dev.nix +++ b/nix/overlays/dataplane-dev.nix @@ -7,7 +7,7 @@ final: prev: let override-packages = { - stdenv = final.llvmPackages.stdenv; + stdenv = final.llvmPackages'.stdenv; rustPlatform = final.rustPlatform'-dev; }; in @@ -20,6 +20,7 @@ in ); cargo-bolero = prev.cargo-bolero.override { inherit (override-packages) rustPlatform; }; cargo-deny = prev.cargo-deny.override { inherit (override-packages) rustPlatform; }; + cargo-edit = prev.cargo-edit.override { inherit (override-packages) rustPlatform; }; cargo-llvm-cov = prev.cargo-llvm-cov.override override-packages; cargo-nextest = prev.cargo-nextest.override override-packages; just = prev.just.override override-packages; @@ -30,8 +31,20 @@ in in final.writeTextFile { name = "gateway-crd"; - text = builtins.readFile "${sources.gateway}/${p}"; + text = builtins.readFile "${sources.fabric}/${p}"; executable = false; - destination = "/src/gateway/${p}"; + destination = "/src/fabric/${p}"; }; + + gdb' = prev.gdb.overrideAttrs (orig: { + CFLAGS = "-Os -flto"; + CXXFLAGS = "-Os -flto"; + LDFLAGS = "-flto -Wl,--as-needed,--gc-sections -static-libstdc++ -static-libgcc"; + buildInputs = (orig.buildInputs or [ ]); + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--enable-static" + "--disable-inprocess-agent" + "--disable-source-highlight" # breaks static compile + ]; + }); } diff --git a/nix/overlays/dataplane.nix b/nix/overlays/dataplane.nix index 45be4326f..d8cd8c9c4 100644 --- a/nix/overlays/dataplane.nix +++ b/nix/overlays/dataplane.nix @@ -3,11 +3,36 @@ { sources, sanitizers, + platform, + profile, ... 
}: final: prev: let - dataplane-dep = pkg: pkg.override { stdenv = final.stdenv'; }; + helpers.addToEnv = + new: orig: + orig + // ( + with builtins; (mapAttrs (var: val: (toString (orig.${var} or "")) + " " + (toString val)) new) + ); + dataplane-dep = + pkg: + (pkg.override { stdenv = final.stdenv'; }).overrideAttrs (orig: { + env = helpers.addToEnv (orig.env or { }) ( + let + # -ffile-prefix-map is a simple trick to map /build to /nix/store paths for code coverage data. + # This trick does not work well for .tar packages or source code generated during the build, but it's + # the best I can do without massively increasing build system complexity. + extra-cflags = "-ffile-prefix-map=/build=${orig.src} -ffile-prefix-map=/build/source=${orig.src}"; + extra-cxxflags = extra-cflags; + in + { + NIX_CFLAGS_COMPILE = extra-cflags; + NIX_CXXFLAGS_COMPILE = extra-cxxflags; + } + ); + }); + in { # libmd is used by libbsd (et al) which is an optional dependency of dpdk. @@ -17,8 +42,6 @@ in # is a solid plan. fancy.libmd = (dataplane-dep prev.libmd).overrideAttrs (orig: { outputs = (orig.outputs or [ "out" ]) ++ [ - "man" - "dev" "static" ]; # we need to enable shared libs (in addition to static) to make dpdk's build happy. Basically, DPDK's build has no @@ -114,8 +137,6 @@ in fancy.rdma-core = ((dataplane-dep prev.rdma-core).override { docutils = null; - ethtool = null; - iproute2 = null; libnl = final.fancy.libnl; pandoc = null; udev = null; @@ -193,7 +214,13 @@ in # Also, while this library has a respectable security track record, this is also a very strong candidate for # cfi, safe-stack, and cf-protection. fancy.dpdk = dataplane-dep ( - final.callPackage ../pkgs/dpdk (final.fancy // { src = sources.dpdk; }) + final.callPackage ../pkgs/dpdk ( + final.fancy + // { + inherit platform profile; + src = sources.dpdk; + } + ) ); # DPDK is largely composed of static-inline functions. @@ -203,7 +230,6 @@ in # these methods anyway. 
fancy.dpdk-wrapper = dataplane-dep (final.callPackage ../pkgs/dpdk-wrapper final.fancy); - # TODO: consistent packages fancy.pciutils = dataplane-dep ( final.pciutils.override { static = true; @@ -212,9 +238,8 @@ in } ); - fancy.libunwind = (dataplane-dep final.llvmPackages.libunwind).override { enableShared = false; }; + fancy.libunwind = (dataplane-dep final.llvmPackages'.libunwind).override { enableShared = false; }; - # TODO: consistent packages, min deps fancy.hwloc = ((dataplane-dep prev.hwloc).override { inherit (final.fancy) numactl; @@ -222,12 +247,13 @@ in cudaPackages = null; enableCuda = false; expat = null; - libX11 = null; + libx11 = null; ncurses = null; x11Support = false; }).overrideAttrs (orig: { outputs = (orig.outputs or [ ]) ++ [ "static" ]; + CFLAGS = "-ffile-prefix-map=/build/hwloc=${orig.src}"; configureFlags = (orig.configureFlags or [ ]) ++ [ "--enable-static" ]; @@ -238,7 +264,5 @@ in }); # This isn't directly required by dataplane, - fancy.perftest = dataplane-dep ( - final.callPackage ../pkgs/perftest final.fancy // { src = sources.perftest; } - ); + fancy.perftest = dataplane-dep (final.callPackage ../pkgs/perftest { src = sources.perftest; }); } diff --git a/nix/overlays/default.nix b/nix/overlays/default.nix index 19045bb38..4ead96840 100644 --- a/nix/overlays/default.nix +++ b/nix/overlays/default.nix @@ -2,13 +2,12 @@ # Copyright Open Network Fabric Authors inputs@{ sources, - platform, - profile, - sanitizers, ... 
}: { + rust = import sources.rust-overlay; llvm = import ./llvm.nix inputs; # requires rust dataplane-dev = import ./dataplane-dev.nix inputs; # requires llvm dataplane = import ./dataplane.nix inputs; # requires llvm + frr = import ./frr.nix inputs; # requires dataplane } diff --git a/nix/overlays/frr.nix b/nix/overlays/frr.nix new file mode 100644 index 000000000..1069da0f1 --- /dev/null +++ b/nix/overlays/frr.nix @@ -0,0 +1,216 @@ +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors +{ + sources, + sanitizers, + platform, + profile, + ... +}: +final: prev: +let + dep = + pkg: + (pkg.override { stdenv = final.stdenv'; }).overrideAttrs (orig: { + nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ prev.removeReferencesTo ]; + postInstall = (orig.postInstall or "") + '' + find "$out" \ + -type f \ + -exec remove-references-to -t ${final.stdenv'.cc} '{}' +; + if [ -n "$lib" ] && [ -d "$lib" ]; then + find "$lib" \ + -type f \ + -exec remove-references-to -t ${final.stdenv'.cc} '{}' +; + fi + ''; + }); + frr-build = + frrSrc: + dep ( + (final.callPackage ../pkgs/frr ( + final.fancy + // { + stdenv = final.stdenv'; + inherit frrSrc; + } + )).overrideAttrs + (orig: { + LDFLAGS = + (orig.LDFLAGS or "") + + " -L${final.fancy.readline}/lib -lreadline " + + " -L${final.fancy.json_c}/lib -ljson-c " + + " -Wl,--push-state,--as-needed,--no-whole-archive,-Bstatic " + + " -L${final.fancy.libxcrypt}/lib -lcrypt " + + " -L${final.fancy.pcre2}/lib -lpcre2-8 " + + " -L${final.fancy.xxHash}/lib -lxxhash " + + " -L${final.fancy.libgccjit}/lib -latomic " + + " -Wl,--pop-state"; + configureFlags = orig.configureFlags ++ [ + "--enable-shared" + "--enable-static" + # FRR's build system has an ODR violation when static bins are enabled; + # this overrides the base package's --enable-static-bin. 
+ "--disable-static-bin" + ]; + nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ prev.nukeReferences ]; + # disallowedReferences = (orig.disallowedReferences or []) ++ [ final.stdenv'.cc ]; + preFixup = '' + find "$out" \ + -type f \ + -exec nuke-refs \ + -e "$out" \ + -e ${final.stdenv'.cc.libc} \ + -e ${final.python3Minimal} \ + -e ${final.fancy.readline} \ + -e ${final.fancy.libgccjit} \ + -e ${final.fancy.json_c} \ + '{}' +; + ''; + }) + ); +in +{ + fancy = prev.fancy // { + inherit sources; + xxHash = (dep prev.xxHash).overrideAttrs (orig: { + cmakeFlags = (orig.cmakeFlags or [ ]) ++ [ + "-DBUILD_SHARED_LIBS=OFF" + "-DXXH_STATIC_LINKING_ONLY=ON" + ]; + }); + libyang = ( + (prev.libyang.override { + stdenv = final.stdenv'; + pcre2 = final.fancy.pcre2; + xxHash = final.fancy.xxHash; + }).overrideAttrs + (orig: { + cmakeFlags = (orig.cmakeFlags or [ ]) ++ [ "-DBUILD_SHARED_LIBS=OFF" ]; + propagatedBuildInputs = [ + final.fancy.pcre2 + final.fancy.xxHash + ]; + }) + ); + libcap = ( + (prev.libcap.override { + stdenv = final.stdenv'; + usePam = false; + withGo = false; + }).overrideAttrs + (orig: { + doCheck = false; # tests require privileges + separateDebugInfo = false; + CFLAGS = "-ffat-lto-objects -fsplit-lto-unit"; + makeFlags = [ + "lib=lib" + "PAM_CAP=no" + "CC:=clang" + "SHARED=no" + "LIBCSTATIC=no" + "GOLANG=no" + ]; + configureFlags = (orig.configureFlags or [ ]) ++ [ "--enable-static" ]; + postInstall = orig.postInstall + '' + # extant postInstall removes .a files for no reason + cp ./libcap/*.a $lib/lib; + ''; + }) + ); + json_c = + (dep prev.json_c).overrideAttrs (orig: { + cmakeFlags = (orig.cmakeFlags or [ ]) ++ [ + "-DENABLE_STATIC=1" + ]; + postInstall = (orig.postInstall or "") + '' + mkdir -p $dev/lib + $RANLIB libjson-c.a; + cp libjson-c.a $out/lib; + find "$out" \ + -type f \ + -exec remove-references-to -t ${final.stdenv'.cc} '{}' +; + ''; + nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ prev.removeReferencesTo ]; + 
disallowedReferences = (orig.disallowedReferences or [ ]) ++ [ final.stdenv'.cc ]; + }); + rtrlib = dep ( + prev.rtrlib.overrideAttrs (orig: { + cmakeFlags = (orig.cmakeFlags or [ ]) ++ [ "-DENABLE_STATIC=1" ]; + }) + ); + abseil-cpp = dep prev.abseil-cpp; + zlib = ( + prev.zlib.override { + stdenv = final.stdenv'; + static = true; + shared = false; + } + ); + pcre2 = dep ( + prev.pcre2.overrideAttrs (orig: { + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--enable-static" + "--disable-shared" + ]; + }) + ); + ncurses = dep ( + prev.ncurses.override { + stdenv = final.stdenv'; + enableStatic = true; + withCxx = false; + } + ); + readline = dep ( + (prev.readline.override { + stdenv = final.stdenv'; + ncurses = final.fancy.ncurses; + }).overrideAttrs + (orig: { + nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ prev.removeReferencesTo ]; + disallowedReferences = (orig.disallowedReferences or [ ]) ++ [ final.stdenv'.cc ]; + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--enable-static" + "--enable-shared" + ]; + postInstall = (orig.postInstall or "") + '' + find "$out" \ + -type f \ + -exec remove-references-to -t ${final.stdenv'.cc} '{}' +; + ''; + }) + ); + libxcrypt = (dep prev.libxcrypt).overrideAttrs (orig: { + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--enable-static" + "--disable-shared" + ]; + }); + libgccjit = + (prev.libgccjit.override { + # TODO: debug issue preventing clang build + # stdenv = final.stdenv'; + libxcrypt = final.fancy.libxcrypt; + }).overrideAttrs + (orig: { + configureFlags = (orig.configureFlags or [ ]) ++ [ + "--disable-static" + "--enable-shared" + ]; + }); + c-ares = dep ( + prev.c-ares.overrideAttrs (orig: { + cmakeFlags = (orig.cmakeFlags or [ ]) ++ [ + "-DCARES_SHARED=OFF" + "-DCARES_STATIC=ON" + ]; + }) + ); + frr-agent = dep (final.callPackage ../pkgs/frr-agent final.fancy); + frr-config = dep (final.callPackage ../pkgs/frr-config final.fancy); + dplane-rpc = dep (final.callPackage 
../pkgs/dplane-rpc final.fancy); + dplane-plugin = dep (final.callPackage ../pkgs/dplane-plugin final.fancy); + frr.host = frr-build sources.frr; + frr.dataplane = frr-build sources.frr-dp; + }; +} diff --git a/nix/overlays/llvm.nix b/nix/overlays/llvm.nix index a48cd4267..5bffc8823 100644 --- a/nix/overlays/llvm.nix +++ b/nix/overlays/llvm.nix @@ -1,6 +1,7 @@ # SPDX-License-Identifier: Apache-2.0 # Copyright Open Network Fabric Authors { + sources, platform, profile, ... @@ -14,25 +15,45 @@ let with builtins; (mapAttrs (var: val: (toString (orig.${var} or "")) + " " + (toString val)) new) ); adapt = final.stdenvAdapters; - bintools = final.pkgsBuildHost.llvmPackages.bintools; - lld = final.pkgsBuildHost.llvmPackages.lld; + bintools = final.pkgsBuildHost.llvmPackages'.bintools; + lld = final.pkgsBuildHost.llvmPackages'.lld; added-to-env = helpers.addToEnv platform.override.stdenv.env profile; stdenv' = adapt.addAttrsToDerivation (orig: { doCheck = false; - separateDebugInfo = true; + # separateDebugInfo = true; env = helpers.addToEnv added-to-env (orig.env or { }); nativeBuildInputs = (orig.nativeBuildInputs or [ ]) ++ [ bintools lld ]; - }) final.llvmPackages.stdenv; + }) final.llvmPackages'.stdenv; # note: rust-bin comes from oxa's overlay, not nixpkgs. This overlay only works if you have a rust overlay as well. 
- rust-toolchain = prev.rust-bin.fromRustupToolchainFile ../../rust-toolchain.toml; - rustPlatform' = prev.makeRustPlatform { + rust-toolchain = final.pkgsBuildHost.rust-bin.fromRustupToolchain { + channel = sources.rust.version; + components = [ + "rustc" + "cargo" + "rust-std" + "rust-docs" + "rustfmt" + "clippy" + "rust-analyzer" + "rust-src" + ]; + targets = [ + platform.info.target + ]; + }; + rustPlatform' = final.makeRustPlatform { stdenv = stdenv'; cargo = rust-toolchain; rustc = rust-toolchain; }; + rustPlatform'-dev = final.makeRustPlatform { + stdenv = final.llvmPackages'.stdenv; + cargo = rust-toolchain; + rustc = rust-toolchain; + }; # It is essential that we always use the same version of llvm that our rustc is backed by. # To minimize maintenance burden, we explicitly compute the version of LLVM we need by asking rustc # which version it is using. @@ -40,11 +61,11 @@ let # every time rust updates. # Unfortunately, this is also IFD, so it slows down the nix build a bit :shrug: llvm-version = builtins.readFile ( - prev.runCommand "llvm-version-for-our-rustc" + final.runCommand "llvm-version-for-our-rustc" { RUSTC = "${rust-toolchain.out}/bin/rustc"; - GREP = "${prev.pkgsBuildHost.gnugrep}/bin/grep"; - SED = "${prev.pkgsBuildHost.gnused}/bin/sed"; + GREP = "${final.pkgsBuildHost.gnugrep}/bin/grep"; + SED = "${final.pkgsBuildHost.gnused}/bin/sed"; } '' $RUSTC --version --verbose | \ @@ -54,6 +75,11 @@ let ); in { - inherit rust-toolchain rustPlatform' stdenv'; + inherit + rust-toolchain + rustPlatform' + rustPlatform'-dev + stdenv' + ; llvmPackages' = prev."llvmPackages_${llvm-version}"; } diff --git a/nix/pkgs/dpdk/default.nix b/nix/pkgs/dpdk/default.nix index 8e41adf26..5716bc3c7 100644 --- a/nix/pkgs/dpdk/default.nix +++ b/nix/pkgs/dpdk/default.nix @@ -12,17 +12,11 @@ rdma-core, libnl, python3, - build-params ? { - lto = "true"; - build-type = "release"; # "debug" | "release" - platform = "bluefield3"; - }, writeText, platform, ... 
}: - stdenv.mkDerivation { pname = "dpdk"; version = src.branch; @@ -270,19 +264,18 @@ stdenv.mkDerivation { cpu = '${cpu}' endian = '${endian}' [properties] - platform = '${build-params.platform}' + platform = '${platform.name}' libc = '${libc-vendor}' ''; in - with build-params; [ - "--buildtype=${build-type}" - "-Dauto_features=disabled" - "-Db_colorout=never" - "-Db_lto=${lto}" - "-Db_lundef=false" + "--buildtype=release" + "-Db_lto=true" + "-Db_lundef=false" # normally I would enable undef symbol checks, but it breaks sanitizer builds "-Db_pgo=off" "-Db_pie=true" + "-Dauto_features=disabled" + "-Db_colorout=never" "-Dbackend=ninja" "-Ddefault_library=static" "-Denable_docs=false" @@ -290,14 +283,13 @@ stdenv.mkDerivation { "-Dmax_numa_nodes=${toString platform.numa.max-nodes}" "-Dtests=false" # Running DPDK tests in CI is usually silly "-Duse_hpet=false" - "-Ddebug=false" ''-Ddisable_drivers=${lib.concatStringsSep "," disabledDrivers}'' ''-Denable_drivers=${lib.concatStringsSep "," enabledDrivers}'' ''-Denable_libs=${lib.concatStringsSep "," enabledLibs}'' ''-Ddisable_apps=*'' ''-Ddisable_libs=${lib.concatStringsSep "," disabledLibs}'' ] - ++ (if isCrossCompile then [ ''--cross-file=${cross-file}'' ] else [ ]); + ++ (if isCrossCompile then [ "--cross-file=${cross-file}" ] else [ ]); outputs = [ "dev" diff --git a/nix/pkgs/dplane-plugin/default.nix b/nix/pkgs/dplane-plugin/default.nix new file mode 100644 index 000000000..9eb5ecd6d --- /dev/null +++ b/nix/pkgs/dplane-plugin/default.nix @@ -0,0 +1,61 @@ +{ + stdenv, + + sources, + # build time + cmake, + dplane-rpc, + frr, + libyang, + pcre2, + protobufc, + json_c, + + # args + cmakeBuildType ? "Release", + ... 
+}: + +stdenv.mkDerivation (finalAttrs: { + pname = "dplane-plugin"; + version = sources.dplane-plugin.revision; + src = sources.dplane-plugin.outPath; + + doCheck = false; + doFixup = false; + enableParallelBuilding = true; + + dontUnpack = true; + + nativeBuildInputs = [ + cmake + ]; + + buildInputs = [ + dplane-rpc + frr.dataplane + json_c + libyang + pcre2 + protobufc + ]; + + configurePhase = '' + cmake \ + -DCMAKE_BUILD_TYPE=${cmakeBuildType} \ + -DGIT_BRANCH=${sources.dplane-plugin.branch} \ + -DGIT_COMMIT=${sources.dplane-plugin.revision} \ + -DGIT_TAG=${sources.dplane-plugin.revision} \ + -DBUILD_DATE=0 \ + -DOUT=${placeholder "out"} \ + -DHH_FRR_SRC=${frr.dataplane.build}/src/frr \ + -DHH_FRR_INCLUDE=${frr.dataplane}/include/frr \ + -DCMAKE_C_STANDARD=23 \ + -S "$src" + ''; + + buildPhase = '' + make DESTDIR="$out"; + ''; + +}) diff --git a/nix/pkgs/dplane-rpc/default.nix b/nix/pkgs/dplane-rpc/default.nix new file mode 100644 index 000000000..866eaed32 --- /dev/null +++ b/nix/pkgs/dplane-rpc/default.nix @@ -0,0 +1,48 @@ +{ + stdenv, + + # build time + sources, + cmake, + + # args + cmakeBuildType ? "Release", + ... +}: + +stdenv.mkDerivation +(finalAttrs: { + pname = "dplane-rpc"; + version = sources.dplane-rpc.revision; + src = sources.dplane-rpc.outPath; + + doCheck = false; + enableParallelBuilding = true; + + outputs = ["out" "dev"]; + + nativeBuildInputs = [ + cmake + ]; + + configurePhase = '' + cmake \ + -DCMAKE_BUILD_TYPE=${cmakeBuildType} \ + -DCMAKE_C_STANDARD=23 \ + -S ./clib . 
+ ''; + + buildPhase = '' + make DESTDIR="$out"; + ''; + + installPhase = '' + make DESTDIR="$out" install; + mv $out/usr/local/* $out + rmdir $out/usr/local + + mv $out/usr/include $out/include + rmdir $out/usr + ''; + +}) diff --git a/nix/pkgs/frr-agent/default.nix b/nix/pkgs/frr-agent/default.nix new file mode 100644 index 000000000..d6734e955 --- /dev/null +++ b/nix/pkgs/frr-agent/default.nix @@ -0,0 +1,20 @@ +{ + sources, + rustPlatform, + nukeReferences, + libgcc, + stdenv, + ... +}: +rustPlatform.buildRustPackage (final: { + pname = "frr-agent"; + version = sources.frr-agent.revision; + src = sources.frr-agent.outPath; + nativeBuildInputs = [ nukeReferences ]; + cargoLock = { + lockFile = final.src + "/Cargo.lock"; + }; + fixupPhase = '' + find "$out" -exec nuke-refs -e "$out" -e "${stdenv.cc.libc}" -e "${libgcc.lib}" '{}' +; + ''; +}) diff --git a/nix/pkgs/frr-config/config/etc/frr/daemons b/nix/pkgs/frr-config/config/etc/frr/daemons new file mode 100644 index 000000000..f01ba957b --- /dev/null +++ b/nix/pkgs/frr-config/config/etc/frr/daemons @@ -0,0 +1,126 @@ +# This file tells the frr package which daemons to start. +# +# Sample configurations for these daemons can be found in +# /usr/share/doc/frr/examples/. +# +# ATTENTION: +# +# When activating a daemon for the first time, a config file, even if it is +# empty, has to be present *and* be owned by the user and group "frr", else +# the daemon will not be started by /etc/init.d/frr. The permissions should +# be u=rw,g=r,o=. +# When using "vtysh" such a config file is also needed. It should be owned by +# group "frrvty" and set to ug=rw,o= though. Check /etc/pam.d/frr, too. +# +# The watchfrr, zebra and staticd daemons are always started. 
+# +bgpd=yes +ospfd=no +ospf6d=no +ripd=no +ripngd=no +isisd=no +pimd=no +pim6d=no +ldpd=no +nhrpd=no +eigrpd=no +babeld=no +sharpd=no +pbrd=no +bfdd=yes +fabricd=no +vrrpd=no +pathd=no + +# +# If this option is set the /etc/init.d/frr script automatically loads +# the config via "vtysh -b" when the servers are started. +# Check /etc/pam.d/frr if you intend to use "vtysh"! +# +vtysh_enable=yes +zebra_options=" -A 127.0.0.1 -s 90000000 --log-level debug --log stdout -M hh_dplane:'--local-dp-sock-path /var/run/frr/hh/plugin.sock --remote-dp-sock-path /var/run/frr/hh/dataplane.sock'" +mgmtd_options=" -A 127.0.0.1" +bgpd_options=" -A 127.0.0.1 -M bmp" +ospfd_options=" -A 127.0.0.1" +ospf6d_options=" -A ::1" +ripd_options=" -A 127.0.0.1" +ripngd_options=" -A ::1" +isisd_options=" -A 127.0.0.1" +pimd_options=" -A 127.0.0.1" +pim6d_options=" -A ::1" +ldpd_options=" -A 127.0.0.1" +nhrpd_options=" -A 127.0.0.1" +eigrpd_options=" -A 127.0.0.1" +babeld_options=" -A 127.0.0.1" +sharpd_options=" -A 127.0.0.1" +pbrd_options=" -A 127.0.0.1" +staticd_options="-A 127.0.0.1" +bfdd_options=" -A 127.0.0.1" +fabricd_options="-A 127.0.0.1" +vrrpd_options=" -A 127.0.0.1" +pathd_options=" -A 127.0.0.1" + + +# If you want to pass a common option to all daemons, you can use the +# "frr_global_options" variable. +# +frr_global_options="--limit-fds 100000" + + +# The list of daemons to watch is automatically generated by the init script. +# This variable can be used to pass options to watchfrr that will be passed +# prior to the daemon list. +# +# To make watchfrr create/join the specified netns, add the the "--netns" +# option here. It will only have an effect in /etc/frr//daemons, and +# you need to start FRR with "/usr/lib/frr/frrinit.sh start ". +# +#watchfrr_options="" + + +# configuration profile +# +#frr_profile="traditional" +#frr_profile="datacenter" + + +# This is the maximum number of FD's that will be available. 
Upon startup this +# is read by the control files and ulimit is called. Uncomment and use a +# reasonable value for your setup if you are expecting a large number of peers +# in say BGP. +# +#MAX_FDS=1024 + +# Uncomment this option if you want to run FRR as a non-root user. Note that +# you should know what you are doing since most of the daemons need root +# to work. This could be useful if you want to run FRR in a container +# for instance. +# FRR_NO_ROOT="yes" + +# For any daemon, you can specify a "wrap" command to start instead of starting +# the daemon directly. This will simply be prepended to the daemon invocation. +# These variables have the form daemon_wrap, where 'daemon' is the name of the +# daemon (the same pattern as the daemon_options variables). +# +# Note that when daemons are started, they are told to daemonize with the `-d` +# option. This has several implications. For one, the init script expects that +# when it invokes a daemon, the invocation returns immediately. If you add a +# wrap command here, it must comply with this expectation and daemonize as +# well, or the init script will never return. Furthermore, because daemons are +# themselves daemonized with -d, you must ensure that your wrapper command is +# capable of following child processes after a fork() if you need it to do so. +# +# If your desired wrapper does not support daemonization, you can wrap it with +# a utility program that daemonizes programs, such as 'daemonize'. An example +# of this might look like: +# +# bgpd_wrap="/usr/bin/daemonize /usr/bin/mywrapper" +# +# This is particularly useful for programs which record processes but lack +# daemonization options, such as perf and rr. +# +# If you wish to wrap all daemons in the same way, you may set the "all_wrap" +# variable. 
+# +#all_wrap="" diff --git a/nix/pkgs/frr-config/config/etc/frr/vtysh.conf b/nix/pkgs/frr-config/config/etc/frr/vtysh.conf new file mode 100644 index 000000000..e0ab9cb6f --- /dev/null +++ b/nix/pkgs/frr-config/config/etc/frr/vtysh.conf @@ -0,0 +1 @@ +service integrated-vtysh-config diff --git a/nix/pkgs/frr-config/config/etc/frr/zebra.conf b/nix/pkgs/frr-config/config/etc/frr/zebra.conf new file mode 100644 index 000000000..e69de29bb diff --git a/nix/pkgs/frr-config/config/etc/group b/nix/pkgs/frr-config/config/etc/group new file mode 100644 index 000000000..ce5947c38 --- /dev/null +++ b/nix/pkgs/frr-config/config/etc/group @@ -0,0 +1,3 @@ +root:x:0: +frr:x:92: +frrvty:x:85:frr diff --git a/nix/pkgs/frr-config/config/etc/nsswitch.conf b/nix/pkgs/frr-config/config/etc/nsswitch.conf new file mode 100644 index 000000000..790ed58a1 --- /dev/null +++ b/nix/pkgs/frr-config/config/etc/nsswitch.conf @@ -0,0 +1 @@ +hosts: mymachines files myhostname dns diff --git a/nix/pkgs/frr-config/config/etc/passwd b/nix/pkgs/frr-config/config/etc/passwd new file mode 100644 index 000000000..cef881b25 --- /dev/null +++ b/nix/pkgs/frr-config/config/etc/passwd @@ -0,0 +1,2 @@ +root:x:0:0:root:/root:/bin/bash +frr:x:100:92:FRR suite,,,:/var/empty:/bin/false diff --git a/nix/pkgs/frr-config/config/libexec/frr/docker-start b/nix/pkgs/frr-config/config/libexec/frr/docker-start new file mode 100644 index 000000000..a590013df --- /dev/null +++ b/nix/pkgs/frr-config/config/libexec/frr/docker-start @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +. /libexec/frr/frrcommon.sh + +ip -j -d nexthop show | \ + jq --raw-output '.[] | select(.protocol=="zebra").id' | \ + while read -r id; do ip nexthop del id "${id}"; done + +# shellcheck disable=SC2046 +/libexec/frr/watchfrr $(daemon_list) & + +/bin/frr-agent "${@}" & + +wait -n + +exit $? 
diff --git a/nix/pkgs/frr-config/default.nix b/nix/pkgs/frr-config/default.nix new file mode 100644 index 000000000..b03fd0bd5 --- /dev/null +++ b/nix/pkgs/frr-config/default.nix @@ -0,0 +1,23 @@ +{ + stdenv, + ... +}: + +stdenv.mkDerivation { + pname = "frr-config"; + version = "0"; + + doCheck = false; + enableParallelBuilding = true; + dontPatchShebangs = true; + + dontUnpack = true; + + src = ./config; + + installPhase = '' + cp -r $src $out + chmod +x $out/libexec/frr/docker-start + ''; + +} diff --git a/nix/pkgs/frr/clippy-helper.nix b/nix/pkgs/frr/clippy-helper.nix new file mode 100644 index 000000000..384523730 --- /dev/null +++ b/nix/pkgs/frr/clippy-helper.nix @@ -0,0 +1,62 @@ +{ + lib, + stdenv, + frrSrc, + + # build time + autoreconfHook, + flex, + bison, + pkg-config, + elfutils, + perl, + python3Minimal, + +}: + +stdenv.mkDerivation { + pname = "frr-clippy-helper"; + version = frrSrc.branch; + src = frrSrc.outPath; + + nativeBuildInputs = [ + autoreconfHook + bison + flex + perl + pkg-config + ]; + + buildInputs = [ + python3Minimal + ] + ++ lib.optionals (lib.meta.availableOn stdenv.hostPlatform elfutils) [ + elfutils + ]; + + configureFlags = [ + "--enable-clippy-only" + ]; + + installPhase = '' + mkdir -p $out/bin + cp lib/clippy $out/bin + ''; + + enableParallelBuilding = true; + + meta = with lib; { + homepage = "https://frrouting.org/"; + description = "FRR routing daemon suite: CLI helper tool clippy"; + longDescription = '' + This small tool is used to support generating CLI code for FRR. It is split out here, + to support cross-compiling, because it needs to be compiled with the build system toolchain + and not the target host one. 
+ ''; + license = with licenses; [ + gpl2Plus + lgpl21Plus + ]; + platforms = platforms.unix; + }; +} diff --git a/nix/pkgs/frr/default.nix b/nix/pkgs/frr/default.nix new file mode 100644 index 000000000..04ec2984a --- /dev/null +++ b/nix/pkgs/frr/default.nix @@ -0,0 +1,214 @@ +{ + frrSrc, + lib, + stdenv, + + # build time + autoreconfHook, + bison, + buildPackages, + flex, + perl, + pkg-config, + python3Minimal, + nukeReferences, + removeReferencesTo, + + c-ares, + elfutils, + json_c, + libcap, + libxcrypt, + libyang, + pcre2, + readline, + rtrlib, + libgccjit, + + # other general options besides snmp support + numMultipath ? 8, + + # routing daemon options + bgpdSupport ? true, + bfddSupport ? true, + staticdSupport ? true, + ospfdSupport ? false, + isisdSupport ? false, + + babeldSupport ? false, + eigrpdSupport ? false, + fabricdSupport ? false, + ldpdSupport ? false, + nhrpdSupport ? false, + ospf6dSupport ? false, + pathdSupport ? false, + pbrdSupport ? false, + pim6dSupport ? false, + pimdSupport ? false, + ripdSupport ? false, + ripngdSupport ? false, + sharpdSupport ? false, + vrrpdSupport ? false, + + # BGP options + bgpAnnounce ? true, + bgpBmp ? true, + bgpVnc ? false, + bgpRpki ? false, + + # OSPF options + ospfApi ? false, + + vtysh-extensions ? false, + + ... +}: + +stdenv.mkDerivation (finalAttrs: { + pname = "frr"; + version = frrSrc.branch; + dontPatchShebangs = false; + dontFixup = false; + dontPatchElf = false; + + outputs = [ + "out" + "build" + ]; + + src = frrSrc.outPath; + + # Without the std explicitly set, we may run into abseil-cpp + # compilation errors. 
+ CXXFLAGS = "-std=gnu++23"; + + nativeBuildInputs = [ + autoreconfHook + bison + elfutils + flex + perl + pkg-config + python3Minimal + nukeReferences + removeReferencesTo + ]; + + buildInputs = [ + c-ares + json_c + libcap + libgccjit + libxcrypt + libyang + pcre2 + python3Minimal + readline + ] + ++ lib.optionals bgpRpki [ rtrlib ]; + + # cross-compiling: clippy is compiled with the build host toolchain, split it out to ease + # navigation in dependency hell + clippy-helper = buildPackages.callPackage ./clippy-helper.nix { + inherit frrSrc; + }; + + configureFlags = [ + "--enable-python-runtime" + "--enable-fpm=netlink" # try to disable later + "--with-moduledir=/lib/frr/modules" + # rpath causes confusion in module linking where bmp gets linked to /build (which is broken). + # dontPatchElf and dontFixup are both set to false, so nix will adjust to rpath correctly for us after + # the initial linking step. + "--enable-rpath=no" + + "--enable-configfile-mask=0640" + "--enable-logfile-mask=0640" + "--enable-user=frr" + "--enable-group=frr" + "--enable-vty-group=frrvty" + + "--enable-config-rollbacks=no" + "--disable-doc" + "--disable-doc-html" + "--enable-grpc=no" + "--enable-protobuf=no" + "--enable-scripting=no" + "--enable-sysrepo=no" + "--enable-zeromq=no" + + "--with-libpam=no" + + "--disable-silent-rules" + "--enable-multipath=${toString numMultipath}" + "--localstatedir=/run/frr" + "--includedir=/include" + "--sbindir=/libexec/frr" + "--bindir=/bin" + "--libdir=/lib" + "--prefix=/frr" + "--sysconfdir=/etc" + "--with-clippy=${finalAttrs.clippy-helper}/bin/clippy" + # general options + "--enable-irdp=no" + "--enable-mgmtd=yes" + "--enable-rtadv=yes" + "--enable-watchfrr=yes" + + "--enable-shared" + "--enable-static" + "--enable-static-bin" + + # routing protocols + (lib.strings.enableFeature babeldSupport "babeld") + (lib.strings.enableFeature bfddSupport "bfdd") + (lib.strings.enableFeature bgpdSupport "bgpd") + (lib.strings.enableFeature eigrpdSupport 
"eigrpd") + (lib.strings.enableFeature fabricdSupport "fabricd") + (lib.strings.enableFeature isisdSupport "isisd") + (lib.strings.enableFeature ldpdSupport "ldpd") + (lib.strings.enableFeature nhrpdSupport "nhrpd") + (lib.strings.enableFeature ospf6dSupport "ospf6d") + (lib.strings.enableFeature ospfdSupport "ospfd") + (lib.strings.enableFeature pathdSupport "pathd") + (lib.strings.enableFeature pbrdSupport "pbrd") + (lib.strings.enableFeature pim6dSupport "pim6d") + (lib.strings.enableFeature pimdSupport "pimd") + (lib.strings.enableFeature ripdSupport "ripd") + (lib.strings.enableFeature ripngdSupport "ripngd") + (lib.strings.enableFeature sharpdSupport "sharpd") + (lib.strings.enableFeature staticdSupport "staticd") + (lib.strings.enableFeature vrrpdSupport "vrrpd") + # BGP options + (lib.strings.enableFeature bgpAnnounce "bgp-announce") + (lib.strings.enableFeature bgpBmp "bgp-bmp") + (lib.strings.enableFeature bgpRpki "rpki") + (lib.strings.enableFeature bgpVnc "bgp-vnc") + # OSPF options + (lib.strings.enableFeature ospfApi "ospfapi") + # Cumulus options + "--enable-cumulus=no" + "--disable-cumulus" + ]; + + patches = [ + ./patches/yang-hack.patch + ./patches/xrelifo.py.fix.patch + ] + ++ lib.optionals vtysh-extensions [ + ./patches/vtysh-extensions.h.patch + ]; + + buildPhase = '' + make "-j$(nproc)"; + ''; + + installPhase = '' + make DESTDIR=$out install; + mkdir -p $build/src/ + cp -r . 
$build/src/frr + ''; + + doCheck = false; + enableParallelBuilding = true; +}) diff --git a/nix/pkgs/frr/patches/xrelifo.py.fix.patch b/nix/pkgs/frr/patches/xrelifo.py.fix.patch new file mode 100644 index 000000000..e699ac855 --- /dev/null +++ b/nix/pkgs/frr/patches/xrelifo.py.fix.patch @@ -0,0 +1,17 @@ +diff --git a/python/xrelfo.py b/python/xrelfo.py +--- a/python/xrelfo.py ++++ b/python/xrelfo.py +@@ -479,13 +479,9 @@ + try: + xrelfo.load_file(fn) + except: +- errors += 1 + sys.stderr.write("while processing %s:\n" % (fn)) + traceback.print_exc() + +- if xrelfo.note_warn and args.Werror: +- errors += 1 +- + for option in dir(args): + if option.startswith("W") and option != "Werror": + checks = sorted(xrelfo.check(args)) \ No newline at end of file diff --git a/nix/pkgs/frr/patches/yang-hack.patch b/nix/pkgs/frr/patches/yang-hack.patch new file mode 100644 index 000000000..814b30d31 --- /dev/null +++ b/nix/pkgs/frr/patches/yang-hack.patch @@ -0,0 +1,12 @@ +diff --git a/configure.ac b/configure.ac +--- a/configure.ac ++++ b/configure.ac +@@ -2091,8 +2091,6 @@ + ]) + ], [[#include <libyang/libyang.h>]]) + +-AC_CHECK_LIB([yang],[lyd_find_xpath3],[],[AC_MSG_ERROR([m4_normalize([ +-libyang missing lyd_find_xpath3])])]) + dnl -- don't add lyd_new_list3 to this list unless bug is fixed upstream + dnl -- https://github.com/CESNET/libyang/issues/2149 + AC_CHECK_FUNCS([ly_strerrcode ly_strvecode lyd_trim_xpath]) \ No newline at end of file diff --git a/nix/platforms.nix b/nix/platforms.nix index b6b54310c..9d8fc92c7 100644 --- a/nix/platforms.nix +++ b/nix/platforms.nix @@ -82,6 +82,16 @@ lib.fix ( final: platforms.${platform} // { + # NOTE: sadly, bluefield2 compiles with the name bluefield in DPDK (for some DPDK specific reason). + # That said, we generate the correct cross compile file for bluefield2 (unlike the soc defn + # in the dpdk meson.build file, which only goes half way and picks armv8-a instead of 8.2-a, or, better yet + # cortex-a72, which is the actual CPU of bluefield 2).
+ # We don't currently expect to meaningfully support BF2, but it is a handy test target for the build tooling. + name = + { + bluefield2 = "bluefield"; + } + .${platform} or platform; info = { x86_64 = { diff --git a/nix/profiles.nix b/nix/profiles.nix index 98af3083c..75bc1a38c 100644 --- a/nix/profiles.nix +++ b/nix/profiles.nix @@ -27,6 +27,7 @@ let "-Cdebuginfo=full" "-Cdwarf-version=5" "-Csymbol-mangling-version=v0" + "-Clink-arg=-Wl,--as-needed,--gc-sections" # FRR builds don't like this, but rust does fine ] ++ (map (flag: "-Clink-arg=${flag}") common.NIX_CFLAGS_LINK); optimize-for.debug.NIX_CFLAGS_COMPILE = [ @@ -50,8 +51,6 @@ let ]; optimize-for.performance.NIX_CFLAGS_LINK = optimize-for.performance.NIX_CXXFLAGS_COMPILE ++ [ "-Wl,--lto-whole-program-visibility" - "-Wl,--gc-sections" - "-Wl,--as-needed" ]; optimize-for.performance.RUSTFLAGS = [ "-Clinker-plugin-lto" @@ -63,14 +62,12 @@ let "-fstack-clash-protection" # we always want pic/pie and GOT offsets should be computed at compile time whenever possible "-Wl,-z,relro,-z,now" - # "-fcf-protection=full" # requires extra testing before we enable ]; secure.NIX_CXXFLAGS_COMPILE = secure.NIX_CFLAGS_COMPILE; # handing the CFLAGS back to clang/lld is basically required for -fsanitize secure.NIX_CFLAGS_LINK = secure.NIX_CFLAGS_COMPILE; secure.RUSTFLAGS = [ "-Crelro-level=full" - # "-Zcf-protection=full" ] ++ (map (flag: "-Clink-arg=${flag}") secure.NIX_CFLAGS_LINK); march.x86_64.NIX_CFLAGS_COMPILE = [ @@ -81,6 +78,7 @@ let "-mrtm" # TODO: try to convince DPDK not to rely on rtm "-mcrc32" "-mssse3" + "-fcf-protection=full" ]; march.x86_64.NIX_CXXFLAGS_COMPILE = march.x86_64.NIX_CFLAGS_COMPILE; march.x86_64.NIX_CFLAGS_LINK = march.x86_64.NIX_CXXFLAGS_COMPILE; @@ -91,6 +89,7 @@ let # proved to be broken in Intel's implementation, and AMD never built them in the first place. 
# "-Ctarget-feature=+rtm,+crc32,+ssse3" "-Ctarget-feature=+ssse3" + "-Zcf-protection=full" ] ++ (map (flag: "-Clink-arg=${flag}") march.x86_64.NIX_CFLAGS_LINK); march.aarch64.NIX_CFLAGS_COMPILE = [ ]; @@ -215,7 +214,7 @@ let builtins.foldl' ( acc: element: acc // (builtins.mapAttrs (var: val: (acc.${var} or [ ]) ++ val) element) ) { } features; - profile-map = { + profile-map = rec { debug = combine-profiles [ common optimize-for.debug @@ -225,6 +224,7 @@ let optimize-for.performance secure ]; + fuzz = release; }; in combine-profiles ( diff --git a/npins/sources.json b/npins/sources.json index 1913f4ea6..40d08efdf 100644 --- a/npins/sources.json +++ b/npins/sources.json @@ -11,10 +11,10 @@ "version_upper_bound": null, "release_prefix": null, "submodules": false, - "version": "v0.23.0", - "revision": "61594d90dab41c2f3cd336baf0a8fcd6c37e0408", - "url": "https://api.github.com/repos/ipetkov/crane/tarball/refs/tags/v0.23.0", - "hash": "sha256-VFkNyxHxkqGp8gf8kfFMW1j6XeBy609kv6TE9uF/0Js=" + "version": "v0.23.1", + "revision": "fe2df77bce0b8c492a09e34d281f0fb62d1bea43", + "url": "https://api.github.com/repos/ipetkov/crane/tarball/refs/tags/v0.23.1", + "hash": "sha256-aIlv7FRXF9q70DNJPI237dEDAznSKaXmL5lfK/Id/bI=" }, "dpdk": { "type": "Git", @@ -29,49 +29,88 @@ "url": "https://github.com/githedgehog/dpdk/archive/6736a6e32f5b3a8d16b2bd0e84b73af32540de77.tar.gz", "hash": "sha256-aVtrmUtFkkC2SsnfWJmRN/Klwfb/EGLG+YYtSLm5tBY=" }, - "frr": { + "dplane-plugin": { "type": "Git", "repository": { "type": "GitHub", - "owner": "FRRouting", - "repo": "frr" + "owner": "githedgehog", + "repo": "dplane-plugin" }, - "branch": "stable/10.5", + "branch": "master", "submodules": false, - "revision": "e00528362e9bd6abfe772496db955b4b138d192f", - "url": "https://github.com/FRRouting/frr/archive/e00528362e9bd6abfe772496db955b4b138d192f.tar.gz", - "hash": "sha256-o6PW5PINy/E5Ou/raat8NswWfxNzAA8Wurv8h/3isEE=" + "revision": "ef3e718651d59fd4da5787a9c05e06a594c0136c", + "url": 
"https://github.com/githedgehog/dplane-plugin/archive/ef3e718651d59fd4da5787a9c05e06a594c0136c.tar.gz", + "hash": "sha256-CRsHKk50XnV23uVJxjN9ZtsIFH/BwZYlW27UL4V0D6E=" }, - "frr-dp": { + "dplane-rpc": { "type": "Git", "repository": { "type": "GitHub", "owner": "githedgehog", - "repo": "frr" + "repo": "dplane-rpc" }, - "branch": "hh-master-10.5", + "branch": "master", "submodules": false, - "revision": "d6cbff16e2261a7e1a88c6369de9e5c1d1bdb4f7", - "url": "https://github.com/githedgehog/frr/archive/d6cbff16e2261a7e1a88c6369de9e5c1d1bdb4f7.tar.gz", - "hash": "sha256-dUjnWQFcP+iUcYl1vetWbawL9HZt8FmMzC0cJdUJrxI=" + "revision": "e8fc33db10e1d00785f2a2b90cbadcad7900f200", + "url": "https://github.com/githedgehog/dplane-rpc/archive/e8fc33db10e1d00785f2a2b90cbadcad7900f200.tar.gz", + "hash": "sha256-tjN4qSbKrWfosOV3wt2AnQxmVL0BPZYBjAHG3X00+aM=" }, - "gateway": { + "fabric": { "type": "GitRelease", "repository": { "type": "GitHub", "owner": "githedgehog", - "repo": "gateway" + "repo": "fabric" }, "pre_releases": false, "version_upper_bound": null, "release_prefix": null, "submodules": false, - "version": "v0.38.0", - "revision": "039d7d2c7785e47197399c046ad668f958b3091e", - "url": "https://api.github.com/repos/githedgehog/gateway/tarball/refs/tags/v0.38.0", - "hash": "sha256-3c1OfNRjZNTAHUHUlwnAVUvb12No+XIpeW0UBtto2Tk=", + "version": "v0.113.2", + "revision": "6d4438e4ddde8079f879e90f603080a1fd027ef8", + "url": "https://api.github.com/repos/githedgehog/fabric/tarball/refs/tags/v0.113.2", + "hash": "sha256-vGv9yXunQ+jTbsycZ3ppVpndSuCESBmPcUiLm2X0nfY=", "frozen": true }, + "frr": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "FRRouting", + "repo": "frr" + }, + "branch": "stable/10.5", + "submodules": false, + "revision": "5013dd523001384b6fa0c14b7795a8eebafba523", + "url": "https://github.com/FRRouting/frr/archive/5013dd523001384b6fa0c14b7795a8eebafba523.tar.gz", + "hash": "sha256-7wpSzVrPeyLc972xq+JeOMd4Dw3e/nyHjbRqeOZkQBc=" + }, + "frr-agent": { + 
"type": "Git", + "repository": { + "type": "GitHub", + "owner": "githedgehog", + "repo": "frr-agent" + }, + "branch": "master", + "submodules": false, + "revision": "16fc0c715d6c83125e51ef68959a6dfe8e8fd847", + "url": "https://github.com/githedgehog/frr-agent/archive/16fc0c715d6c83125e51ef68959a6dfe8e8fd847.tar.gz", + "hash": "sha256-h32eJSnLB2U3tKGp/Uk30XeOVvHelR7n9EN3stOoYGE=" + }, + "frr-dp": { + "type": "Git", + "repository": { + "type": "GitHub", + "owner": "githedgehog", + "repo": "frr" + }, + "branch": "hh-master-10.5", + "submodules": false, + "revision": "d6cbff16e2261a7e1a88c6369de9e5c1d1bdb4f7", + "url": "https://github.com/githedgehog/frr/archive/d6cbff16e2261a7e1a88c6369de9e5c1d1bdb4f7.tar.gz", + "hash": "sha256-dUjnWQFcP+iUcYl1vetWbawL9HZt8FmMzC0cJdUJrxI=" + }, "kopium": { "type": "GitRelease", "repository": { @@ -91,8 +130,8 @@ "nixpkgs": { "type": "Channel", "name": "nixpkgs-unstable", - "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre934390.48698d12cc10/nixexprs.tar.xz", - "hash": "sha256-YpOjLmOGokqTiFjxFu0ioMpMbxHGP6CckfgmqV5OAck=" + "url": "https://releases.nixos.org/nixpkgs/nixpkgs-26.05pre964972.9cf7092bdd60/nixexprs.tar.xz", + "hash": "sha256-SpvueJPdKD+U/Q2btT3e3rAYgbPQP6nbcYYfK3BJQDA=" }, "perftest": { "type": "Git", @@ -103,9 +142,9 @@ }, "branch": "master", "submodules": false, - "revision": "c77a2b17ccee8a2f7434135513794bdd29881f1f", - "url": "https://github.com/linux-rdma/perftest/archive/c77a2b17ccee8a2f7434135513794bdd29881f1f.tar.gz", - "hash": "sha256-3hEmBo1SPePbeOjT3tXAWZAnt///4lv4lZuh6tlkEDI=" + "revision": "ea1c778782df3ec09b5f8101017fc0140b51a63d", + "url": "https://github.com/linux-rdma/perftest/archive/ea1c778782df3ec09b5f8101017fc0140b51a63d.tar.gz", + "hash": "sha256-O29UkU0fwbGjyuT6Rbxs1imus1CHZxTLkiyuJtOnxBc=" }, "rdma-core": { "type": "Git", @@ -131,10 +170,10 @@ "version_upper_bound": null, "release_prefix": null, "submodules": false, - "version": "1.93.0", - "revision": 
"539f0812230e3e8b7b42bab0ec4317ae3750f568", - "url": "https://api.github.com/repos/rust-lang/rust/tarball/refs/tags/1.93.0", - "hash": "sha256-Rx4bJh2mjGRhwltKSlh+3c2rWdydazpKR1DuXehxt7k=" + "version": "1.94.0", + "revision": "5518e0609fc902e41fcdb470cb3adca7376759e3", + "url": "https://api.github.com/repos/rust-lang/rust/tarball/refs/tags/1.94.0", + "hash": "sha256-Pq/9fcLHWFJnWousVBJfHkB6vzsyGjB7Ohdrn7Eik1A=" }, "rust-overlay": { "type": "Git", @@ -145,9 +184,9 @@ }, "branch": "master", "submodules": false, - "revision": "e9bcd12156a577ac4e47d131c14dc0293cc9c8c2", - "url": "https://github.com/oxalica/rust-overlay/archive/e9bcd12156a577ac4e47d131c14dc0293cc9c8c2.tar.gz", - "hash": "sha256-YNzh46h8fby49yOIB40lNoQ9ucVoXe1bHVwkZ4AwGe0=" + "revision": "c807e83cc2e32adc35f51138b3bdef722c0812ab", + "url": "https://github.com/oxalica/rust-overlay/archive/c807e83cc2e32adc35f51138b3bdef722c0812ab.tar.gz", + "hash": "sha256-ba3c+hS7KzEiwtZRGHagIAYdcmdY3rCSWVCyn64rx7s=" } }, "version": 7 diff --git a/routing/Cargo.toml b/routing/Cargo.toml index 1b27ea25d..52f8a46b8 100644 --- a/routing/Cargo.toml +++ b/routing/Cargo.toml @@ -38,7 +38,7 @@ netgauze-bgp-pkt = { workspace = true } netgauze-bmp-pkt = { workspace = true } serde = { workspace = true, features = ["derive"] } thiserror = { workspace = true } -tokio = { workspace = true, features = ["fs", "io-util", "macros", "rt", "sync", "rt", "sync", "net", "macros"] } +tokio = { workspace = true, features = ["fs", "io-util", "sync", "rt", "net", "macros"] } tokio-util = { workspace = true, features = ["codec"] } tracing = { workspace = true } @@ -52,4 +52,5 @@ concurrency = { workspace = true } lpm = { workspace = true, features = ["testing"] } net = { workspace = true, features = ["test_buffer"] } rand = { workspace = true, default-features = false, features = ["thread_rng"] } +tokio = { workspace = true, features = ["time", "test-util"] } tracing-test = { workspace = true, features = [] } diff --git a/rust-toolchain.toml 
b/rust-toolchain.toml deleted file mode 100644 index 63ed2cf8b..000000000 --- a/rust-toolchain.toml +++ /dev/null @@ -1,27 +0,0 @@ -[toolchain] -# NOTE: you can and should manually update this on new rust releases -channel = "1.93.0" - -components = [ - "rustc", - "cargo", - "rust-std", - "rust-docs", - "rustfmt", - "clippy", - "rust-analyzer", - "rust-src", - - ## disabled components ## - # "rust-mingw", # not relevant to us - # "llvm-tools", # we already have a full llvm in the npins, no need for another - # "miri", # not yet functional for us - # "rustc-codegen-cranelift-preview" # not relevant to us -] - -targets = [ - "x86_64-unknown-linux-gnu", - "x86_64-unknown-linux-musl", - "aarch64-unknown-linux-gnu", - "aarch64-unknown-linux-musl" -] diff --git a/scripts/gen-pins.sh b/scripts/gen-pins.sh index b2c89c6f8..3e023d600 100755 --- a/scripts/gen-pins.sh +++ b/scripts/gen-pins.sh @@ -50,12 +50,16 @@ npins add github linux-rdma perftest --branch master # Will pick highest tag on pin bump. npins add github kube-rs kopium -# The gateway is needed to define the CRD we use for code generation at build time. -# The gateway should be pinned to a specific an manually changed version, the best way to reach this goal is to pin the +# The fabric is needed to define the CRD we use for code generation at build time. +# The fabric should be pinned to a specific and manually changed version, the best way to reach this goal is to pin the # release and freeze it with npins. Then you can manually update with `npins update --frozen` instead of repeatedly # editing the script or otherwise fighting the update process. 
-npins add github githedgehog gateway # Will pick highest tagged version on pin bump -npins freeze gateway +npins add github githedgehog fabric # Will pick highest tagged version on pin bump +npins freeze fabric npins add github FRRouting frr --branch stable/10.5 # floats with branch on pin bump npins add github --name frr-dp githedgehog frr --branch hh-master-10.5 # floats with branch on pin bump + +npins add github githedgehog frr-agent --branch master # floats with branch on pin bump +npins add github githedgehog dplane-rpc --branch master # floats with branch on pin bump +npins add github githedgehog dplane-plugin --branch master # floats with branch on pin bump diff --git a/scripts/k8s-crd.env b/scripts/k8s-crd.env deleted file mode 100644 index fc4f489f6..000000000 --- a/scripts/k8s-crd.env +++ /dev/null @@ -1,5 +0,0 @@ -K8S_GATEWAY_AGENT_REF=v0.42.0 -K8S_GATEWAY_AGENT_CRD_URL="https://raw.githubusercontent.com/githedgehog/gateway/${K8S_GATEWAY_AGENT_REF}/config/crd/bases/gwint.githedgehog.com_gatewayagents.yaml" - -# path to local CRD definitions -K8S_GATEWAY_AGENT_CRD_PATH= diff --git a/scripts/rust.env b/scripts/rust.env deleted file mode 100644 index 823c85e94..000000000 --- a/scripts/rust.env +++ /dev/null @@ -1,24 +0,0 @@ -RUSTC_BOOTSTRAP=1 -NEXTEST_EXPERIMENTAL_LIBTEST_JSON=1 -LINKER="-C linker=./compile-env/bin/clang -C link-arg=--ld-path=./compile-env/bin/ld.lld" -RELRO="-C relro-level=full" -CRT_STATIC="-C target-feature=+crt-static" -CRT_DYNAMIC="-C target-feature=-crt-static" -DEBUG="-C debuginfo=full -C split-debuginfo=off -C dwarf-version=5 -Z embed-source" -DEBUG_ASSERTIONS_ON="-C debug-assertions=on" -DEBUG_ASSERTIONS_OFF="-C debug-assertions=off" -OVERFLOW_CHECK_ON="-C overflow-checks=on" -OVERFLOW_CHECK_OFF="-C overflow-checks=off" -LTO="-C linker-plugin-lto -C lto=thin -C embed-bitcode=yes -C codegen-units=1" -COVERAGE="-C instrument-coverage" -OPTIMIZE_OFF="${DEBUG_ASSERTIONS_ON} ${OVERFLOW_CHECK_ON}" -OPTIMIZE_ON="-C opt-level=3 ${LTO} 
${DEBUG_ASSERTIONS_OFF} ${OVERFLOW_CHECK_OFF}" -OPTIMIZE_FUZZ="-C opt-level=3 ${LTO} ${DEBUG_ASSERTIONS_ON} ${OVERFLOW_CHECK_ON}" -TARGET_CPU_DEBUG="-C target-cpu=generic" -TARGET_CPU_RELEASE="-C target-cpu=x86-64-v3" -TOKIO_UNSTABLE="--cfg tokio_unstable" - -COMMON="${LINKER} ${RELRO} ${DEBUG}" -RUSTFLAGS_DEBUG="${COMMON} ${OPTIMIZE_OFF} ${TARGET_CPU_DEBUG} ${CRT_DYNAMIC} ${TOKIO_UNSTABLE}" -RUSTFLAGS_RELEASE="${COMMON} ${OPTIMIZE_ON} ${TARGET_CPU_RELEASE} ${CRT_DYNAMIC} ${TOKIO_UNSTABLE}" -RUSTFLAGS_FUZZ="${COMMON} ${OPTIMIZE_FUZZ} ${TARGET_CPU_RELEASE} ${CRT_DYNAMIC} ${TOKIO_UNSTABLE}" diff --git a/scripts/test-runner.sh b/scripts/test-runner.sh deleted file mode 100755 index 96a1a640c..000000000 --- a/scripts/test-runner.sh +++ /dev/null @@ -1,195 +0,0 @@ -#!/bin/bash - -# SPDX-License-Identifier: Apache-2.0 -# Copyright Open Network Fabric Authors - -# Cargo automatically runs this script for every unit test (this applies to nextest as well). -# The script has two main responsibilities: -# -# 1. It runs `setcap` on the _test binary_ to elevate the test's _permitted_ capabilities. -# This action _does not_ cause the tests to run with these capabilities active by default. -# That would involve setting the _effective_ capabilities for the test binary (which we don't do). -# Instead, assigning the _permitted_ capabilities allows the use of the `caps` crate to allow us to request elevated -# permissions for specific sections of test code. -# -# The purpose of these elevated privileges is to allow the tests to create and destroy virtual network interfaces and -# network namespaces (as is required for integration testing). -# -# 2. It bind mounts the (setcap modified) test binary, the project directory, and a few other files into a (read-only) -# docker container (which executes the test). 
This docker container contains _only_ libc and libgcc_s (to better -# simulate our deployment environment and discourage faulty assumptions about what will be available at runtime). -# -# The purpose of this container is to -# * minimize the damage a faulty test might do -# * make sure that we aren't relying on resources only available on the developer's machine in the tests (test like -# we are in prod). -# -# Hopefully, this process also requires us to carefully think about what parts of our code require which privileges (and -# to document these requirements carefully). I'm lookin' at you, future me :) - -set -euo pipefail - - -get_docker_sock() { - declare -r DOCKER_HOST="${DOCKER_HOST:-unix:///var/run/docker.sock}" - declare -r without_unix="${DOCKER_HOST##unix://}" - if [ -S "${without_unix}" ]; then - printf -- '%s' "${without_unix}" - elif [ -S /run/docker/docker.sock ]; then - printf -- '%s' "/run/docker/docker.sock" - elif [ -S /var/run/docker.sock ]; then - printf -- '%s' "/var/run/docker.sock" - fi -} - -# compute the location of the directory which contains this file. -declare script_dir -script_dir="$(readlink -e "$(dirname "${0}")")" -declare -r script_dir - -# compute the location of the directory which contains this project. -declare project_dir -project_dir="$(readlink -e "${script_dir}/..")" -declare -r project_dir - -# NOTE: Cargo dispatches this script. -# Therefore, the PATH variable is set in config.toml to point to our compile-env; not the systems normal PATH. -# We can't meaningfully ship sudo in the compile-env (for a lot of reasons). -# It is there, but it won't have the 0 uid owner or its setuid bit set, so it can't work. -# Even if we fixed that, /etc/sudoers et al. wouldn't be reliably configured. -# Thus, we need to look it up on the "normal" PATH. We don't have the official "normal" PATH available, so we check -# the usual suspects to find sudo. 
-declare SUDO -SUDO="$(PATH="/run/wrappers/bin:/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin:${PATH}" which sudo)" -declare -r SUDO - -# Start with a basic check: we have no reason to assign caps to files we don't own or can't execute. -check_if_reasonable() { - declare -r prog="${1}" - - if [ ! -x "${prog}" ]; then - >&2 echo "ERROR: ${prog} is not executable" - return 1 - fi - - if [ ! -O "${prog}" ]; then - >&2 echo "ERROR: ${prog} is not owned by $(whoami), refusing to edit capabilities" - return 1 - fi - - if [ ! -G "${prog}" ]; then - >&2 echo "ERROR: ${prog} is not owned by $(whoami) effective user group, refusing to edit capabilities" - return 1 - fi -} - - -# some IDEs (e.g., rust-rover) use a helper to run tests / debug sessions. -# in such cases, the test exe is actually $2 ($1 shouldn't have any special privileges in that case) -declare test_exe -if [ -x "${2:-}" ]; then - test_exe="${2}" -else - test_exe="${1}" -fi -declare -r test_exe -check_if_reasonable "${test_exe}" - -# Pull the current version of the sysroot from the env. -# This lets us pick the correct libc container. -source "${script_dir}/dpdk-sys.env" - -declare -ra WRAPPED_TEST_SUITES=( - "dataplane-interface-manager" - "dataplane-mgmt" -) - -declare -i SHOULD_WRAP=0 -declare test_suite -for test_suite in "${WRAPPED_TEST_SUITES[@]}"; do - if [ "${CARGO_PKG_NAME-CARGO_PKG_NAME_NOT_SET}" == "${test_suite}" ]; then - SHOULD_WRAP=1 - break - fi -done -declare -ri SHOULD_WRAP - -# This is the list of capabilities to add to the test binary. -# Note: do not add =e or =i to this setcap command! We don't want privileged execution by default. -# Note: if you adjust this list, then you also need to adjust the symmetric list given to the docker run command. -declare -r CAPS='cap_net_admin,cap_net_raw,cap_sys_admin,cap_sys_rawio=p' - -if [ "${TEST_TYPE:-""}" = "FUZZ" ]; then - # In this branch we are running full fuzz tests. 
- # These tests are only run from a just command which has already wrapped this script in a docker container. - - # In the case of the full fuzz tests, libstdc++.so.6 will be linked into the test binary because libfuzzer is an LLVM - # project (LLVM is a C++ codebase). - # Unfortunately, the combination of bolero's RUSTFLAGS and the nix fenix rust overlay _do not_ set the rpath for - # libstdc++.so.6. - # As a result, a naive attempt to execute the test binary in the compile-env will result in a file not found error - # when the dynamic linker is unable to find libstdc++.so.6. - # Fortunately, this is relatively easy to fix; we need to patch the test binary to make sure it resolves to the - # exact libstdc++.so.6 file which it was liked against. - # If the compile-env is correct, then `/lib/libstdc++.so.6` will always be a symlink to the `/nix` store which - # contains the correct dynamic library. - patchelf --replace-needed libstdc++.so.6 "$(readlink -e /lib/libstdc++.so.6)" "${test_exe}" - # note: we don't need ${SUDO} here (i.e., we can resolve sudo via the $PATH) because this branch only ever happens - # when this script is being executed in the compile-env; the compile-env is the only place environment able to execute - # the full fuzz tests. - sudo setcap "${CAPS}" "${test_exe}" - exec "${@}" -elif [ "${SHOULD_WRAP}" -eq 0 ]; then - # In this branch - # 1. we are not doing a full fuzz test run, - # 2. and we are not running a test which requires a container wrapper. - # As a consequence, we should never need to call setcap on the test binary. - # We can just run it directly and be done. - exec "${@}" -fi - -# If we reached this point then we aren't using the full fuzz test setup. -# Instead, we are trying to run semi-privileged tests in a libc-container. -# We still need to add capabilities to the test binary, but in this case we need to make sure we are using the -# host system's sudo binary. 
-"${SUDO}" setcap "${CAPS}" "${test_exe}" - -# Now we can run the docker container -# -# Notes about this command: -# * Note that we mount everything we can as read-only -# * --ipc=host and --pid=host are to allow debuggers to connect to the tests more easily. -# * We mount $1 in case it is an IDE's helper runner. -# If not, then no harm has been done as $1 will be mounted by the project_dir mount anyway. -# * We drop all caps and then add back just the caps we know we need. -# This allows those capabilities into our ambient+inheritable set, letting us elevate to them as needed. -# Critically, it _does not_ give us these capabilities by default (i.e., they aren't in our effective set) because -# the above setcap command has enumerated exactly what our defaults should be. -# * If you adjust the list of --cap-add arguments, then you need to adjust the CAPS env var as well. -docker run \ - --rm \ - --interactive \ - --mount "type=bind,source=$(readlink -e "${1}"),target=$(readlink -e "${1}"),readonly=true,bind-propagation=rprivate" \ - --mount "type=bind,source=${project_dir},target=${project_dir},readonly=true,bind-propagation=rprivate" \ - --mount "type=bind,source=${project_dir}/target,target=${project_dir}/target,readonly=false,bind-propagation=rprivate" \ - --mount "type=bind,source=$(get_docker_sock),target=$(get_docker_sock),readonly=false,bind-propagation=rprivate" \ - --mount "type=bind,source=/dev/net/tun,target=/dev/net/tun,readonly=false,bind-propagation=rprivate" \ - --tmpfs "/run/netns:noexec,nosuid,uid=$(id -u),gid=$(id -g)" \ - --tmpfs "/var/run/netns:noexec,nosuid,uid=$(id -u),gid=$(id -g)" \ - --tmpfs "/tmp:nodev,noexec,nosuid,uid=$(id -u),gid=$(id -g)" \ - --user="$(id -u):$(id -g)" \ - --group-add="$(getent group docker | cut -d: -f3)" \ - --env LLVM_PROFILE_FILE="${LLVM_PROFILE_FILE:-""}" \ - --env CARGO_LLVM_COV="${CARGO_LLVM_COV:-0}" \ - --env CARGO_LLVM_COV_TARGET_DIR="${project_dir}/target" \ - --workdir="${project_dir}" \ - --env 
DOCKER_HOST="unix://$(get_docker_sock)" \ - --net=none \ - --cap-drop ALL \ - --cap-add NET_ADMIN \ - --cap-add NET_RAW \ - --cap-add SYS_ADMIN \ - --cap-add SYS_RAWIO \ - --read-only \ - "ghcr.io/githedgehog/dpdk-sys/libc-env:${DPDK_SYS_COMMIT}.${LIBC_ENV_PROFILE:-release}" \ - "${@}" diff --git a/scripts/update-doc-headers.sh b/scripts/update-doc-headers.sh index 32565ccdc..3398f0361 100755 --- a/scripts/update-doc-headers.sh +++ b/scripts/update-doc-headers.sh @@ -5,7 +5,7 @@ set -euxo pipefail declare -r MERMAID_VERSION="11.12.2" -declare -r KATEX_VERSION="0.16.27" +declare -r KATEX_VERSION="0.16.28" declare -rx MERMAID_JS_URL="https://cdn.jsdelivr.net/npm/mermaid@${MERMAID_VERSION}/dist/mermaid.min.js" declare -rx KATEX_JS_URL="https://cdn.jsdelivr.net/npm/katex@${KATEX_VERSION}/dist/katex.min.js" diff --git a/shell.nix b/shell.nix index 112c4b8a4..ae7a9b173 100644 --- a/shell.nix +++ b/shell.nix @@ -1,17 +1,8 @@ -{ - pkgs ? import { }, -}: -(pkgs.buildFHSEnv { - name = "dataplane-shell"; - targetPkgs = - pkgs: - (with pkgs; [ - # dev tools - bash - direnv - just - nil - nixd - wget - ]); -}).env +# SPDX-License-Identifier: Apache-2.0 +# Copyright Open Network Fabric Authors +inputs@{ ... }: +let + # Only forward arguments that default.nix accepts, ignoring extras injected by nix-shell. 
+ filtered = builtins.intersectAttrs (builtins.functionArgs (import ./default.nix)) inputs; +in +(import ./default.nix filtered).devenv diff --git a/sysfs/Cargo.toml b/sysfs/Cargo.toml index 0ad73dde9..4c73687c4 100644 --- a/sysfs/Cargo.toml +++ b/sysfs/Cargo.toml @@ -22,6 +22,5 @@ n-vm = { workspace = true } [build-dependencies] # internal -dpdk-sysroot-helper = { workspace = true } # external diff --git a/sysfs/build.rs b/sysfs/build.rs deleted file mode 100644 index 52f5b0197..000000000 --- a/sysfs/build.rs +++ /dev/null @@ -1,8 +0,0 @@ -// SPDX-License-Identifier: Apache-2.0 -// Copyright Open Network Fabric Authors - -fn main() { - let sysroot = dpdk_sysroot_helper::get_sysroot(); - println!("cargo:rustc-link-search=all={sysroot}/lib"); - println!("cargo:rustc-link-arg=--sysroot={sysroot}"); -} diff --git a/testing.md b/testing.md index fe5b29a1f..401bd5fe8 100644 --- a/testing.md +++ b/testing.md @@ -4,25 +4,25 @@ The default test runner works fine, but it is notably slower and less featureful than [nextest]. -Fortunately, [nextest] ships with recent versions of the compile-env, so assuming you have already followed the +Fortunately, [nextest] ships with the nix-shell, so assuming you have already followed the instructions in the [README.md](./README.md), you should be able to run ```shell -just cargo nextest run +cargo nextest run ``` -even if you have not installed [nextest]. +even if you have not installed [nextest] on your system. > [!WARNING] [nextest profiles] are not the same thing as [cargo profiles]. > If you want to select a cargo profile when running [nextest], use, for example ```shell -just cargo nextest run --cargo-profile=release +cargo nextest run --cargo-profile=release ``` ## Code Coverage (llvm-cov) -The compile-env also ships with [cargo llvm-cov] for collecting [code coverage] information. +The nix-shell also ships with [cargo llvm-cov] for collecting [code coverage] information. 
Assuming you have followed the [README.md](./README.md), you should be able to run ```shell @@ -48,38 +48,14 @@ And then open a web-browser to to view coverage data. The dataplane project makes fairly extensive use of [fuzz testing]. We use the [bolero] crate for our fuzz tests. -Running the test suite via `just cargo test` or `just cargo nextest run` will run the fuzz tests. +Running the test suite via `cargo test` or `cargo nextest run` will run the fuzz tests. - The tests (even the fuzz tests) are only run briefly. - Coverage information and sanitizers are not enabled. -- A full fuzzing engine is not set up, so evolutionary feedback is not provided when the tests are run this way, - -Using [libfuzzer] or [afl] can change this. - -The major downside is that these are very computationally heavy processes and can take a long time to run. -In fact, the [afl] fuzzer runs until you terminate it. - -To run a full fuzz test, start by listing the available fuzz targets: - -```shell -just list-fuzz-tests -``` - -Then pick a target, e.g. `vxlan::test::mutation_of_header_preserves_contract`, and run `libfuzzer` like so - -```shell -just _test_type=FUZZ fuzz vxlan::test::mutation_of_header_preserves_contract -``` - -The test will run for 1 minute by default, but you can change to, e.g., 15 minutes via - -```shell -just _test_type=FUZZ fuzz vxlan::test::mutation_of_header_preserves_contract -T 15min -``` +- A full fuzzing engine is not set up, so evolutionary feedback is not provided when the tests are run this way. > [!NOTE] -> The fuzz tests are run with full optimizations and extensive debugging information, so expect a fairly long compile -> time. +> A `just fuzz` recipe for running full fuzz tests with [libfuzzer] or [afl] is planned for a future PR. 
[afl]: https://aflplus.plus/ [bolero]: https://github.com/camshaft/bolero diff --git a/validator/src/main.rs b/validator/src/main.rs index 84d7122ea..ad04623f7 100644 --- a/validator/src/main.rs +++ b/validator/src/main.rs @@ -11,7 +11,7 @@ #![allow(clippy::field_reassign_with_default)] use config::{ExternalConfig, GwConfig, converters::k8s::FromK8sConversionError}; -use k8s_intf::generated::gateway_agent_crd::GatewayAgent; +use k8s_intf::gateway_agent_crd::GatewayAgent; use serde::{Deserialize, Serialize}; use std::io::{self, Read};